• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

supabase / supavisor / 19370957114

14 Nov 2025 04:30PM UTC coverage: 62.682% (+1.4%) from 61.246%
19370957114

Pull #744

github

web-flow
Merge fd252a012 into 0224a24c8
Pull Request #744: fix(defrag): improve statems, caching, logs, circuit breaking

592 of 785 new or added lines in 22 files covered. (75.41%)

18 existing lines in 5 files now uncovered.

1809 of 2886 relevant lines covered (62.68%)

4508.83 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/lib/supavisor/application.ex
1
defmodule Supavisor.Application do
  # See https://hexdocs.pm/elixir/Application.html
  # for more information on OTP Applications
  @moduledoc false

  use Application

  require Logger

  alias Supavisor.Monitoring.PromEx

  # Resolved at COMPILE time: flipping :metrics_disabled in runtime config has
  # no effect without a recompile. Used in start/2 to decide whether the
  # metrics-related children (PromEx etc.) are added to the supervision tree.
  @metrics_disabled Application.compile_env(:supavisor, :metrics_disabled, false)
13

14
  # Application entry point: configures logging metadata, swaps the OS signal
  # handler, starts the TCP proxy listeners, registers this node with :syn,
  # and finally boots the top-level supervision tree.
  @impl true
  def start(_type, _args) do
    # Snapshot the current :logger primary config so its existing metadata can
    # be merged (not clobbered) below.
    primary_config = :logger.get_primary_config()

    # Host part of the node name, e.g. "host" from :"app@host"; nil when the
    # node name has no "@" part (e.g. :nonode@nohost splits into two parts, so
    # this is nil only for unusual node names).
    host =
      case node() |> Atom.to_string() |> String.split("@") do
        [_, host] -> host
        _ -> nil
      end

    region = Application.get_env(:supavisor, :region)

    # Metadata attached to every log line emitted by this node.
    global_metadata =
      %{
        nodehost: host,
        az: Application.get_env(:supavisor, :availability_zone),
        region: region,
        # LOCATION_KEY overrides region when set; falls back to region.
        location: System.get_env("LOCATION_KEY") || region,
        instance_id: System.get_env("INSTANCE_ID"),
        short_node_id: short_node_id()
      }

    # Merge (not replace) into the primary logger metadata so anything set by
    # the runtime config is preserved.
    :ok =
      :logger.set_primary_config(
        :metadata,
        Map.merge(primary_config.metadata, global_metadata)
      )

    # Attach the handlers declared under the :supavisor app env; done after the
    # metadata merge so handlers see the enriched metadata from the start.
    :ok = Logger.add_handlers(:supavisor)

    # Replace the default OS signal handler with our own (e.g. for custom
    # SIGTERM handling); swap_sup_handler keeps the handler supervised.
    :ok =
      :gen_event.swap_sup_handler(
        :erl_signal_server,
        {:erl_signal_handler, []},
        {Supavisor.SignalHandler, []}
      )

    # Per-shard internal listener specs, one per configured port.
    session_shards =
      :supavisor
      |> Application.fetch_env!(:session_proxy_ports)
      |> build_shards(:session)

    transaction_shards =
      :supavisor
      |> Application.fetch_env!(:transaction_proxy_ports)
      |> build_shards(:transaction)

    # All listeners to start: the three public proxy ports plus the internal
    # sharded session/transaction ports.
    proxy_ports =
      [
        {:pg_proxy_transaction, Application.get_env(:supavisor, :proxy_port_transaction),
         %{mode: :transaction, local: false}, Supavisor.ClientHandler},
        {:pg_proxy_session, Application.get_env(:supavisor, :proxy_port_session),
         %{mode: :session, local: false}, Supavisor.ClientHandler},
        {:pg_proxy, Application.get_env(:supavisor, :proxy_port), %{mode: :proxy, local: false},
         Supavisor.ClientHandler}
      ] ++ session_shards ++ transaction_shards

    # Start each ranch TCP listener. A failed listener is logged and skipped
    # (best-effort): the application still boots without it.
    for {key, port, opts, handler} <- proxy_ports do
      case :ranch.start_listener(
             key,
             :ranch_tcp,
             %{
               max_connections: String.to_integer(System.get_env("MAX_CONNECTIONS") || "75000"),
               num_acceptors: String.to_integer(System.get_env("NUM_ACCEPTORS") || "100"),
               socket_opts: [port: port, keepalive: true]
             },
             handler,
             opts
           ) do
        {:ok, _ref} ->
          Logger.notice(
            "Proxy started #{opts.mode}(local=#{opts.local}) on port #{:ranch.get_port(key)}"
          )

        error ->
          Logger.error("Proxy on #{port} not started because of #{inspect(error)}")
      end
    end

    # Register cluster-wide process scopes with :syn and join the
    # availability-zone group so peers can find this node by AZ.
    :syn.set_event_handler(Supavisor.SynHandler)
    :syn.add_node_to_scopes([:tenants, :availability_zone])

    :syn.join(:availability_zone, Application.get_env(:supavisor, :availability_zone), self(),
      node: node()
    )

    # Initialize circuit-breaker state (presumably ETS-backed; must run before
    # children that use it — TODO confirm against Supavisor.CircuitBreaker).
    Supavisor.CircuitBreaker.init()

    topologies = Application.get_env(:libcluster, :topologies) || []

    children = [
      Supavisor.ErlSysMon,
      Supavisor.Health,
      Supavisor.CacheRefreshLimiter,
      Supavisor.CircuitBreaker.Janitor,
      Supavisor.SecretJanitor,
      {Task.Supervisor, name: Supavisor.PoolTerminator},
      {Registry, keys: :unique, name: Supavisor.Registry.Tenants},
      {Registry, keys: :unique, name: Supavisor.Registry.ManagerTables},
      {Registry, keys: :unique, name: Supavisor.Registry.PoolPids},
      {Registry, keys: :duplicate, name: Supavisor.Registry.TenantSups},
      # Client registries are partitioned across schedulers to reduce
      # contention under many concurrent registrations.
      {Registry,
       keys: :duplicate,
       name: Supavisor.Registry.TenantClients,
       partitions: System.schedulers_online()},
      {Registry,
       keys: :duplicate,
       name: Supavisor.Registry.TenantProxyClients,
       partitions: System.schedulers_online()},
      {Cluster.Supervisor, [topologies, [name: Supavisor.ClusterSupervisor]]},
      Supavisor.Repo,
      # Start the Telemetry supervisor
      SupavisorWeb.Telemetry,
      # Start the PubSub system
      {Phoenix.PubSub, name: Supavisor.PubSub},
      {
        PartitionSupervisor,
        child_spec: DynamicSupervisor, strategy: :one_for_one, name: Supavisor.DynamicSupervisor
      },
      Supavisor.Vault,

      # Start the Endpoint (http/https)
      SupavisorWeb.Endpoint
    ]

    Logger.warning("metrics_disabled is #{inspect(@metrics_disabled)}")

    # Metrics children are appended only when metrics are enabled (compile-time
    # flag, see @metrics_disabled above).
    children =
      if @metrics_disabled do
        children
      else
        children ++ [PromEx, Supavisor.TenantsMetrics, Supavisor.MetricsCleaner]
      end

    # start Cachex only if the node uses names, this is necessary for test setup
    children =
      if node() != :nonode@nohost do
        [{Cachex, name: Supavisor.Cache} | children]
      else
        children
      end

    # See https://hexdocs.pm/elixir/Supervisor.html
    # for other strategies and supported options
    opts = [strategy: :one_for_one, name: Supavisor.Supervisor]
    Supervisor.start_link(children, opts)
  end
161

162
  # Optional Application callback invoked on a release upgrade: forwards the
  # changed/removed configuration to the Phoenix endpoint so it can adjust
  # itself, then returns :ok as the behaviour requires.
  @impl true
  def config_change(changed_config, _new_config, removed_config) do
    SupavisorWeb.Endpoint.config_change(changed_config, removed_config)
    :ok
  end
169

170
  # Builds ranch listener specs for the internal per-shard proxy ports.
  #
  # Each configured port becomes a `{key, port, handler_opts, handler}` tuple
  # consumed by the listener loop in start/2, keyed by
  # `{:pg_proxy_internal, mode, shard}` where `shard` is the zero-based index
  # of the port in the configured list.
  #
  # Fix: the return spec was the uninformative `term()`; it now states the
  # actual shape of the listener-spec list.
  @spec build_shards([pos_integer()], atom()) :: [
          {{:pg_proxy_internal, atom(), non_neg_integer()}, pos_integer(), map(), module()}
        ]
  defp build_shards(ports, mode) do
    for {port, shard} <- Enum.with_index(ports) do
      {{:pg_proxy_internal, mode, shard}, port, %{mode: mode, local: true, shard: shard},
       Supavisor.ClientHandler}
    end
  end
177

178
  # Returns the segment of the Fly allocation id before the first "-"
  # (used as a compact node identifier in log metadata), or nil when
  # :fly_alloc_id is unset, not a binary, or contains no "-".
  @spec short_node_id() :: String.t() | nil
  defp short_node_id do
    case Application.fetch_env(:supavisor, :fly_alloc_id) do
      {:ok, alloc_id} when is_binary(alloc_id) ->
        # parts: 2 yields two elements only when a "-" is present.
        case String.split(alloc_id, "-", parts: 2) do
          [short_id, _rest] -> short_id
          _no_dash -> nil
        end

      _missing_or_invalid ->
        nil
    end
  end
188
end
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc