Split GenServer from service
@@ -15,7 +15,7 @@ defmodule KafkaexLagExporter.Application do
       {Phoenix.PubSub, name: KafkaexLagExporter.PubSub},
       # Start the Endpoint (http/https)
       KafkaexLagExporterWeb.Endpoint,
-      KafkaexLagExporter.ConsumerOffsetFetcher
+      KafkaexLagExporter.ConsumerOffset
     ]
 
     # See https://hexdocs.pm/elixir/Supervisor.html
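For orientation, the surrounding start/2 callback presumably follows the standard Phoenix application skeleton sketched below; only the child entries are confirmed by the hunk above, the supervisor options are an assumption.

defmodule KafkaexLagExporter.Application do
  use Application

  @impl true
  def start(_type, _args) do
    children = [
      {Phoenix.PubSub, name: KafkaexLagExporter.PubSub},
      # Start the Endpoint (http/https)
      KafkaexLagExporterWeb.Endpoint,
      # After this commit the supervised child is the polling GenServer
      KafkaexLagExporter.ConsumerOffset
    ]

    # See https://hexdocs.pm/elixir/Supervisor.html
    # Options below are assumed defaults, not taken from the diff.
    opts = [strategy: :one_for_one, name: KafkaexLagExporter.Supervisor]
    Supervisor.start_link(children, opts)
  end
end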
41 lib/kafkaex_lag_exporter/consumer_offset.ex Normal file
@@ -0,0 +1,41 @@
+defmodule KafkaexLagExporter.ConsumerOffset do
+  @moduledoc "GenServer implementation to set offset metrics for consumer groups"
+
+  use GenServer
+
+  require Logger
+
+  @interval 5_000
+
+  def start_link(default) when is_list(default) do
+    GenServer.start_link(__MODULE__, default, name: __MODULE__)
+  end
+
+  @impl true
+  def init(_) do
+    Logger.info("Starting #{__MODULE__}")
+
+    clients = Application.get_env(:brod, :clients)
+    endpoints = clients[:kafka_client][:endpoints] || [{"redpanda", 29_092}]
+
+    Logger.info("Received Kafka endpoints: #{inspect(endpoints)}")
+
+    Process.send_after(self(), :tick, @interval)
+
+    {:ok, %{endpoints: endpoints}}
+  end
+
+  @impl true
+  def handle_info(:tick, state) do
+    [endpoint | _] = state.endpoints
+
+    %{lags: consumer_lags, sum: consumer_lag_sum} = KafkaexLagExporter.ConsumerOffsetFetcher.get(endpoint)
+
+    KafkaexLagExporter.Metrics.group_lag_per_partition(endpoint, consumer_lags)
+    KafkaexLagExporter.Metrics.group_sum_lag(endpoint, consumer_lag_sum)
+
+    Process.send_after(self(), :tick, @interval)
+
+    {:noreply, state}
+  end
+end
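init/1 resolves its Kafka endpoints from the :brod client configuration and falls back to {"redpanda", 29_092}. A minimal config sketch that would satisfy this lookup is shown below; the :kafka_client name and the endpoint tuple come from the code above, everything else is assumed.

import Config

# Sketch of the :brod configuration read by init/1 (assumed; only the
# :kafka_client key and the endpoint tuple shape are implied by the code).
config :brod,
  clients: [
    kafka_client: [
      endpoints: [{"redpanda", 29_092}]
    ]
  ]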
@@ -1,76 +1,28 @@
 defmodule KafkaexLagExporter.ConsumerOffsetFetcher do
-  @moduledoc "Genserver implementation to calculate summarized lag for each consumer group"
+  @moduledoc "Calculate summarized lag for each consumer group"
 
-  use GenServer
-
   require Logger
 
-  @interval 5_000
-
-  def start_link(default) when is_list(default) do
-    GenServer.start_link(__MODULE__, default, name: __MODULE__)
-  end
-
-  @impl true
-  def init(_) do
-    Logger.info("Starting #{__MODULE__}")
-
-    clients = Application.get_env(:brod, :clients)
-    endpoints = clients[:kafka_client][:endpoints]
-
-    Logger.info("Reveived Kafka endpoints: #{inspect(endpoints)}")
-
-    Process.send_after(self(), :tick, @interval)
-    {:ok, %{endpoints: endpoints}}
-  end
-
-  @impl true
-  def handle_info(:tick, state) do
-    [endpoint | _] = state.endpoints || [{"redpanda", 29_092}]
-
-    consumer_group_names = get_consumer_group_names(endpoint)
-
-    topic_names_for_consumer_groups =
-      :brod.describe_groups(endpoint, [], consumer_group_names)
-      |> get_topic_names_for_consumer_groups
+  # TODO: change return type
+  @spec get(KafkaexLagExporter.KafkaWrapper.endpoint()) :: {any(), any()}
+  def get(endpoint) do
+    consumer_group_names = KafkaexLagExporter.KafkaUtils.get_consumer_group_names(endpoint)
 
     consumer_lags =
-      topic_names_for_consumer_groups
+      KafkaexLagExporter.KafkaUtils.topic_names_for_consumer_groups(
+        endpoint,
+        [],
+        consumer_group_names
+      )
       |> Enum.map(fn [consumer_group, topics] ->
         [consumer_group, get_lag_for_consumer(consumer_group, topics)]
       end)
 
     consumer_lag_sum = get_lag_for_consumer_sum(consumer_lags)
 
-    KafkaexLagExporter.Metrics.group_lag_per_partition(endpoint, consumer_lags)
-    KafkaexLagExporter.Metrics.group_sum_lag(endpoint, consumer_lag_sum)
-
-    Process.send_after(self(), :tick, @interval)
-
-    {:noreply, state}
+    %{lags: consumer_lags, sum: consumer_lag_sum}
   end
 
-  defp get_consumer_group_names({host, port}) do
-    [{_, groups} | _] = :brod.list_all_groups([{host, port}], [])
-
-    groups
-    |> Enum.filter(fn {_, _, protocol} -> protocol == "consumer" end)
-    |> Enum.map(fn {_, group_name, "consumer"} -> group_name end)
-  end
-
-  defp get_topic_names_for_consumer_groups({:ok, group_descriptions}) do
-    group_descriptions
-    |> Enum.map(fn %{group_id: consumer_group, members: members} -> [consumer_group, members] end)
-    |> Enum.map(fn [consumer_group, members] -> [consumer_group, get_topic_names(members)] end)
-  end
-
-  defp get_topic_names(members) do
-    Enum.flat_map(members, fn member ->
-      KafkaexLagExporter.TopicNameParser.parse_topic_names(member.member_assignment)
-    end)
-  end
-
-  # TODO: test method for multiple topics
   defp get_lag_for_consumer(consumer_group, topics) do
     topics
     |> Enum.flat_map(fn topic ->
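With the GenServer split out, get/1 is now a plain function that can be called synchronously. A hypothetical call is sketched below; the endpoint tuple and the concrete lag values are invented for illustration, only the map shape follows from the return expression above.

# Hypothetical usage; endpoint and the concrete values are illustrative only.
endpoint = {"redpanda", 29_092}

%{lags: lags, sum: lag_sum} = KafkaexLagExporter.ConsumerOffsetFetcher.get(endpoint)

# lags: one [consumer_group, lag_per_partition] entry per consumer group,
# where lag_per_partition is a list of tuples whose second element is the lag,
# e.g. [["my-group", [{0, 3}, {1, 5}]]]
# lag_sum: the same groups with the per-partition lags summed,
# e.g. [["my-group", 8]]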
@@ -78,13 +30,11 @@ defmodule KafkaexLagExporter.ConsumerOffsetFetcher do
     end)
   end
 
-  # TODO: test method for multiple topics
   defp get_lag_for_consumer_sum(lags_per_consumer_group) do
     lags_per_consumer_group
-    |> Enum.map(fn [topic, lag_per_partition] -> [topic, sum_topic_lag(lag_per_partition)] end)
+    |> Enum.map(fn [topic, lag_per_partition] -> [topic, sum_topic_lag(lag_per_partition, 0)] end)
   end
 
-  defp sum_topic_lag(item, acc \\ 0)
   defp sum_topic_lag([], acc), do: acc
   defp sum_topic_lag([h | t], acc), do: sum_topic_lag(t, acc + elem(h, 1))
 end
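sum_topic_lag/2 is a plain tail-recursive fold over the second tuple element, now seeded explicitly with 0 instead of a default argument. A standalone sketch with invented sample data (module name made up) behaves like this:

# Standalone replica of the fold above, with invented sample data.
defmodule LagSumSketch do
  def sum_topic_lag([], acc), do: acc
  def sum_topic_lag([h | t], acc), do: sum_topic_lag(t, acc + elem(h, 1))
end

# Per-partition lags 3 + 5 + 0 sum to 8.
8 = LagSumSketch.sum_topic_lag([{0, 3}, {1, 5}, {2, 0}], 0)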