package metrics

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"os/exec"
	"sync"

	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
	cpb "source.monogon.dev/metropolis/proto/common"

	"source.monogon.dev/metropolis/node"
	"source.monogon.dev/metropolis/node/core/identity"
	"source.monogon.dev/metropolis/pkg/event/memory"
	"source.monogon.dev/metropolis/pkg/supervisor"
)

// Service is the Metropolis Metrics Service.
//
// Currently, metrics means Prometheus metrics.
//
// It runs a forwarding proxy from a public HTTPS listener to a number of
// locally-running exporters, themselves listening over HTTP. The listener uses
// the main cluster CA and the node's main certificate, authenticating incoming
// connections with the same CA.
//
// Each exporter is exposed on a separate path, /metrics/<name>, where <name> is
// the name of the exporter.
//
// The HTTPS listener is bound to node.MetricsPort.
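//
// A minimal sketch of how the service might be wired up under the supervisor
// (illustrative only; the credentials, curator and roles variables are assumed
// to already exist):
//
//	svc := Service{
//		Credentials: creds,
//		Curator:     cur,
//		LocalRoles:  &roles,
//	}
//	supervisor.Run(ctx, "metrics", svc.Run)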
type Service struct {
	// Credentials used to run the TLS/HTTPS listener and verify incoming
	// connections.
	Credentials *identity.NodeCredentials
	// Curator is the gRPC client that the service will use to reach the cluster's
	// Curator, for pulling a list of all nodes.
	Curator ipb.CuratorClient
	// LocalRoles is a watchable view of the local node's roles, used to decide
	// whether or not to start the discovery routine.
	LocalRoles *memory.Value[*cpb.NodeRoles]
	// List of Exporters to run and to forward HTTP requests to. If not set, defaults
	// to DefaultExporters.
	Exporters []Exporter
	// enableDynamicAddr enables listening on a dynamically chosen TCP port. This is
	// used by tests to make sure we don't fail due to the default port being already
	// in use.
	enableDynamicAddr bool

	// dynamicAddr will contain the picked dynamic listen address after the service
	// starts, if enableDynamicAddr is set.
	dynamicAddr chan string
	// sdResp contains the cached service discovery response served by
	// handleDiscovery. It is nil while the discovery endpoint is disabled.
	sdResp sdResponse
	// sdRespMtx guards sdResp, which is also read from the HTTP handler.
	sdRespMtx sync.RWMutex
}

// listen starts the public TLS listener for the service.
func (s *Service) listen() (net.Listener, error) {
	cert := s.Credentials.TLSCredentials()

	pool := x509.NewCertPool()
	pool.AddCert(s.Credentials.ClusterCA())

	tlsc := tls.Config{
		Certificates: []tls.Certificate{
			cert,
		},
		ClientAuth: tls.RequireAndVerifyClientCert,
		ClientCAs:  pool,
		// TODO(q3k): use VerifyPeerCertificate/VerifyConnection to check that the
		// incoming client is allowed to access metrics. Currently we allow
		// anyone/anything with a valid cluster certificate to access them.
	}

	addr := net.JoinHostPort("", node.MetricsPort.PortString())
	if s.enableDynamicAddr {
		addr = ""
	}
	return tls.Listen("tcp", addr, &tlsc)
}

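// Run is the main runnable of the Service. It opens the TLS listener, starts
// all configured exporters as sub-runnables, registers their forwarding
// handlers and the /discovery endpoint on an HTTP mux, and then serves until
// the context is canceled.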
func (s *Service) Run(ctx context.Context) error {
	lis, err := s.listen()
	if err != nil {
		return fmt.Errorf("listen failed: %w", err)
	}
	if s.enableDynamicAddr {
		s.dynamicAddr <- lis.Addr().String()
	}

	if s.Exporters == nil {
		s.Exporters = DefaultExporters
	}

	// First, make sure we don't have duplicate exporters.
	seenNames := make(map[string]bool)
	for _, exporter := range s.Exporters {
		if seenNames[exporter.Name] {
			return fmt.Errorf("duplicate exporter name: %q", exporter.Name)
		}
		seenNames[exporter.Name] = true
	}

	// Start all exporters as sub-runnables.
	for _, exporter := range s.Exporters {
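		// Exporters without their own executable are not started here; only
		// HTTP forwarding is set up for them below, on the assumption that
		// they are already serving on their configured address.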
		if exporter.Executable == "" {
			continue
		}

		cmd := exec.CommandContext(ctx, exporter.Executable, exporter.Arguments...)
		err := supervisor.Run(ctx, exporter.Name, func(ctx context.Context) error {
			return supervisor.RunCommand(ctx, cmd)
		})
		if err != nil {
			return fmt.Errorf("running %s failed: %w", exporter.Name, err)
		}
	}

	// And register all exporter forwarding functions on a mux.
	mux := http.NewServeMux()
	logger := supervisor.Logger(ctx)
	for _, exporter := range s.Exporters {
		exporter := exporter

		mux.HandleFunc(exporter.externalPath(), func(w http.ResponseWriter, r *http.Request) {
			exporter.forward(logger, w, r)
		})

		logger.Infof("Registered exporter %q", exporter.Name)
	}

	// And register an http_sd-compatible discovery endpoint.
	mux.HandleFunc("/discovery", s.handleDiscovery)

	if err := supervisor.Run(ctx, "watch-roles", s.watchRoles); err != nil {
		return err
	}
	supervisor.Signal(ctx, supervisor.SignalHealthy)

	// Start forwarding server.
	srv := http.Server{
		Handler: mux,
		BaseContext: func(_ net.Listener) context.Context {
			return ctx
		},
	}

	go func() {
		<-ctx.Done()
		srv.Close()
	}()

	err = srv.Serve(lis)
	if err != nil && ctx.Err() != nil {
		return ctx.Err()
	}
	return fmt.Errorf("Serve: %w", err)
}

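// shouldStartDiscovery returns true if this node should serve the Prometheus
// service discovery endpoint, which is the case if it is a consensus member.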
func shouldStartDiscovery(nr *cpb.NodeRoles) bool {
	return nr.ConsensusMember != nil
}

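// watchRoles is a sub-runnable that watches the local node's roles and starts
// the discovery watcher if this node is a consensus member. If the relevant
// role changes later, it clears the cached discovery response and restarts
// itself (and thus the watcher) by returning an error to the supervisor.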
func (s *Service) watchRoles(ctx context.Context) error {
	w := s.LocalRoles.Watch()
	defer w.Close()

	r, err := w.Get(ctx)
	if err != nil {
		return err
	}

	if shouldStartDiscovery(r) {
		supervisor.Logger(ctx).Infof("Starting discovery endpoint")
		if err := supervisor.Run(ctx, "watch", s.watch); err != nil {
			return err
		}
	}

	for {
		nr, err := w.Get(ctx)
		if err != nil {
			return err
		}

		if shouldStartDiscovery(r) != shouldStartDiscovery(nr) {
			s.sdRespMtx.Lock()
			// Disable the discovery endpoint until the new discovery routine
			// takes over.
			s.sdResp = nil
			s.sdRespMtx.Unlock()

			supervisor.Logger(ctx).Infof("Discovery endpoint config changed, restarting")
			return fmt.Errorf("restarting")
		}
	}
}

// watch is the sub-runnable responsible for watching node updates from the
// Curator and rebuilding the cached service discovery response.
func (s *Service) watch(ctx context.Context) error {
	supervisor.Signal(ctx, supervisor.SignalHealthy)

	srv, err := s.Curator.Watch(ctx, &ipb.WatchRequest{
		Kind: &ipb.WatchRequest_NodesInCluster_{
			NodesInCluster: &ipb.WatchRequest_NodesInCluster{},
		},
	})
	if err != nil {
		return fmt.Errorf("curator watch failed: %w", err)
	}
	defer srv.CloseSend()

	nodes := make(map[string]*ipb.Node)
	for {
		ev, err := srv.Recv()
		if err != nil {
			return fmt.Errorf("curator watch recv failed: %w", err)
		}

		for _, n := range ev.Nodes {
			nodes[n.Id] = n
		}

		for _, t := range ev.NodeTombstones {
			n, ok := nodes[t.NodeId]
			if !ok {
				// This is an indication of us losing data somehow. If this happens, it likely
				// means a Curator bug.
				supervisor.Logger(ctx).Warningf("Node %s: tombstone for unknown node", t.NodeId)
				continue
			}
			delete(nodes, n.Id)
		}

		s.sdRespMtx.Lock()

		// Reset the existing response slice, reusing its backing array.
		s.sdResp = s.sdResp[:0]
		for _, n := range nodes {
			// Only care about nodes that have all required configuration set.
			if n.Status == nil || n.Status.ExternalAddress == "" || n.Roles == nil {
				continue
			}

			s.sdResp = append(s.sdResp, sdTarget{
				Targets: []string{n.Status.ExternalAddress},
				Labels: map[string]string{
					"__meta_metropolis_role_kubernetes_worker":     fmt.Sprintf("%t", n.Roles.KubernetesWorker != nil),
					"__meta_metropolis_role_kubernetes_controller": fmt.Sprintf("%t", n.Roles.KubernetesController != nil),
					"__meta_metropolis_role_consensus_member":      fmt.Sprintf("%t", n.Roles.ConsensusMember != nil),
				},
			})
		}

		s.sdRespMtx.Unlock()
	}
}

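// handleDiscovery serves the cached discovery response to Prometheus-style
// HTTP service discovery clients.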
func (s *Service) handleDiscovery(w http.ResponseWriter, _ *http.Request) {
	s.sdRespMtx.RLock()
	defer s.sdRespMtx.RUnlock()

	// If sdResp is nil, which only happens if this node is not a consensus
	// member or the discovery watcher has not produced data yet, respond with
	// NotImplemented.
	if s.sdResp == nil {
		w.WriteHeader(http.StatusNotImplemented)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)

	if err := json.NewEncoder(w).Encode(s.sdResp); err != nil {
		// If the encoder fails, it's almost always because the client closed
		// the connection, so just ignore the error.
		return
	}
}

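// sdResponse is the payload served by the /discovery endpoint. It follows the
// Prometheus http_sd format: a JSON array of target groups, each carrying a
// list of targets and a set of labels, for example (addresses illustrative):
//
//	[
//	  {
//	    "targets": ["10.0.0.2"],
//	    "labels": {
//	      "__meta_metropolis_role_consensus_member": "true",
//	      "__meta_metropolis_role_kubernetes_worker": "false",
//	      "__meta_metropolis_role_kubernetes_controller": "false"
//	    }
//	  }
//	]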
type sdResponse []sdTarget

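// sdTarget is a single target group within an sdResponse.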
type sdTarget struct {
	Targets []string          `json:"targets"`
	Labels  map[string]string `json:"labels"`
}