package metrics

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"os/exec"
	"sync"

	apb "source.monogon.dev/metropolis/node/core/curator/proto/api"
	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
	cpb "source.monogon.dev/metropolis/proto/common"

	"source.monogon.dev/metropolis/node"
	"source.monogon.dev/metropolis/node/core/identity"
	"source.monogon.dev/metropolis/pkg/event/memory"
	"source.monogon.dev/metropolis/pkg/supervisor"
)

// Service is the Metropolis Metrics Service.
//
// Currently, metrics means Prometheus metrics.
//
// It runs a forwarding proxy from a public HTTPS listener to a number of
// locally-running exporters, themselves listening over HTTP. The listener uses
// the main cluster CA and the node's main certificate, authenticating incoming
// connections with the same CA.
//
// Each exporter is exposed on a separate path, /metrics/<name>, where <name> is
// the name of the exporter.
//
// The HTTPS listener is bound to node.MetricsPort.
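//
// For illustration only, a client holding a cluster-issued certificate could
// scrape an exporter roughly as sketched below; clusterCA, clientCert,
// nodeAddress and the "node" exporter name are assumptions made for the
// example, not part of this package:
//
//	pool := x509.NewCertPool()
//	pool.AddCert(clusterCA) // *x509.Certificate of the cluster CA
//	client := &http.Client{
//		Transport: &http.Transport{
//			TLSClientConfig: &tls.Config{
//				Certificates: []tls.Certificate{clientCert},
//				RootCAs:      pool,
//			},
//		},
//	}
//	url := "https://" + net.JoinHostPort(nodeAddress, node.MetricsPort.PortString()) + "/metrics/node"
//	resp, err := client.Get(url)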
type Service struct {
	// Credentials used to run the TLS/HTTPS listener and verify incoming
	// connections.
	Credentials *identity.NodeCredentials
	// Curator is the gRPC client that the service will use to reach the cluster's
	// Curator, for pulling a list of all nodes.
	Curator ipb.CuratorClient
	// LocalRoles is a watched value containing the local node's roles, used to
	// decide whether or not to start the discovery routine.
	LocalRoles *memory.Value[*cpb.NodeRoles]
	// KubeTLSConfig provides the tls.Config for authenticating against kubernetes
	// services.
	KubeTLSConfig *tls.Config

	// List of Exporters to run and to forward HTTP requests to. If not set, defaults
	// to DefaultExporters.
	Exporters []Exporter
	// enableDynamicAddr enables listening on a dynamically chosen TCP port. This is
	// used by tests to make sure we don't fail due to the default port being already
	// in use.
	enableDynamicAddr bool

	// dynamicAddr will contain the picked dynamic listen address after the service
	// starts, if enableDynamicAddr is set.
	dynamicAddr chan string
	// sdResp contains the cached service discovery response.
	sdResp sdResponse
	// sdRespMtx is the mutex for sdResp, allowing its use inside the HTTP handler.
	sdRespMtx sync.RWMutex
}

// listen starts the public TLS listener for the service.
func (s *Service) listen() (net.Listener, error) {
	cert := s.Credentials.TLSCredentials()

	pool := x509.NewCertPool()
	pool.AddCert(s.Credentials.ClusterCA())

	tlsc := tls.Config{
		Certificates: []tls.Certificate{
			cert,
		},
		ClientAuth: tls.RequireAndVerifyClientCert,
		ClientCAs:  pool,
		// TODO(q3k): use VerifyPeerCertificate/VerifyConnection to check that the
		// incoming client is allowed to access metrics. Currently we allow
		// anyone/anything with a valid cluster certificate to access them.
	}

	addr := net.JoinHostPort("", node.MetricsPort.PortString())
	if s.enableDynamicAddr {
		addr = ""
	}
	return tls.Listen("tcp", addr, &tlsc)
}

func (s *Service) Run(ctx context.Context) error {
	lis, err := s.listen()
	if err != nil {
		return fmt.Errorf("listen failed: %w", err)
	}
	if s.enableDynamicAddr {
		s.dynamicAddr <- lis.Addr().String()
	}

	if s.Exporters == nil {
		s.Exporters = DefaultExporters
	}

	// First, make sure we don't have duplicate exporters.
	seenNames := make(map[string]bool)
	for _, exporter := range s.Exporters {
		if seenNames[exporter.Name] {
			return fmt.Errorf("duplicate exporter name: %q", exporter.Name)
		}
		seenNames[exporter.Name] = true
	}

	// Start all exporters as sub-runnables.
	for _, exporter := range s.Exporters {
		if exporter.Executable == "" {
			continue
		}

		cmd := exec.CommandContext(ctx, exporter.Executable, exporter.Arguments...)
		err := supervisor.Run(ctx, exporter.Name, func(ctx context.Context) error {
			return supervisor.RunCommand(ctx, cmd)
		})
		if err != nil {
			return fmt.Errorf("running %s failed: %w", exporter.Name, err)
		}
	}

	// And register all exporter forwarding functions on a mux.
	mux := http.NewServeMux()
	logger := supervisor.Logger(ctx)
	for _, exporter := range s.Exporters {
		exporter := exporter

		mux.HandleFunc(exporter.externalPath(), func(w http.ResponseWriter, r *http.Request) {
			exporter.forward(s, logger, w, r)
		})

		logger.Infof("Registered exporter %q", exporter.Name)
	}

	// And register an http_sd discovery endpoint.
	mux.HandleFunc("/discovery", s.handleDiscovery)

	if err := supervisor.Run(ctx, "watch-roles", s.watchRoles); err != nil {
		return err
	}
	supervisor.Signal(ctx, supervisor.SignalHealthy)

	// Start forwarding server.
	srv := http.Server{
		Handler: mux,
		BaseContext: func(_ net.Listener) context.Context {
			return ctx
		},
	}

	go func() {
		<-ctx.Done()
		srv.Close()
	}()

	err = srv.Serve(lis)
	if err != nil && ctx.Err() != nil {
		return ctx.Err()
	}
	return fmt.Errorf("Serve: %w", err)
}

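// shouldStartDiscovery returns true if the local node should serve the
// discovery endpoint, which is the case when it is a consensus member.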
func shouldStartDiscovery(nr *cpb.NodeRoles) bool {
	return nr.ConsensusMember != nil
}

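// watchRoles watches the local node's roles and starts the discovery watcher
// once the node is a consensus member. When that changes, the cached discovery
// response is cleared and the runnable restarts.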
func (s *Service) watchRoles(ctx context.Context) error {
	w := s.LocalRoles.Watch()
	defer w.Close()

	r, err := w.Get(ctx)
	if err != nil {
		return err
	}

	if shouldStartDiscovery(r) {
		supervisor.Logger(ctx).Infof("Starting discovery endpoint")
		if err := supervisor.Run(ctx, "watch", s.watch); err != nil {
			return err
		}
	}

	for {
		nr, err := w.Get(ctx)
		if err != nil {
			return err
		}

		if shouldStartDiscovery(r) != shouldStartDiscovery(nr) {
			s.sdRespMtx.Lock()
			// Disable the discovery endpoint until the new routine takes over.
			s.sdResp = nil
			s.sdRespMtx.Unlock()

			supervisor.Logger(ctx).Infof("Discovery endpoint config changed, restarting")
			return fmt.Errorf("restarting")
		}
	}
}

// watch is the sub-runnable responsible for fetching node updates.
func (s *Service) watch(ctx context.Context) error {
	supervisor.Signal(ctx, supervisor.SignalHealthy)

	srv, err := s.Curator.Watch(ctx, &apb.WatchRequest{
		Kind: &apb.WatchRequest_NodesInCluster_{
			NodesInCluster: &apb.WatchRequest_NodesInCluster{},
		},
	})
	if err != nil {
		return fmt.Errorf("curator watch failed: %w", err)
	}
	defer srv.CloseSend()

	nodes := make(map[string]*apb.Node)
	for {
		ev, err := srv.Recv()
		if err != nil {
			return fmt.Errorf("curator watch recv failed: %w", err)
		}

		for _, n := range ev.Nodes {
			nodes[n.Id] = n
		}

		for _, t := range ev.NodeTombstones {
			n, ok := nodes[t.NodeId]
			if !ok {
				// This is an indication of us losing data somehow. If this happens, it likely
				// means a Curator bug.
				supervisor.Logger(ctx).Warningf("Node %s: tombstone for unknown node", t.NodeId)
				continue
			}
			delete(nodes, n.Id)
		}

		s.sdRespMtx.Lock()

		// Reset the existing response slice.
		s.sdResp = s.sdResp[:0]
		for _, n := range nodes {
			// Only care about nodes that have all required configuration set.
			if n.Status == nil || n.Status.ExternalAddress == "" || n.Roles == nil {
				continue
			}

			s.sdResp = append(s.sdResp, sdTarget{
				Targets: []string{n.Status.ExternalAddress},
				Labels: map[string]string{
					"__meta_metropolis_role_kubernetes_worker":     fmt.Sprintf("%t", n.Roles.KubernetesWorker != nil),
					"__meta_metropolis_role_kubernetes_controller": fmt.Sprintf("%t", n.Roles.KubernetesController != nil),
					"__meta_metropolis_role_consensus_member":      fmt.Sprintf("%t", n.Roles.ConsensusMember != nil),
				},
			})
		}

		s.sdRespMtx.Unlock()
	}
}

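// handleDiscovery serves the cached discovery response as JSON, in the format
// expected by Prometheus' HTTP service discovery (http_sd).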
func (s *Service) handleDiscovery(w http.ResponseWriter, _ *http.Request) {
	s.sdRespMtx.RLock()
	defer s.sdRespMtx.RUnlock()

	// If sdResp is nil, which only happens if this node is not a consensus member
	// or is still booting, respond with NotImplemented.
	if s.sdResp == nil {
		w.WriteHeader(http.StatusNotImplemented)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)

	if err := json.NewEncoder(w).Encode(s.sdResp); err != nil {
		// If the encoder fails, it's mostly because of closed connections,
		// so let's just ignore these errors.
		return
	}
}

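// sdResponse is the document served on /discovery: a list of Prometheus HTTP
// service discovery targets, one sdTarget per known node. As a rough
// illustration (the values below are made up for this example), the encoded
// JSON looks like:
//
//	[
//	  {
//	    "targets": ["192.0.2.10"],
//	    "labels": {
//	      "__meta_metropolis_role_kubernetes_worker": "true",
//	      "__meta_metropolis_role_kubernetes_controller": "false",
//	      "__meta_metropolis_role_consensus_member": "true"
//	    }
//	  }
//	]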
type sdResponse []sdTarget

type sdTarget struct {
	Targets []string          `json:"targets"`
	Labels  map[string]string `json:"labels"`
}