package metrics

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"os/exec"
	"sync"

	apb "source.monogon.dev/metropolis/node/core/curator/proto/api"
	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
	cpb "source.monogon.dev/metropolis/proto/common"

	"source.monogon.dev/metropolis/node"
	"source.monogon.dev/metropolis/node/core/identity"
	"source.monogon.dev/metropolis/pkg/event/memory"
	"source.monogon.dev/metropolis/pkg/supervisor"
)

// Service is the Metropolis Metrics Service.
//
// Currently, metrics means Prometheus metrics.
//
// It runs a forwarding proxy from a public HTTPS listener to a number of
// locally-running exporters, themselves listening over HTTP. The listener uses
// the main cluster CA and the node's main certificate, authenticating incoming
// connections with the same CA.
//
// Each exporter is exposed on a separate path, /metrics/<name>, where <name> is
// the name of the exporter.
//
// The HTTPS listener is bound to node.MetricsPort.
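//
// For example, an exporter named "node" (a hypothetical name here; see
// DefaultExporters for the actual set) would be reachable by cluster clients
// at https://<node address>:<node.MetricsPort>/metrics/node.
//
// A minimal sketch of wiring the service into a node's supervision tree, with
// illustrative variable names (creds, conn, localRoles) standing in for values
// obtained elsewhere:
//
//	svc := Service{
//		Credentials: creds,
//		Curator:     ipb.NewCuratorClient(conn),
//		LocalRoles:  &localRoles,
//	}
//	supervisor.Run(ctx, "metrics", svc.Run)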
type Service struct {
	// Credentials used to run the TLS/HTTPS listener and verify incoming
	// connections.
	Credentials *identity.NodeCredentials
	// Curator is the gRPC client that the service will use to reach the cluster's
	// Curator, for pulling a list of all nodes.
	Curator ipb.CuratorClient
	// LocalRoles is watched for changes to the local node's roles, which are
	// used to decide whether or not to start the discovery routine.
	LocalRoles *memory.Value[*cpb.NodeRoles]
	// List of Exporters to run and to forward HTTP requests to. If not set, defaults
	// to DefaultExporters.
	Exporters []Exporter
	// enableDynamicAddr enables listening on a dynamically chosen TCP port. This is
	// used by tests to make sure we don't fail due to the default port being already
	// in use.
	enableDynamicAddr bool

	// dynamicAddr will contain the picked dynamic listen address after the service
	// starts, if enableDynamicAddr is set.
	dynamicAddr chan string
	// sdResp contains the cached sdResponse served on the /discovery endpoint.
	sdResp sdResponse
	// sdRespMtx guards sdResp, which is also accessed from the HTTP handler.
	sdRespMtx sync.RWMutex
}

// listen starts the public TLS listener for the service.
func (s *Service) listen() (net.Listener, error) {
	cert := s.Credentials.TLSCredentials()

	pool := x509.NewCertPool()
	pool.AddCert(s.Credentials.ClusterCA())

	tlsc := tls.Config{
		Certificates: []tls.Certificate{
			cert,
		},
		ClientAuth: tls.RequireAndVerifyClientCert,
		ClientCAs:  pool,
		// TODO(q3k): use VerifyPeerCertificate/VerifyConnection to check that the
		// incoming client is allowed to access metrics. Currently we allow
		// anyone/anything with a valid cluster certificate to access them.
	}

	addr := net.JoinHostPort("", node.MetricsPort.PortString())
	if s.enableDynamicAddr {
		addr = ""
	}
	return tls.Listen("tcp", addr, &tlsc)
}
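
// Clients connecting to this listener must present a certificate issued by the
// same cluster CA. A minimal client-side sketch, assuming clientCert and
// clusterCA (both hypothetical variables) have already been loaded, and leaving
// server name verification details aside:
//
//	pool := x509.NewCertPool()
//	pool.AddCert(clusterCA)
//	conn, err := tls.Dial("tcp", addr, &tls.Config{
//		Certificates: []tls.Certificate{clientCert},
//		RootCAs:      pool,
//	})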

func (s *Service) Run(ctx context.Context) error {
	lis, err := s.listen()
	if err != nil {
		return fmt.Errorf("listen failed: %w", err)
	}
	if s.enableDynamicAddr {
		s.dynamicAddr <- lis.Addr().String()
	}

	if s.Exporters == nil {
		s.Exporters = DefaultExporters
	}

	// First, make sure we don't have duplicate exporters.
	seenNames := make(map[string]bool)
	for _, exporter := range s.Exporters {
		if seenNames[exporter.Name] {
			return fmt.Errorf("duplicate exporter name: %q", exporter.Name)
		}
		seenNames[exporter.Name] = true
	}

	// Start all exporters as sub-runnables.
	for _, exporter := range s.Exporters {
		cmd := exec.CommandContext(ctx, exporter.Executable, exporter.Arguments...)
		err := supervisor.Run(ctx, exporter.Name, func(ctx context.Context) error {
			return supervisor.RunCommand(ctx, cmd)
		})
		if err != nil {
			return fmt.Errorf("running %s failed: %w", exporter.Name, err)
		}
	}

	// And register all exporter forwarding functions on a mux.
	mux := http.NewServeMux()
	logger := supervisor.Logger(ctx)
	for _, exporter := range s.Exporters {
		exporter := exporter

		mux.HandleFunc(exporter.externalPath(), func(w http.ResponseWriter, r *http.Request) {
			exporter.forward(logger, w, r)
		})

		logger.Infof("Registered exporter %q", exporter.Name)
	}

	// Also register an http_sd discovery endpoint.
	mux.HandleFunc("/discovery", s.handleDiscovery)

	if err := supervisor.Run(ctx, "watch-roles", s.watchRoles); err != nil {
		return err
	}
	supervisor.Signal(ctx, supervisor.SignalHealthy)

	// Start forwarding server.
	srv := http.Server{
		Handler: mux,
		BaseContext: func(_ net.Listener) context.Context {
			return ctx
		},
	}

	go func() {
		<-ctx.Done()
		srv.Close()
	}()

	err = srv.Serve(lis)
	if err != nil && ctx.Err() != nil {
		return ctx.Err()
	}
	return fmt.Errorf("Serve: %w", err)
}
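
// With the service running, any holder of a cluster-issued client certificate
// can scrape an exporter or query the discovery endpoint directly, for example
// (file names and the exporter name are illustrative):
//
//	curl --cacert cluster-ca.pem --cert client.pem --key client.key \
//	    https://<node address>:<MetricsPort>/metrics/node
//	curl --cacert cluster-ca.pem --cert client.pem --key client.key \
//	    https://<node address>:<MetricsPort>/discovery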

// shouldStartDiscovery returns true if the given node roles warrant serving
// the discovery endpoint, i.e. if this node is a consensus member.
func shouldStartDiscovery(nr *cpb.NodeRoles) bool {
	return nr.ConsensusMember != nil
}

// watchRoles watches the local node's roles and starts the discovery routine
// if warranted, restarting this runnable whenever that decision changes.
func (s *Service) watchRoles(ctx context.Context) error {
	w := s.LocalRoles.Watch()
	defer w.Close()

	r, err := w.Get(ctx)
	if err != nil {
		return err
	}

	if shouldStartDiscovery(r) {
		supervisor.Logger(ctx).Infof("Starting discovery endpoint")
		if err := supervisor.Run(ctx, "watch", s.watch); err != nil {
			return err
		}
	}

	for {
		nr, err := w.Get(ctx)
		if err != nil {
			return err
		}

		if shouldStartDiscovery(r) != shouldStartDiscovery(nr) {
			s.sdRespMtx.Lock()
			// disable the discovery endpoint until the new routine takes over
			s.sdResp = nil
			s.sdRespMtx.Unlock()

			supervisor.Logger(ctx).Infof("Discovery endpoint config changed, restarting")
			return fmt.Errorf("restarting")
		}
	}
}

// watch is the sub-runnable responsible for fetching node updates.
func (s *Service) watch(ctx context.Context) error {
	supervisor.Signal(ctx, supervisor.SignalHealthy)

	srv, err := s.Curator.Watch(ctx, &apb.WatchRequest{
		Kind: &apb.WatchRequest_NodesInCluster_{
			NodesInCluster: &apb.WatchRequest_NodesInCluster{},
		},
	})
	if err != nil {
		return fmt.Errorf("curator watch failed: %w", err)
	}
	defer srv.CloseSend()

	nodes := make(map[string]*apb.Node)
	for {
		ev, err := srv.Recv()
		if err != nil {
			return fmt.Errorf("curator watch recv failed: %w", err)
		}

		for _, n := range ev.Nodes {
			nodes[n.Id] = n
		}

		for _, t := range ev.NodeTombstones {
			n, ok := nodes[t.NodeId]
			if !ok {
				// This is an indication of us losing data somehow. If this happens, it likely
				// means a Curator bug.
				supervisor.Logger(ctx).Warningf("Node %s: tombstone for unknown node", t.NodeId)
				continue
			}
			delete(nodes, n.Id)
		}

		s.sdRespMtx.Lock()

		// reset the existing response slice
		s.sdResp = s.sdResp[:0]
		for _, n := range nodes {
			// Only care about nodes that have all required configuration set.
			if n.Status == nil || n.Status.ExternalAddress == "" || n.Roles == nil {
				continue
			}

			s.sdResp = append(s.sdResp, sdTarget{
				Targets: []string{n.Status.ExternalAddress},
				Labels: map[string]string{
					"kubernetes_worker":     fmt.Sprintf("%t", n.Roles.KubernetesWorker != nil),
					"consensus_member":      fmt.Sprintf("%t", n.Roles.ConsensusMember != nil),
					"kubernetes_controller": fmt.Sprintf("%t", n.Roles.KubernetesController != nil),
				},
			})
		}

		s.sdRespMtx.Unlock()
	}
}

// handleDiscovery serves the cached discovery response assembled by watch.
func (s *Service) handleDiscovery(w http.ResponseWriter, _ *http.Request) {
	s.sdRespMtx.RLock()
	defer s.sdRespMtx.RUnlock()

	// If sdResp is nil, which only happens if the discovery routine is not
	// running (i.e. this node is not a consensus member) or we are still
	// starting up, respond with NotImplemented.
	if s.sdResp == nil {
		w.WriteHeader(http.StatusNotImplemented)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)

	if err := json.NewEncoder(w).Encode(s.sdResp); err != nil {
		// If the encoder fails, it's mostly because of closed connections,
		// so let's just ignore these errors.
		return
	}
}

type sdResponse []sdTarget

type sdTarget struct {
	Targets []string          `json:"targets"`
	Labels  map[string]string `json:"labels"`
}
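
// The /discovery endpoint encodes sdResponse as JSON, which follows the format
// Prometheus expects from an HTTP service discovery (http_sd) endpoint: a list
// of objects carrying "targets" and "labels". An illustrative response (all
// values made up):
//
//	[
//	  {
//	    "targets": ["203.0.113.17"],
//	    "labels": {
//	      "kubernetes_worker": "false",
//	      "consensus_member": "true",
//	      "kubernetes_controller": "true"
//	    }
//	  }
//	]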