blob: e749f7bb36d3cfc7a222ffdc87a9fc7fdc037916 [file] [log] [blame]
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +02001// Copyright 2020 The Monogon Project Authors.
2//
3// SPDX-License-Identifier: Apache-2.0
4//
5// Licensed under the Apache License, Version 2.0 (the "License");
6// you may not use this file except in compliance with the License.
7// You may obtain a copy of the License at
8//
9// http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing, software
12// distributed under the License is distributed on an "AS IS" BASIS,
13// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14// See the License for the specific language governing permissions and
15// limitations under the License.
16
Serge Bazanskif05e80a2021-10-12 11:53:34 +020017// Package consensus implements a runnable that manages an etcd instance which
18// forms part of a Metropolis etcd cluster. This cluster is a foundational
19// building block of Metropolis and its startup/management sequencing needs to
20// be as robust as possible.
Serge Bazanskicb883e22020-07-06 17:47:55 +020021//
Serge Bazanskif05e80a2021-10-12 11:53:34 +020022// Cluster Structure
Serge Bazanskicb883e22020-07-06 17:47:55 +020023//
Serge Bazanskif05e80a2021-10-12 11:53:34 +020024// Each etcd instance listens for two kinds of traffic:
Serge Bazanskicb883e22020-07-06 17:47:55 +020025//
Serge Bazanskif05e80a2021-10-12 11:53:34 +020026// 1. Peer traffic over TLS on a TCP port of the node's main interface. This is
27// where other etcd instances connect to to exchange peer traffic, perform
28// transactions and build quorum. The TLS credentials are stored in a PKI that
29// is managed internally by the consensus runnable, with its state stored in
30// etcd itself.
31//
32// 2. Client traffic over a local domain socket, with access control based on
33// standard Linux user/group permissions. Currently this allows any code running
34// as root on the host namespace full access to the etcd cluster.
35//
36// This means that if code running on a node wishes to perform etcd
37// transactions, it must also run an etcd instance. This colocation of all
// direct etcd access and the etcd instances themselves effectively delegates all
// Metropolis control plane functionality to whatever subset of nodes is running
// consensus and all code that connects to etcd directly (the Curator).
41//
42// For example, if nodes foo and bar are parts of the control plane, but node
43// worker is not:
44//
45// .---------------------.
46// | node-foo |
47// |---------------------|
48// | .--------------------.
49// | | etcd |<---etcd/TLS--. (node.ConsensusPort)
50// | '--------------------' |
51// | ^ Domain Socket | |
52// | | etcd/plain | |
53// | .--------------------. |
54// | | curator |<---gRPC/TLS----. (node.CuratorServicePort)
55// | '--------------------' | |
56// | ^ Domain Socket | | |
57// | | gRPC/plain | | |
58// | .-----------------. | | |
59// | | node logic | | | |
60// | '-----------------' | | |
61// '---------------------' | |
62// | |
63// .---------------------. | |
64// | node-baz | | |
65// |---------------------| | |
66// | .--------------------. | |
67// | | etcd |<-------------' |
68// | '--------------------' |
69// | ^ Domain Socket | |
70// | | gRPC/plain | |
71// | .--------------------. |
72// | | curator |<---gRPC/TLS----:
73// | '--------------------' |
74// | ... | |
75// '---------------------' |
76// |
77// .---------------------. |
78// | node-worker | |
79// |---------------------| |
80// | .-----------------. | |
81// | | node logic |-------------------'
82// | '-----------------' |
83// '---------------------'
84//
85
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +020086package consensus
87
88import (
89 "context"
Serge Bazanskif05e80a2021-10-12 11:53:34 +020090 "crypto/ed25519"
91 "crypto/x509"
92 "crypto/x509/pkix"
Lorenz Bruna6223792023-07-31 17:13:11 +020093 "errors"
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +020094 "fmt"
Serge Bazanskif05e80a2021-10-12 11:53:34 +020095 "math/big"
Serge Bazanskic1cb37c2023-03-16 17:54:33 +010096 "net"
97 "net/url"
Lorenz Bruna4ea9d02019-10-31 11:40:30 +010098 "time"
99
Lorenz Brund13c1c62022-03-30 19:58:58 +0200100 clientv3 "go.etcd.io/etcd/client/v3"
101 "go.etcd.io/etcd/server/v3/embed"
Hendrik Hofstadt8efe51e2020-02-28 12:53:41 +0100102
Serge Bazanskia105db52021-04-12 19:57:46 +0200103 "source.monogon.dev/metropolis/node/core/consensus/client"
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200104 "source.monogon.dev/metropolis/node/core/identity"
Tim Windelschmidt9f21f532024-05-07 15:14:20 +0200105 "source.monogon.dev/osbase/event"
106 "source.monogon.dev/osbase/event/memory"
107 "source.monogon.dev/osbase/logtree/unraw"
108 "source.monogon.dev/osbase/pki"
109 "source.monogon.dev/osbase/supervisor"
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +0200110)
111
var (
	// pkiNamespace is the etcd key-space prefix under which the consensus
	// PKI state (CA and member certificates) is stored.
	pkiNamespace = pki.Namespaced("/pki/")
)
115
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200116func pkiCA() *pki.Certificate {
117 return &pki.Certificate{
118 Name: "CA",
119 Namespace: &pkiNamespace,
120 Issuer: pki.SelfSigned,
121 Template: x509.Certificate{
122 SerialNumber: big.NewInt(1),
123 Subject: pkix.Name{
124 CommonName: "Metropolis etcd CA Certificate",
125 },
126 IsCA: true,
127 KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature,
128 ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageOCSPSigning},
129 },
130 }
131}
132
133func pkiPeerCertificate(pubkey ed25519.PublicKey, extraNames []string) x509.Certificate {
134 return x509.Certificate{
135 Subject: pkix.Name{
136 CommonName: identity.NodeID(pubkey),
137 },
138 KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
139 ExtKeyUsage: []x509.ExtKeyUsage{
140 x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth,
141 },
142 DNSNames: append(extraNames, identity.NodeID(pubkey)),
143 }
144}
145
// Service is the etcd cluster member service. See package-level documentation
// for more information.
type Service struct {
	// config is the service configuration, copied in at construction time.
	config *Config

	// value publishes the current Status of the member to watchers; it is
	// updated as the etcd server starts and stops.
	value memory.Value[*Status]
	// ca is the etcd peer PKI CA definition, re-created at the start of every
	// Run to avoid stale state from previous runs.
	ca *pki.Certificate
}
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +0200154
Serge Bazanskicb883e22020-07-06 17:47:55 +0200155func New(config Config) *Service {
156 return &Service{
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +0200157 config: &config,
158 }
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +0200159}
160
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200161// Run is a Supervisor runnable that starts the etcd member service. It will
162// become healthy once the member joins the cluster successfully.
163func (s *Service) Run(ctx context.Context) error {
164 // Always re-create CA to make sure we don't have PKI state from previous runs.
165 //
166 // TODO(q3k): make the PKI library immune to this misuse.
167 s.ca = pkiCA()
Lorenz Brun52f7f292020-06-24 16:42:02 +0200168
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200169 // Create log converter. This will ingest etcd logs and pipe them out to this
170 // runnable's leveled logging facilities.
Serge Bazanskic1cb37c2023-03-16 17:54:33 +0100171
172 // This is not where etcd will run, but where its log ingestion machinery lives.
173 // This ensures that the (annoying verbose) etcd logs are contained into just
174 // .etcd.
175 err := supervisor.Run(ctx, "etcd", func(ctx context.Context) error {
176 converter := unraw.Converter{
177 Parser: parseEtcdLogEntry,
178 MaximumLineLength: 8192,
179 LeveledLogger: supervisor.Logger(ctx),
180 }
Serge Bazanski5ad31442024-04-17 15:40:52 +0200181 pipe, err := converter.NamedPipeReader(s.config.Ephemeral.ServerLogsFIFO.FullPath())
Serge Bazanskic1cb37c2023-03-16 17:54:33 +0100182 if err != nil {
183 return fmt.Errorf("when creating pipe reader: %w", err)
184 }
185 if err := supervisor.Run(ctx, "piper", pipe); err != nil {
186 return fmt.Errorf("when starting log piper: %w", err)
187 }
188 supervisor.Signal(ctx, supervisor.SignalHealthy)
189 <-ctx.Done()
190 return ctx.Err()
191 })
Serge Bazanski50009e02021-07-07 14:35:27 +0200192 if err != nil {
Serge Bazanskic1cb37c2023-03-16 17:54:33 +0100193 return fmt.Errorf("when starting etcd logger: %w", err)
Serge Bazanski50009e02021-07-07 14:35:27 +0200194 }
195
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200196 // Create autopromoter, which will automatically promote all learners to full
197 // etcd members.
198 if err := supervisor.Run(ctx, "autopromoter", s.autopromoter); err != nil {
199 return fmt.Errorf("when starting autopromtoer: %w", err)
200 }
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +0200201
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200202 // Create selfupdater, which will perform a one-shot update of this member's
203 // peer address in etcd.
Mateusz Zalega619029b2022-05-05 17:18:26 +0200204 if err := supervisor.Run(ctx, "selfupdater", s.selfupdater); err != nil {
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200205 return fmt.Errorf("when starting selfupdater: %w", err)
206 }
207
208 // Prepare cluster PKI credentials.
209 ppki := s.config.Data.PeerPKI
210 jc := s.config.JoinCluster
211 if jc != nil {
Serge Bazanski97d68082022-06-22 13:15:21 +0200212 supervisor.Logger(ctx).Info("JoinCluster set, writing PPKI data to disk...")
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200213 // For nodes that join an existing cluster, or re-join it, always write whatever
214 // we've been given on startup.
215 if err := ppki.WriteAll(jc.NodeCertificate.Raw, s.config.NodePrivateKey, jc.CACertificate.Raw); err != nil {
216 return fmt.Errorf("when writing credentials for join: %w", err)
217 }
218 if err := s.config.Data.PeerCRL.Write(jc.InitialCRL.Raw, 0400); err != nil {
219 return fmt.Errorf("when writing CRL for join: %w", err)
220 }
221 } else {
222 // For other nodes, we should already have credentials from a previous join, or
223 // a previous bootstrap. If none exist, assume we need to bootstrap these
224 // credentials.
225 //
226 // TODO(q3k): once we have node join (ie. node restart from disk) flow, add a
227 // special configuration marker to prevent spurious bootstraps.
228 absent, err := ppki.AllAbsent()
229 if err != nil {
230 return fmt.Errorf("when checking for PKI file absence: %w", err)
231 }
232 if absent {
Serge Bazanski97d68082022-06-22 13:15:21 +0200233 supervisor.Logger(ctx).Info("PKI data absent, bootstrapping.")
Serge Bazanski5ad31442024-04-17 15:40:52 +0200234 if err := s.bootstrap(ctx); err != nil {
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200235 return fmt.Errorf("bootstrap failed: %w", err)
236 }
237 } else {
238 supervisor.Logger(ctx).Info("PKI data present, not bootstrapping.")
239 }
240 }
241
Serge Bazanskic1cb37c2023-03-16 17:54:33 +0100242 // If we're joining a cluster, make sure that our peers are actually DNS
243 // resolvable. This prevents us from immediately failing due to transient DNS
244 // issues.
245 if jc := s.config.JoinCluster; jc != nil {
246 supervisor.Logger(ctx).Infof("Waiting for initial peers to be DNS resolvable...")
247 startLogging := time.Now().Add(5 * time.Second)
248 for {
249 allOkay := true
250 shouldLog := time.Now().After(startLogging)
251 for _, node := range jc.ExistingNodes {
Tim Windelschmidtd5cabde2024-04-19 02:56:46 +0200252 u, err := url.Parse(node.URL)
Serge Bazanskic1cb37c2023-03-16 17:54:33 +0100253 if err != nil {
254 // Just pretend this node is up. If the URL is really bad, etcd will complain
255 // more clearly than us. This shouldn't happen, anyway.
Tim Windelschmidtd5cabde2024-04-19 02:56:46 +0200256 continue
Serge Bazanskic1cb37c2023-03-16 17:54:33 +0100257 }
258 host := u.Hostname()
Tim Windelschmidtd5cabde2024-04-19 02:56:46 +0200259 if _, err := net.LookupIP(host); err == nil {
Serge Bazanskic1cb37c2023-03-16 17:54:33 +0100260 continue
261 }
262 if shouldLog {
263 supervisor.Logger(ctx).Errorf("Still can't resolve peer %s (%s): %v", node.Name, host, err)
264 }
265 allOkay = false
266 }
267 if allOkay {
268 supervisor.Logger(ctx).Infof("All peers resolvable, continuing startup.")
269 break
270 }
271
272 time.Sleep(100 * time.Millisecond)
273 if shouldLog {
274 startLogging = time.Now().Add(5 * time.Second)
275 }
276 }
277 }
278
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200279 // Start etcd ...
Serge Bazanskic1cb37c2023-03-16 17:54:33 +0100280 supervisor.Logger(ctx).Infof("Starting etcd...")
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200281 cfg := s.config.build(true)
282 server, err := embed.StartEtcd(cfg)
283 if err != nil {
284 return fmt.Errorf("when starting etcd: %w", err)
285 }
286
287 // ... wait for server to be ready...
288 select {
289 case <-ctx.Done():
290 return ctx.Err()
291 case <-server.Server.ReadyNotify():
292 }
293
294 // ... build a client to its' socket...
295 cl, err := s.config.localClient()
296 if err != nil {
297 return fmt.Errorf("getting local client failed: %w", err)
298 }
299
300 // ... and wait until we're not a learner anymore.
301 for {
302 members, err := cl.MemberList(ctx)
303 if err != nil {
304 supervisor.Logger(ctx).Warningf("MemberList failed: %v", err)
305 time.Sleep(time.Second)
306 continue
307 }
308
309 isMember := false
310 for _, member := range members.Members {
311 if member.ID != uint64(server.Server.ID()) {
312 continue
313 }
314 if !member.IsLearner {
315 isMember = true
316 break
317 }
318 }
319 if isMember {
320 break
321 }
322 supervisor.Logger(ctx).Warningf("Still a learner, waiting...")
323 time.Sleep(time.Second)
324 }
325
326 // All done! Report status.
327 supervisor.Logger(ctx).Infof("etcd server ready")
328
329 st := &Status{
Lorenz Brun6211e4d2023-11-14 19:09:40 +0100330 localPeerURL: cfg.AdvertisePeerUrls[0].String(),
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200331 localMemberID: uint64(server.Server.ID()),
332 cl: cl,
333 ca: s.ca,
334 }
Serge Bazanski98a6ccc2023-06-20 13:09:12 +0200335 st2 := *st
336 s.value.Set(&st2)
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200337
338 // Wait until server dies for whatever reason, update status when that
339 // happens.
340 supervisor.Signal(ctx, supervisor.SignalHealthy)
341 select {
342 case err = <-server.Err():
343 err = fmt.Errorf("server returned error: %w", err)
344 case <-ctx.Done():
345 server.Close()
346 err = ctx.Err()
347 }
Serge Bazanski98a6ccc2023-06-20 13:09:12 +0200348
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200349 st.stopped = true
Serge Bazanski98a6ccc2023-06-20 13:09:12 +0200350 st3 := *st
351 s.value.Set(&st3)
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200352 return err
Serge Bazanskicb883e22020-07-06 17:47:55 +0200353}
354
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200355func clientFor(kv *clientv3.Client, parts ...string) (client.Namespaced, error) {
356 var err error
357 namespaced := client.NewLocal(kv)
358 for _, el := range parts {
359 namespaced, err = namespaced.Sub(el)
Serge Bazanskicb883e22020-07-06 17:47:55 +0200360 if err != nil {
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200361 return nil, fmt.Errorf("when getting sub client: %w", err)
Serge Bazanskicb883e22020-07-06 17:47:55 +0200362 }
Serge Bazanskicb883e22020-07-06 17:47:55 +0200363
Serge Bazanskicb883e22020-07-06 17:47:55 +0200364 }
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200365 return namespaced, nil
366}
Serge Bazanskicb883e22020-07-06 17:47:55 +0200367
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200368// bootstrap performs a procedure to resolve the following bootstrap problems:
369// in order to start an etcd server for consensus, we need it to serve over TLS.
370// However, these TLS certificates also need to be stored in etcd so that
371// further certificates can be issued for new nodes.
372//
373// This was previously solved by a using a special PKI/TLS management system that
374// could first create certificates and keys in memory, then only commit them to
375// etcd. However, this ended up being somewhat brittle in the face of startup
376// sequencing issues, so we're now going with a different approach.
377//
378// This function starts an etcd instance first without any PKI/TLS support,
379// without listening on any external port for peer traffic. Once the instance is
380// running, it uses the standard metropolis pki library to create all required
381// data directly in the running etcd instance. It then writes all required
382// startup data (node private key, member certificate, CA certificate) to disk,
383// so that a 'full' etcd instance can be started.
Serge Bazanski5ad31442024-04-17 15:40:52 +0200384func (s *Service) bootstrap(ctx context.Context) error {
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200385 supervisor.Logger(ctx).Infof("Bootstrapping PKI: starting etcd...")
Serge Bazanskicb883e22020-07-06 17:47:55 +0200386
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200387 cfg := s.config.build(false)
388 // This will make etcd create data directories and create a fully new cluster if
389 // needed. If we're restarting due to an error, the old cluster data will still
390 // exist.
391 cfg.ClusterState = "new"
Serge Bazanskicb883e22020-07-06 17:47:55 +0200392
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200393 // Start the bootstrap etcd instance...
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +0200394 server, err := embed.StartEtcd(cfg)
Lorenz Bruna4ea9d02019-10-31 11:40:30 +0100395 if err != nil {
Serge Bazanskib76b8d12023-03-16 00:46:56 +0100396 return fmt.Errorf("failed to start bootstrap etcd: %w", err)
Lorenz Bruna4ea9d02019-10-31 11:40:30 +0100397 }
Serge Bazanskib76b8d12023-03-16 00:46:56 +0100398 defer server.Close()
Lorenz Bruna4ea9d02019-10-31 11:40:30 +0100399
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200400 // ... wait for it to run ...
Serge Bazanskicb883e22020-07-06 17:47:55 +0200401 select {
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200402 case <-server.Server.ReadyNotify():
Serge Bazanskicb883e22020-07-06 17:47:55 +0200403 case <-ctx.Done():
Lorenz Bruna6223792023-07-31 17:13:11 +0200404 return errors.New("timed out waiting for etcd to become ready")
Lorenz Brun52f7f292020-06-24 16:42:02 +0200405 }
406
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200407 // ... create a client to it ...
408 cl, err := s.config.localClient()
Lorenz Brun52f7f292020-06-24 16:42:02 +0200409 if err != nil {
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200410 return fmt.Errorf("when getting bootstrap client: %w", err)
Serge Bazanskicb883e22020-07-06 17:47:55 +0200411 }
412
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200413 // ... and build PKI there. This is idempotent, so we will never override
414 // anything that's already in the cluster, instead just retrieve it.
415 supervisor.Logger(ctx).Infof("Bootstrapping PKI: etcd running, building PKI...")
416 clPKI, err := clientFor(cl, "namespaced", "etcd-pki")
417 if err != nil {
418 return fmt.Errorf("when getting pki client: %w", err)
Serge Bazanskicb883e22020-07-06 17:47:55 +0200419 }
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200420 defer clPKI.Close()
421 caCert, err := s.ca.Ensure(ctx, clPKI)
422 if err != nil {
423 return fmt.Errorf("failed to ensure CA certificate: %w", err)
Serge Bazanskicb883e22020-07-06 17:47:55 +0200424 }
425
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200426 // If we're running with a test overridden external address (eg. localhost), we
427 // need to also make that part of the member certificate.
428 var extraNames []string
429 if external := s.config.testOverrides.externalAddress; external != "" {
430 extraNames = []string{external}
431 }
432 memberTemplate := pki.Certificate{
433 Name: identity.NodeID(s.config.nodePublicKey()),
434 Namespace: &pkiNamespace,
435 Issuer: s.ca,
436 Template: pkiPeerCertificate(s.config.nodePublicKey(), extraNames),
437 Mode: pki.CertificateExternal,
438 PublicKey: s.config.nodePublicKey(),
439 }
440 memberCert, err := memberTemplate.Ensure(ctx, clPKI)
441 if err != nil {
442 return fmt.Errorf("failed to ensure member certificate: %w", err)
443 }
Serge Bazanskicb883e22020-07-06 17:47:55 +0200444
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200445 // Retrieve CRL.
446 crlW := s.ca.WatchCRL(clPKI)
447 crl, err := crlW.Get(ctx)
448 if err != nil {
449 return fmt.Errorf("failed to retrieve initial CRL: %w", err)
450 }
451
452 // We have everything we need. Write things to disk.
453 supervisor.Logger(ctx).Infof("Bootstrapping PKI: certificates issued, writing to disk...")
454
455 if err := s.config.Data.PeerPKI.WriteAll(memberCert, s.config.NodePrivateKey, caCert); err != nil {
456 return fmt.Errorf("failed to write bootstrapped certificates: %w", err)
457 }
458 if err := s.config.Data.PeerCRL.Write(crl.Raw, 0400); err != nil {
459 return fmt.Errorf("failed tow rite CRL: %w", err)
460 }
461
462 // Stop the server synchronously (blocking until it's fully shutdown), and
463 // return. The caller can now run the 'full' etcd instance with PKI.
464 supervisor.Logger(ctx).Infof("Bootstrapping PKI: done, stopping server...")
465 server.Close()
Serge Bazanskicb883e22020-07-06 17:47:55 +0200466 return ctx.Err()
Lorenz Bruna4ea9d02019-10-31 11:40:30 +0100467}
468
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200469// autopromoter is a runnable which repeatedly attempts to promote etcd learners
470// in the cluster to full followers. This is needed to bring any new cluster
471// members (which are always added as learners) to full membership and make them
472// part of the etcd quorum.
Serge Bazanskicb883e22020-07-06 17:47:55 +0200473func (s *Service) autopromoter(ctx context.Context) error {
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200474 autopromote := func(ctx context.Context, cl *clientv3.Client) {
475 // Only autopromote if our endpoint is a leader. This is a bargain bin version
476 // of leader election: it's simple and cheap, but not very reliable. The most
477 // obvious failure mode is that the instance we contacted isn't a leader by the
478 // time we promote a member, but that's fine - the promotion is idempotent. What
479 // we really use the 'leader election' here for isn't for consistency, but to
480 // prevent the cluster from being hammered by spurious leadership promotion
481 // requests from every etcd member.
482 status, err := cl.Status(ctx, cl.Endpoints()[0])
483 if err != nil {
484 supervisor.Logger(ctx).Warningf("Failed to get endpoint status: %v", err)
Jan Schärb9769672024-04-09 15:31:40 +0200485 return
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200486 }
487 if status.Leader != status.Header.MemberId {
Serge Bazanskicb883e22020-07-06 17:47:55 +0200488 return
489 }
490
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200491 members, err := cl.MemberList(ctx)
492 if err != nil {
493 supervisor.Logger(ctx).Warningf("Failed to list members: %v", err)
494 return
495 }
496 for _, member := range members.Members {
Serge Bazanskicb883e22020-07-06 17:47:55 +0200497 if !member.IsLearner {
Lorenz Bruna4ea9d02019-10-31 11:40:30 +0100498 continue
499 }
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200500 // Always call PromoteMember since the metadata necessary to decide if we should
501 // is private. Luckily etcd already does consistency checks internally and will
502 // refuse to promote nodes that aren't connected or are still behind on
503 // transactions.
504 if _, err := cl.MemberPromote(ctx, member.ID); err != nil {
Serge Bazanskic7359672020-10-30 16:38:57 +0100505 supervisor.Logger(ctx).Infof("Failed to promote consensus node %s: %v", member.Name, err)
Serge Bazanskicb883e22020-07-06 17:47:55 +0200506 } else {
Serge Bazanskic7359672020-10-30 16:38:57 +0100507 supervisor.Logger(ctx).Infof("Promoted new consensus node %s", member.Name)
Lorenz Bruna4ea9d02019-10-31 11:40:30 +0100508 }
509 }
510 }
Lorenz Bruna4ea9d02019-10-31 11:40:30 +0100511
Serge Bazanski37110c32023-03-01 13:57:27 +0000512 w := s.value.Watch()
Serge Bazanskicb883e22020-07-06 17:47:55 +0200513 for {
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200514 st, err := w.Get(ctx)
515 if err != nil {
516 return fmt.Errorf("status get failed: %w", err)
Lorenz Brun52f7f292020-06-24 16:42:02 +0200517 }
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200518 t := time.NewTicker(5 * time.Second)
519 for {
520 autopromote(ctx, st.cl)
521 select {
522 case <-ctx.Done():
523 t.Stop()
524 return ctx.Err()
525 case <-t.C:
Serge Bazanskicb883e22020-07-06 17:47:55 +0200526 }
527 }
528 }
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +0200529}
530
Serge Bazanski37110c32023-03-01 13:57:27 +0000531func (s *Service) Watch() event.Watcher[*Status] {
532 return s.value.Watch()
533}
534
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200535// selfupdater is a runnable that performs a one-shot (once per Service Run,
536// thus once for each configuration) update of the node's Peer URL in etcd. This
537// is currently only really needed because the first node in the cluster
538// bootstraps itself without any peer URLs at first, and this allows it to then
539// add the peer URLs afterwards. Instead of a runnable, this might as well have
540// been part of the bootstarp logic, but making it a restartable runnable is
541// more robust.
542func (s *Service) selfupdater(ctx context.Context) error {
543 supervisor.Signal(ctx, supervisor.SignalHealthy)
Serge Bazanski37110c32023-03-01 13:57:27 +0000544 w := s.value.Watch()
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200545 for {
546 st, err := w.Get(ctx)
547 if err != nil {
548 return fmt.Errorf("failed to get status: %w", err)
549 }
550
Serge Bazanski5839e972021-11-16 15:46:19 +0100551 if st.localPeerURL != "" {
552 supervisor.Logger(ctx).Infof("Updating local peer URL...")
553 peerURL := st.localPeerURL
554 if _, err := st.cl.MemberUpdate(ctx, st.localMemberID, []string{peerURL}); err != nil {
555 supervisor.Logger(ctx).Warningf("failed to update member: %v", err)
556 time.Sleep(1 * time.Second)
557 continue
558 }
559 } else {
560 supervisor.Logger(ctx).Infof("No local peer URL, not updating.")
Serge Bazanskif05e80a2021-10-12 11:53:34 +0200561 }
562
563 supervisor.Signal(ctx, supervisor.SignalDone)
564 return nil
Serge Bazanskia105db52021-04-12 19:57:46 +0200565 }
Hendrik Hofstadt0d7c91e2019-10-23 21:44:47 +0200566}