Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 1 | // Copyright 2020 The Monogon Project Authors. |
| 2 | // |
| 3 | // SPDX-License-Identifier: Apache-2.0 |
| 4 | // |
| 5 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | // you may not use this file except in compliance with the License. |
| 7 | // You may obtain a copy of the License at |
| 8 | // |
| 9 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | // |
| 11 | // Unless required by applicable law or agreed to in writing, software |
| 12 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | // See the License for the specific language governing permissions and |
| 15 | // limitations under the License. |
| 16 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 17 | // Package consensus implements a runnable that manages an etcd instance which |
| 18 | // forms part of a Metropolis etcd cluster. This cluster is a foundational |
| 19 | // building block of Metropolis and its startup/management sequencing needs to |
| 20 | // be as robust as possible. |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 21 | // |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 22 | // Cluster Structure |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 23 | // |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 24 | // Each etcd instance listens for two kinds of traffic: |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 25 | // |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 26 | // 1. Peer traffic over TLS on a TCP port of the node's main interface. This is |
 | 27 | // where other etcd instances connect to exchange peer traffic, perform |
| 28 | // transactions and build quorum. The TLS credentials are stored in a PKI that |
| 29 | // is managed internally by the consensus runnable, with its state stored in |
| 30 | // etcd itself. |
| 31 | // |
| 32 | // 2. Client traffic over a local domain socket, with access control based on |
| 33 | // standard Linux user/group permissions. Currently this allows any code running |
| 34 | // as root on the host namespace full access to the etcd cluster. |
| 35 | // |
| 36 | // This means that if code running on a node wishes to perform etcd |
| 37 | // transactions, it must also run an etcd instance. This colocation of all |
 | 38 | // direct etcd access and the etcd instances themselves effectively delegates all |
| 39 | // Metropolis control plane functionality to whatever subset of nodes is running |
 | 40 | // consensus and all code that connects to etcd directly (the Curator). |
| 41 | // |
| 42 | // For example, if nodes foo and bar are parts of the control plane, but node |
| 43 | // worker is not: |
| 44 | // |
| 45 | // .---------------------. |
| 46 | // | node-foo | |
| 47 | // |---------------------| |
| 48 | // | .--------------------. |
| 49 | // | | etcd |<---etcd/TLS--. (node.ConsensusPort) |
| 50 | // | '--------------------' | |
| 51 | // | ^ Domain Socket | | |
| 52 | // | | etcd/plain | | |
| 53 | // | .--------------------. | |
| 54 | // | | curator |<---gRPC/TLS----. (node.CuratorServicePort) |
| 55 | // | '--------------------' | | |
| 56 | // | ^ Domain Socket | | | |
| 57 | // | | gRPC/plain | | | |
| 58 | // | .-----------------. | | | |
| 59 | // | | node logic | | | | |
| 60 | // | '-----------------' | | | |
| 61 | // '---------------------' | | |
| 62 | // | | |
| 63 | // .---------------------. | | |
| 64 | // | node-baz | | | |
| 65 | // |---------------------| | | |
| 66 | // | .--------------------. | | |
| 67 | // | | etcd |<-------------' | |
| 68 | // | '--------------------' | |
| 69 | // | ^ Domain Socket | | |
| 70 | // | | gRPC/plain | | |
| 71 | // | .--------------------. | |
| 72 | // | | curator |<---gRPC/TLS----: |
| 73 | // | '--------------------' | |
| 74 | // | ... | | |
| 75 | // '---------------------' | |
| 76 | // | |
| 77 | // .---------------------. | |
| 78 | // | node-worker | | |
| 79 | // |---------------------| | |
| 80 | // | .-----------------. | | |
| 81 | // | | node logic |-------------------' |
| 82 | // | '-----------------' | |
| 83 | // '---------------------' |
| 84 | // |
| 85 | |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 86 | package consensus |
| 87 | |
| 88 | import ( |
| 89 | "context" |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 90 | "crypto/ed25519" |
| 91 | "crypto/x509" |
| 92 | "crypto/x509/pkix" |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 93 | "fmt" |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 94 | "math/big" |
Serge Bazanski | c1cb37c | 2023-03-16 17:54:33 +0100 | [diff] [blame] | 95 | "net" |
| 96 | "net/url" |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 97 | "time" |
| 98 | |
Lorenz Brun | d13c1c6 | 2022-03-30 19:58:58 +0200 | [diff] [blame] | 99 | clientv3 "go.etcd.io/etcd/client/v3" |
| 100 | "go.etcd.io/etcd/server/v3/embed" |
Hendrik Hofstadt | 8efe51e | 2020-02-28 12:53:41 +0100 | [diff] [blame] | 101 | |
Serge Bazanski | a105db5 | 2021-04-12 19:57:46 +0200 | [diff] [blame] | 102 | "source.monogon.dev/metropolis/node/core/consensus/client" |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 103 | "source.monogon.dev/metropolis/node/core/identity" |
Serge Bazanski | 37110c3 | 2023-03-01 13:57:27 +0000 | [diff] [blame] | 104 | "source.monogon.dev/metropolis/pkg/event" |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 105 | "source.monogon.dev/metropolis/pkg/event/memory" |
Serge Bazanski | 50009e0 | 2021-07-07 14:35:27 +0200 | [diff] [blame] | 106 | "source.monogon.dev/metropolis/pkg/logtree/unraw" |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 107 | "source.monogon.dev/metropolis/pkg/pki" |
Serge Bazanski | 31370b0 | 2021-01-07 16:31:14 +0100 | [diff] [blame] | 108 | "source.monogon.dev/metropolis/pkg/supervisor" |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 109 | ) |
| 110 | |
var (
	// pkiNamespace is the etcd key prefix under which all consensus PKI state
	// (CA certificate, member certificates, CRL) is kept.
	pkiNamespace = pki.Namespaced("/pki/")
)
| 114 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 115 | func pkiCA() *pki.Certificate { |
| 116 | return &pki.Certificate{ |
| 117 | Name: "CA", |
| 118 | Namespace: &pkiNamespace, |
| 119 | Issuer: pki.SelfSigned, |
| 120 | Template: x509.Certificate{ |
| 121 | SerialNumber: big.NewInt(1), |
| 122 | Subject: pkix.Name{ |
| 123 | CommonName: "Metropolis etcd CA Certificate", |
| 124 | }, |
| 125 | IsCA: true, |
| 126 | KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature, |
| 127 | ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageOCSPSigning}, |
| 128 | }, |
| 129 | } |
| 130 | } |
| 131 | |
| 132 | func pkiPeerCertificate(pubkey ed25519.PublicKey, extraNames []string) x509.Certificate { |
| 133 | return x509.Certificate{ |
| 134 | Subject: pkix.Name{ |
| 135 | CommonName: identity.NodeID(pubkey), |
| 136 | }, |
| 137 | KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, |
| 138 | ExtKeyUsage: []x509.ExtKeyUsage{ |
| 139 | x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, |
| 140 | }, |
| 141 | DNSNames: append(extraNames, identity.NodeID(pubkey)), |
| 142 | } |
| 143 | } |
| 144 | |
// Service is the etcd cluster member service. See package-level documentation
// for more information.
type Service struct {
	// config is the configuration this Service was created with; immutable
	// after New.
	config *Config

	// value publishes the current *Status of the etcd member to watchers
	// (set once the member is a full, non-learner cluster member).
	value memory.Value[*Status]
	// ca is the cluster peer PKI CA; re-created on every Run to avoid carrying
	// PKI state across runs.
	ca *pki.Certificate
}
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 153 | |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 154 | func New(config Config) *Service { |
| 155 | return &Service{ |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 156 | config: &config, |
| 157 | } |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 158 | } |
| 159 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 160 | // Run is a Supervisor runnable that starts the etcd member service. It will |
| 161 | // become healthy once the member joins the cluster successfully. |
| 162 | func (s *Service) Run(ctx context.Context) error { |
| 163 | // Always re-create CA to make sure we don't have PKI state from previous runs. |
| 164 | // |
| 165 | // TODO(q3k): make the PKI library immune to this misuse. |
| 166 | s.ca = pkiCA() |
Lorenz Brun | 52f7f29 | 2020-06-24 16:42:02 +0200 | [diff] [blame] | 167 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 168 | // Create log converter. This will ingest etcd logs and pipe them out to this |
| 169 | // runnable's leveled logging facilities. |
| 170 | // |
| 171 | // TODO(q3k): add support for streaming to a sub-logger in the tree to get |
| 172 | // cleaner logs. |
Serge Bazanski | c1cb37c | 2023-03-16 17:54:33 +0100 | [diff] [blame] | 173 | |
Serge Bazanski | 50009e0 | 2021-07-07 14:35:27 +0200 | [diff] [blame] | 174 | fifoPath := s.config.Ephemeral.ServerLogsFIFO.FullPath() |
Serge Bazanski | c1cb37c | 2023-03-16 17:54:33 +0100 | [diff] [blame] | 175 | |
| 176 | // This is not where etcd will run, but where its log ingestion machinery lives. |
| 177 | // This ensures that the (annoying verbose) etcd logs are contained into just |
| 178 | // .etcd. |
| 179 | err := supervisor.Run(ctx, "etcd", func(ctx context.Context) error { |
| 180 | converter := unraw.Converter{ |
| 181 | Parser: parseEtcdLogEntry, |
| 182 | MaximumLineLength: 8192, |
| 183 | LeveledLogger: supervisor.Logger(ctx), |
| 184 | } |
| 185 | pipe, err := converter.NamedPipeReader(fifoPath) |
| 186 | if err != nil { |
| 187 | return fmt.Errorf("when creating pipe reader: %w", err) |
| 188 | } |
| 189 | if err := supervisor.Run(ctx, "piper", pipe); err != nil { |
| 190 | return fmt.Errorf("when starting log piper: %w", err) |
| 191 | } |
| 192 | supervisor.Signal(ctx, supervisor.SignalHealthy) |
| 193 | <-ctx.Done() |
| 194 | return ctx.Err() |
| 195 | }) |
Serge Bazanski | 50009e0 | 2021-07-07 14:35:27 +0200 | [diff] [blame] | 196 | if err != nil { |
Serge Bazanski | c1cb37c | 2023-03-16 17:54:33 +0100 | [diff] [blame] | 197 | return fmt.Errorf("when starting etcd logger: %w", err) |
Serge Bazanski | 50009e0 | 2021-07-07 14:35:27 +0200 | [diff] [blame] | 198 | } |
| 199 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 200 | // Create autopromoter, which will automatically promote all learners to full |
| 201 | // etcd members. |
| 202 | if err := supervisor.Run(ctx, "autopromoter", s.autopromoter); err != nil { |
| 203 | return fmt.Errorf("when starting autopromtoer: %w", err) |
| 204 | } |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 205 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 206 | // Create selfupdater, which will perform a one-shot update of this member's |
| 207 | // peer address in etcd. |
Mateusz Zalega | 619029b | 2022-05-05 17:18:26 +0200 | [diff] [blame] | 208 | if err := supervisor.Run(ctx, "selfupdater", s.selfupdater); err != nil { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 209 | return fmt.Errorf("when starting selfupdater: %w", err) |
| 210 | } |
| 211 | |
| 212 | // Prepare cluster PKI credentials. |
| 213 | ppki := s.config.Data.PeerPKI |
| 214 | jc := s.config.JoinCluster |
| 215 | if jc != nil { |
Serge Bazanski | 97d6808 | 2022-06-22 13:15:21 +0200 | [diff] [blame] | 216 | supervisor.Logger(ctx).Info("JoinCluster set, writing PPKI data to disk...") |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 217 | // For nodes that join an existing cluster, or re-join it, always write whatever |
| 218 | // we've been given on startup. |
| 219 | if err := ppki.WriteAll(jc.NodeCertificate.Raw, s.config.NodePrivateKey, jc.CACertificate.Raw); err != nil { |
| 220 | return fmt.Errorf("when writing credentials for join: %w", err) |
| 221 | } |
| 222 | if err := s.config.Data.PeerCRL.Write(jc.InitialCRL.Raw, 0400); err != nil { |
| 223 | return fmt.Errorf("when writing CRL for join: %w", err) |
| 224 | } |
| 225 | } else { |
| 226 | // For other nodes, we should already have credentials from a previous join, or |
| 227 | // a previous bootstrap. If none exist, assume we need to bootstrap these |
| 228 | // credentials. |
| 229 | // |
| 230 | // TODO(q3k): once we have node join (ie. node restart from disk) flow, add a |
| 231 | // special configuration marker to prevent spurious bootstraps. |
| 232 | absent, err := ppki.AllAbsent() |
| 233 | if err != nil { |
| 234 | return fmt.Errorf("when checking for PKI file absence: %w", err) |
| 235 | } |
| 236 | if absent { |
Serge Bazanski | 97d6808 | 2022-06-22 13:15:21 +0200 | [diff] [blame] | 237 | supervisor.Logger(ctx).Info("PKI data absent, bootstrapping.") |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 238 | if err := s.bootstrap(ctx, fifoPath); err != nil { |
| 239 | return fmt.Errorf("bootstrap failed: %w", err) |
| 240 | } |
| 241 | } else { |
| 242 | supervisor.Logger(ctx).Info("PKI data present, not bootstrapping.") |
| 243 | } |
| 244 | } |
| 245 | |
Serge Bazanski | c1cb37c | 2023-03-16 17:54:33 +0100 | [diff] [blame] | 246 | // If we're joining a cluster, make sure that our peers are actually DNS |
| 247 | // resolvable. This prevents us from immediately failing due to transient DNS |
| 248 | // issues. |
| 249 | if jc := s.config.JoinCluster; jc != nil { |
| 250 | supervisor.Logger(ctx).Infof("Waiting for initial peers to be DNS resolvable...") |
| 251 | startLogging := time.Now().Add(5 * time.Second) |
| 252 | for { |
| 253 | allOkay := true |
| 254 | shouldLog := time.Now().After(startLogging) |
| 255 | for _, node := range jc.ExistingNodes { |
| 256 | u, _ := url.Parse(node.URL) |
| 257 | if err != nil { |
| 258 | // Just pretend this node is up. If the URL is really bad, etcd will complain |
| 259 | // more clearly than us. This shouldn't happen, anyway. |
| 260 | } |
| 261 | host := u.Hostname() |
| 262 | _, err := net.LookupIP(host) |
| 263 | if err == nil { |
| 264 | continue |
| 265 | } |
| 266 | if shouldLog { |
| 267 | supervisor.Logger(ctx).Errorf("Still can't resolve peer %s (%s): %v", node.Name, host, err) |
| 268 | } |
| 269 | allOkay = false |
| 270 | } |
| 271 | if allOkay { |
| 272 | supervisor.Logger(ctx).Infof("All peers resolvable, continuing startup.") |
| 273 | break |
| 274 | } |
| 275 | |
| 276 | time.Sleep(100 * time.Millisecond) |
| 277 | if shouldLog { |
| 278 | startLogging = time.Now().Add(5 * time.Second) |
| 279 | } |
| 280 | } |
| 281 | } |
| 282 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 283 | // Start etcd ... |
Serge Bazanski | c1cb37c | 2023-03-16 17:54:33 +0100 | [diff] [blame] | 284 | supervisor.Logger(ctx).Infof("Starting etcd...") |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 285 | cfg := s.config.build(true) |
| 286 | server, err := embed.StartEtcd(cfg) |
| 287 | if err != nil { |
| 288 | return fmt.Errorf("when starting etcd: %w", err) |
| 289 | } |
| 290 | |
| 291 | // ... wait for server to be ready... |
| 292 | select { |
| 293 | case <-ctx.Done(): |
| 294 | return ctx.Err() |
| 295 | case <-server.Server.ReadyNotify(): |
| 296 | } |
| 297 | |
| 298 | // ... build a client to its' socket... |
| 299 | cl, err := s.config.localClient() |
| 300 | if err != nil { |
| 301 | return fmt.Errorf("getting local client failed: %w", err) |
| 302 | } |
| 303 | |
| 304 | // ... and wait until we're not a learner anymore. |
| 305 | for { |
| 306 | members, err := cl.MemberList(ctx) |
| 307 | if err != nil { |
| 308 | supervisor.Logger(ctx).Warningf("MemberList failed: %v", err) |
| 309 | time.Sleep(time.Second) |
| 310 | continue |
| 311 | } |
| 312 | |
| 313 | isMember := false |
| 314 | for _, member := range members.Members { |
| 315 | if member.ID != uint64(server.Server.ID()) { |
| 316 | continue |
| 317 | } |
| 318 | if !member.IsLearner { |
| 319 | isMember = true |
| 320 | break |
| 321 | } |
| 322 | } |
| 323 | if isMember { |
| 324 | break |
| 325 | } |
| 326 | supervisor.Logger(ctx).Warningf("Still a learner, waiting...") |
| 327 | time.Sleep(time.Second) |
| 328 | } |
| 329 | |
| 330 | // All done! Report status. |
| 331 | supervisor.Logger(ctx).Infof("etcd server ready") |
| 332 | |
| 333 | st := &Status{ |
| 334 | localPeerURL: cfg.APUrls[0].String(), |
| 335 | localMemberID: uint64(server.Server.ID()), |
| 336 | cl: cl, |
| 337 | ca: s.ca, |
| 338 | } |
| 339 | s.value.Set(st) |
| 340 | |
| 341 | // Wait until server dies for whatever reason, update status when that |
| 342 | // happens. |
| 343 | supervisor.Signal(ctx, supervisor.SignalHealthy) |
| 344 | select { |
| 345 | case err = <-server.Err(): |
| 346 | err = fmt.Errorf("server returned error: %w", err) |
| 347 | case <-ctx.Done(): |
| 348 | server.Close() |
| 349 | err = ctx.Err() |
| 350 | } |
| 351 | st.stopped = true |
| 352 | s.value.Set(st) |
| 353 | return err |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 354 | } |
| 355 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 356 | func clientFor(kv *clientv3.Client, parts ...string) (client.Namespaced, error) { |
| 357 | var err error |
| 358 | namespaced := client.NewLocal(kv) |
| 359 | for _, el := range parts { |
| 360 | namespaced, err = namespaced.Sub(el) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 361 | if err != nil { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 362 | return nil, fmt.Errorf("when getting sub client: %w", err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 363 | } |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 364 | |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 365 | } |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 366 | return namespaced, nil |
| 367 | } |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 368 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 369 | // bootstrap performs a procedure to resolve the following bootstrap problems: |
| 370 | // in order to start an etcd server for consensus, we need it to serve over TLS. |
| 371 | // However, these TLS certificates also need to be stored in etcd so that |
| 372 | // further certificates can be issued for new nodes. |
| 373 | // |
| 374 | // This was previously solved by a using a special PKI/TLS management system that |
| 375 | // could first create certificates and keys in memory, then only commit them to |
| 376 | // etcd. However, this ended up being somewhat brittle in the face of startup |
| 377 | // sequencing issues, so we're now going with a different approach. |
| 378 | // |
| 379 | // This function starts an etcd instance first without any PKI/TLS support, |
| 380 | // without listening on any external port for peer traffic. Once the instance is |
| 381 | // running, it uses the standard metropolis pki library to create all required |
| 382 | // data directly in the running etcd instance. It then writes all required |
| 383 | // startup data (node private key, member certificate, CA certificate) to disk, |
| 384 | // so that a 'full' etcd instance can be started. |
| 385 | func (s *Service) bootstrap(ctx context.Context, fifoPath string) error { |
| 386 | supervisor.Logger(ctx).Infof("Bootstrapping PKI: starting etcd...") |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 387 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 388 | cfg := s.config.build(false) |
| 389 | // This will make etcd create data directories and create a fully new cluster if |
| 390 | // needed. If we're restarting due to an error, the old cluster data will still |
| 391 | // exist. |
| 392 | cfg.ClusterState = "new" |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 393 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 394 | // Start the bootstrap etcd instance... |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 395 | server, err := embed.StartEtcd(cfg) |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 396 | if err != nil { |
Serge Bazanski | b76b8d1 | 2023-03-16 00:46:56 +0100 | [diff] [blame] | 397 | return fmt.Errorf("failed to start bootstrap etcd: %w", err) |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 398 | } |
Serge Bazanski | b76b8d1 | 2023-03-16 00:46:56 +0100 | [diff] [blame] | 399 | defer server.Close() |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 400 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 401 | // ... wait for it to run ... |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 402 | select { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 403 | case <-server.Server.ReadyNotify(): |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 404 | case <-ctx.Done(): |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 405 | return fmt.Errorf("when waiting for bootstrap etcd: %w", err) |
Lorenz Brun | 52f7f29 | 2020-06-24 16:42:02 +0200 | [diff] [blame] | 406 | } |
| 407 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 408 | // ... create a client to it ... |
| 409 | cl, err := s.config.localClient() |
Lorenz Brun | 52f7f29 | 2020-06-24 16:42:02 +0200 | [diff] [blame] | 410 | if err != nil { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 411 | return fmt.Errorf("when getting bootstrap client: %w", err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 412 | } |
| 413 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 414 | // ... and build PKI there. This is idempotent, so we will never override |
| 415 | // anything that's already in the cluster, instead just retrieve it. |
| 416 | supervisor.Logger(ctx).Infof("Bootstrapping PKI: etcd running, building PKI...") |
| 417 | clPKI, err := clientFor(cl, "namespaced", "etcd-pki") |
| 418 | if err != nil { |
| 419 | return fmt.Errorf("when getting pki client: %w", err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 420 | } |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 421 | defer clPKI.Close() |
| 422 | caCert, err := s.ca.Ensure(ctx, clPKI) |
| 423 | if err != nil { |
| 424 | return fmt.Errorf("failed to ensure CA certificate: %w", err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 425 | } |
| 426 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 427 | // If we're running with a test overridden external address (eg. localhost), we |
| 428 | // need to also make that part of the member certificate. |
| 429 | var extraNames []string |
| 430 | if external := s.config.testOverrides.externalAddress; external != "" { |
| 431 | extraNames = []string{external} |
| 432 | } |
| 433 | memberTemplate := pki.Certificate{ |
| 434 | Name: identity.NodeID(s.config.nodePublicKey()), |
| 435 | Namespace: &pkiNamespace, |
| 436 | Issuer: s.ca, |
| 437 | Template: pkiPeerCertificate(s.config.nodePublicKey(), extraNames), |
| 438 | Mode: pki.CertificateExternal, |
| 439 | PublicKey: s.config.nodePublicKey(), |
| 440 | } |
| 441 | memberCert, err := memberTemplate.Ensure(ctx, clPKI) |
| 442 | if err != nil { |
| 443 | return fmt.Errorf("failed to ensure member certificate: %w", err) |
| 444 | } |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 445 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 446 | // Retrieve CRL. |
| 447 | crlW := s.ca.WatchCRL(clPKI) |
| 448 | crl, err := crlW.Get(ctx) |
| 449 | if err != nil { |
| 450 | return fmt.Errorf("failed to retrieve initial CRL: %w", err) |
| 451 | } |
| 452 | |
| 453 | // We have everything we need. Write things to disk. |
| 454 | supervisor.Logger(ctx).Infof("Bootstrapping PKI: certificates issued, writing to disk...") |
| 455 | |
| 456 | if err := s.config.Data.PeerPKI.WriteAll(memberCert, s.config.NodePrivateKey, caCert); err != nil { |
| 457 | return fmt.Errorf("failed to write bootstrapped certificates: %w", err) |
| 458 | } |
| 459 | if err := s.config.Data.PeerCRL.Write(crl.Raw, 0400); err != nil { |
| 460 | return fmt.Errorf("failed tow rite CRL: %w", err) |
| 461 | } |
| 462 | |
| 463 | // Stop the server synchronously (blocking until it's fully shutdown), and |
| 464 | // return. The caller can now run the 'full' etcd instance with PKI. |
| 465 | supervisor.Logger(ctx).Infof("Bootstrapping PKI: done, stopping server...") |
| 466 | server.Close() |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 467 | return ctx.Err() |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 468 | } |
| 469 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 470 | // autopromoter is a runnable which repeatedly attempts to promote etcd learners |
| 471 | // in the cluster to full followers. This is needed to bring any new cluster |
| 472 | // members (which are always added as learners) to full membership and make them |
| 473 | // part of the etcd quorum. |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 474 | func (s *Service) autopromoter(ctx context.Context) error { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 475 | autopromote := func(ctx context.Context, cl *clientv3.Client) { |
| 476 | // Only autopromote if our endpoint is a leader. This is a bargain bin version |
| 477 | // of leader election: it's simple and cheap, but not very reliable. The most |
| 478 | // obvious failure mode is that the instance we contacted isn't a leader by the |
| 479 | // time we promote a member, but that's fine - the promotion is idempotent. What |
| 480 | // we really use the 'leader election' here for isn't for consistency, but to |
| 481 | // prevent the cluster from being hammered by spurious leadership promotion |
| 482 | // requests from every etcd member. |
| 483 | status, err := cl.Status(ctx, cl.Endpoints()[0]) |
| 484 | if err != nil { |
| 485 | supervisor.Logger(ctx).Warningf("Failed to get endpoint status: %v", err) |
| 486 | } |
| 487 | if status.Leader != status.Header.MemberId { |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 488 | return |
| 489 | } |
| 490 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 491 | members, err := cl.MemberList(ctx) |
| 492 | if err != nil { |
| 493 | supervisor.Logger(ctx).Warningf("Failed to list members: %v", err) |
| 494 | return |
| 495 | } |
| 496 | for _, member := range members.Members { |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 497 | if !member.IsLearner { |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 498 | continue |
| 499 | } |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 500 | // Always call PromoteMember since the metadata necessary to decide if we should |
| 501 | // is private. Luckily etcd already does consistency checks internally and will |
| 502 | // refuse to promote nodes that aren't connected or are still behind on |
| 503 | // transactions. |
| 504 | if _, err := cl.MemberPromote(ctx, member.ID); err != nil { |
Serge Bazanski | c735967 | 2020-10-30 16:38:57 +0100 | [diff] [blame] | 505 | supervisor.Logger(ctx).Infof("Failed to promote consensus node %s: %v", member.Name, err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 506 | } else { |
Serge Bazanski | c735967 | 2020-10-30 16:38:57 +0100 | [diff] [blame] | 507 | supervisor.Logger(ctx).Infof("Promoted new consensus node %s", member.Name) |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 508 | } |
| 509 | } |
| 510 | } |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 511 | |
Serge Bazanski | 37110c3 | 2023-03-01 13:57:27 +0000 | [diff] [blame] | 512 | w := s.value.Watch() |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 513 | for { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 514 | st, err := w.Get(ctx) |
| 515 | if err != nil { |
| 516 | return fmt.Errorf("status get failed: %w", err) |
Lorenz Brun | 52f7f29 | 2020-06-24 16:42:02 +0200 | [diff] [blame] | 517 | } |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 518 | t := time.NewTicker(5 * time.Second) |
| 519 | for { |
| 520 | autopromote(ctx, st.cl) |
| 521 | select { |
| 522 | case <-ctx.Done(): |
| 523 | t.Stop() |
| 524 | return ctx.Err() |
| 525 | case <-t.C: |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 526 | } |
| 527 | } |
| 528 | } |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 529 | } |
| 530 | |
// Watch returns an event Watcher that yields the current *Status of the
// consensus service whenever it changes.
func (s *Service) Watch() event.Watcher[*Status] {
	return s.value.Watch()
}
| 534 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 535 | // selfupdater is a runnable that performs a one-shot (once per Service Run, |
| 536 | // thus once for each configuration) update of the node's Peer URL in etcd. This |
| 537 | // is currently only really needed because the first node in the cluster |
| 538 | // bootstraps itself without any peer URLs at first, and this allows it to then |
| 539 | // add the peer URLs afterwards. Instead of a runnable, this might as well have |
| 540 | // been part of the bootstarp logic, but making it a restartable runnable is |
| 541 | // more robust. |
| 542 | func (s *Service) selfupdater(ctx context.Context) error { |
| 543 | supervisor.Signal(ctx, supervisor.SignalHealthy) |
Serge Bazanski | 37110c3 | 2023-03-01 13:57:27 +0000 | [diff] [blame] | 544 | w := s.value.Watch() |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 545 | for { |
| 546 | st, err := w.Get(ctx) |
| 547 | if err != nil { |
| 548 | return fmt.Errorf("failed to get status: %w", err) |
| 549 | } |
| 550 | |
Serge Bazanski | 5839e97 | 2021-11-16 15:46:19 +0100 | [diff] [blame] | 551 | if st.localPeerURL != "" { |
| 552 | supervisor.Logger(ctx).Infof("Updating local peer URL...") |
| 553 | peerURL := st.localPeerURL |
| 554 | if _, err := st.cl.MemberUpdate(ctx, st.localMemberID, []string{peerURL}); err != nil { |
| 555 | supervisor.Logger(ctx).Warningf("failed to update member: %v", err) |
| 556 | time.Sleep(1 * time.Second) |
| 557 | continue |
| 558 | } |
| 559 | } else { |
| 560 | supervisor.Logger(ctx).Infof("No local peer URL, not updating.") |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 561 | } |
| 562 | |
| 563 | supervisor.Signal(ctx, supervisor.SignalDone) |
| 564 | return nil |
Serge Bazanski | a105db5 | 2021-04-12 19:57:46 +0200 | [diff] [blame] | 565 | } |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 566 | } |