// Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package consensus implements a runnable that manages an etcd instance which
// forms part of a Metropolis etcd cluster. This cluster is a foundational
// building block of Metropolis and its startup/management sequencing needs to
// be as robust as possible.
//
// Cluster Structure
//
// Each etcd instance listens for two kinds of traffic:
//
// 1. Peer traffic over TLS on a TCP port of the node's main interface. This is
// where other etcd instances connect to exchange peer traffic, perform
// transactions and build quorum. The TLS credentials are stored in a PKI that
// is managed internally by the consensus runnable, with its state stored in
// etcd itself.
//
// 2. Client traffic over a local domain socket, with access control based on
// standard Linux user/group permissions. Currently this allows any code running
// as root in the host namespace full access to the etcd cluster.
//
// This means that if code running on a node wishes to perform etcd
// transactions, it must also run an etcd instance. This colocation of all
// direct etcd access and the etcd instances themselves effectively delegates
// all Metropolis control plane functionality to whatever subset of nodes is
// running consensus and all code that connects to etcd directly (the Curator).
//
// For example, if nodes foo and baz are part of the control plane, but node
// worker is not:
//
//   .---------------------.
//   | node-foo            |
//   |---------------------|
//   | .--------------------.
//   | | etcd               |<---etcd/TLS--.     (node.ConsensusPort)
//   | '--------------------'              |
//   |    ^ Domain Socket  |               |
//   |    | etcd/plain     |               |
//   | .--------------------.              |
//   | | curator            |<---gRPC/TLS----.   (node.CuratorServicePort)
//   | '--------------------'              | |
//   |    ^ Domain Socket  |               | |
//   |    | gRPC/plain     |               | |
//   | .-----------------. |               | |
//   | | node logic      | |               | |
//   | '-----------------' |               | |
//   '---------------------'               | |
//                                         | |
//   .---------------------.               | |
//   | node-baz            |               | |
//   |---------------------|               | |
//   | .--------------------.              | |
//   | | etcd               |<-------------' |
//   | '--------------------'                |
//   |    ^ Domain Socket  |                 |
//   |    | etcd/plain     |                 |
//   | .--------------------.                |
//   | | curator            |<---gRPC/TLS----:
//   | '--------------------'                |
//   | ...                 |                 |
//   '---------------------'                 |
//                                           |
//   .---------------------.                 |
//   | node-worker         |                 |
//   |---------------------|                 |
//   | .-----------------. |                 |
//   | | node logic      |-------------------'
//   | '-----------------' |
//   '---------------------'
//
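// Usage
//
// A minimal sketch of how a parent runnable might start this service, assuming
// an already populated Config (its fields are defined elsewhere in this
// package) and a running supervisor tree; the runnable name "consensus" below
// is arbitrary:
//
//	svc := consensus.New(cfg)
//	if err := supervisor.Run(ctx, "consensus", svc.Run); err != nil {
//		return err
//	}
//	// Get blocks until the local etcd member is up and has left learner
//	// mode, at which point a Status is published.
//	w := svc.Watch()
//	st, err := w.Get(ctx)
//	if err != nil {
//		return err
//	}
//	_ = st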
package consensus

import (
	"context"
	"crypto/ed25519"
	"crypto/x509"
	"crypto/x509/pkix"
	"errors"
	"fmt"
	"math/big"
	"net"
	"net/url"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/server/v3/embed"

	"source.monogon.dev/metropolis/node/core/consensus/client"
	"source.monogon.dev/metropolis/node/core/identity"
	"source.monogon.dev/metropolis/pkg/event"
	"source.monogon.dev/metropolis/pkg/event/memory"
	"source.monogon.dev/metropolis/pkg/logtree/unraw"
	"source.monogon.dev/metropolis/pkg/pki"
	"source.monogon.dev/metropolis/pkg/supervisor"
)

var (
	pkiNamespace = pki.Namespaced("/pki/")
)

func pkiCA() *pki.Certificate {
	return &pki.Certificate{
		Name:      "CA",
		Namespace: &pkiNamespace,
		Issuer:    pki.SelfSigned,
		Template: x509.Certificate{
			SerialNumber: big.NewInt(1),
			Subject: pkix.Name{
				CommonName: "Metropolis etcd CA Certificate",
			},
			IsCA:        true,
			KeyUsage:    x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature,
			ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageOCSPSigning},
		},
	}
}

func pkiPeerCertificate(pubkey ed25519.PublicKey, extraNames []string) x509.Certificate {
	return x509.Certificate{
		Subject: pkix.Name{
			CommonName: identity.NodeID(pubkey),
		},
		KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage: []x509.ExtKeyUsage{
			x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth,
		},
		DNSNames: append(extraNames, identity.NodeID(pubkey)),
	}
}

// Service is the etcd cluster member service. See package-level documentation
// for more information.
type Service struct {
	config *Config

	value memory.Value[*Status]
	ca    *pki.Certificate
}

func New(config Config) *Service {
	return &Service{
		config: &config,
	}
}

// Run is a Supervisor runnable that starts the etcd member service. It will
// become healthy once the member joins the cluster successfully.
func (s *Service) Run(ctx context.Context) error {
	// Always re-create CA to make sure we don't have PKI state from previous runs.
	//
	// TODO(q3k): make the PKI library immune to this misuse.
	s.ca = pkiCA()

	// Create log converter. This will ingest etcd logs and pipe them out to this
	// runnable's leveled logging facilities.
	//
	// The sub-runnable is not where etcd will run, but where its log ingestion
	// machinery lives. This ensures that the (annoyingly verbose) etcd logs are
	// contained to just .etcd.
	err := supervisor.Run(ctx, "etcd", func(ctx context.Context) error {
		converter := unraw.Converter{
			Parser:            parseEtcdLogEntry,
			MaximumLineLength: 8192,
			LeveledLogger:     supervisor.Logger(ctx),
		}
		pipe, err := converter.NamedPipeReader(s.config.Ephemeral.ServerLogsFIFO.FullPath())
		if err != nil {
			return fmt.Errorf("when creating pipe reader: %w", err)
		}
		if err := supervisor.Run(ctx, "piper", pipe); err != nil {
			return fmt.Errorf("when starting log piper: %w", err)
		}
		supervisor.Signal(ctx, supervisor.SignalHealthy)
		<-ctx.Done()
		return ctx.Err()
	})
	if err != nil {
		return fmt.Errorf("when starting etcd logger: %w", err)
	}

	// Create autopromoter, which will automatically promote all learners to full
	// etcd members.
	if err := supervisor.Run(ctx, "autopromoter", s.autopromoter); err != nil {
		return fmt.Errorf("when starting autopromoter: %w", err)
	}

	// Create selfupdater, which will perform a one-shot update of this member's
	// peer address in etcd.
	if err := supervisor.Run(ctx, "selfupdater", s.selfupdater); err != nil {
		return fmt.Errorf("when starting selfupdater: %w", err)
	}

	// Prepare cluster PKI credentials.
	ppki := s.config.Data.PeerPKI
	jc := s.config.JoinCluster
	if jc != nil {
		supervisor.Logger(ctx).Info("JoinCluster set, writing PPKI data to disk...")
		// For nodes that join an existing cluster, or re-join it, always write whatever
		// we've been given on startup.
		if err := ppki.WriteAll(jc.NodeCertificate.Raw, s.config.NodePrivateKey, jc.CACertificate.Raw); err != nil {
			return fmt.Errorf("when writing credentials for join: %w", err)
		}
		if err := s.config.Data.PeerCRL.Write(jc.InitialCRL.Raw, 0400); err != nil {
			return fmt.Errorf("when writing CRL for join: %w", err)
		}
	} else {
		// For other nodes, we should already have credentials from a previous join, or
		// a previous bootstrap. If none exist, assume we need to bootstrap these
		// credentials.
		//
		// TODO(q3k): once we have node join (ie. node restart from disk) flow, add a
		// special configuration marker to prevent spurious bootstraps.
		absent, err := ppki.AllAbsent()
		if err != nil {
			return fmt.Errorf("when checking for PKI file absence: %w", err)
		}
		if absent {
			supervisor.Logger(ctx).Info("PKI data absent, bootstrapping.")
			if err := s.bootstrap(ctx); err != nil {
				return fmt.Errorf("bootstrap failed: %w", err)
			}
		} else {
			supervisor.Logger(ctx).Info("PKI data present, not bootstrapping.")
		}
	}

	// If we're joining a cluster, make sure that our peers are actually DNS
	// resolvable. This prevents us from immediately failing due to transient DNS
	// issues.
	if jc := s.config.JoinCluster; jc != nil {
		supervisor.Logger(ctx).Infof("Waiting for initial peers to be DNS resolvable...")
		startLogging := time.Now().Add(5 * time.Second)
		for {
			allOkay := true
			shouldLog := time.Now().After(startLogging)
			for _, node := range jc.ExistingNodes {
				u, err := url.Parse(node.URL)
				if err != nil {
					// Just pretend this node is up. If the URL is really bad, etcd will complain
					// more clearly than us. This shouldn't happen, anyway.
					continue
				}
				host := u.Hostname()
				_, err = net.LookupIP(host)
				if err == nil {
					continue
				}
				if shouldLog {
					supervisor.Logger(ctx).Errorf("Still can't resolve peer %s (%s): %v", node.Name, host, err)
				}
				allOkay = false
			}
			if allOkay {
				supervisor.Logger(ctx).Infof("All peers resolvable, continuing startup.")
				break
			}

			time.Sleep(100 * time.Millisecond)
			if shouldLog {
				startLogging = time.Now().Add(5 * time.Second)
			}
		}
	}

	// Start etcd ...
	supervisor.Logger(ctx).Infof("Starting etcd...")
	cfg := s.config.build(true)
	server, err := embed.StartEtcd(cfg)
	if err != nil {
		return fmt.Errorf("when starting etcd: %w", err)
	}

	// ... wait for server to be ready...
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-server.Server.ReadyNotify():
	}

	// ... build a client to its socket...
	cl, err := s.config.localClient()
	if err != nil {
		return fmt.Errorf("getting local client failed: %w", err)
	}

	// ... and wait until we're not a learner anymore.
	for {
		members, err := cl.MemberList(ctx)
		if err != nil {
			supervisor.Logger(ctx).Warningf("MemberList failed: %v", err)
			time.Sleep(time.Second)
			continue
		}

		isMember := false
		for _, member := range members.Members {
			if member.ID != uint64(server.Server.ID()) {
				continue
			}
			if !member.IsLearner {
				isMember = true
				break
			}
		}
		if isMember {
			break
		}
		supervisor.Logger(ctx).Warningf("Still a learner, waiting...")
		time.Sleep(time.Second)
	}

	// All done! Report status.
	supervisor.Logger(ctx).Infof("etcd server ready")

	st := &Status{
		localPeerURL:  cfg.AdvertisePeerUrls[0].String(),
		localMemberID: uint64(server.Server.ID()),
		cl:            cl,
		ca:            s.ca,
	}
	st2 := *st
	s.value.Set(&st2)

	// Wait until server dies for whatever reason, update status when that
	// happens.
	supervisor.Signal(ctx, supervisor.SignalHealthy)
	select {
	case err = <-server.Err():
		err = fmt.Errorf("server returned error: %w", err)
	case <-ctx.Done():
		server.Close()
		err = ctx.Err()
	}

	st.stopped = true
	st3 := *st
	s.value.Set(&st3)
	return err
}

// clientFor returns a namespaced etcd client built on top of the given etcd
// client, descending into the given namespace parts one by one.
func clientFor(kv *clientv3.Client, parts ...string) (client.Namespaced, error) {
	var err error
	namespaced := client.NewLocal(kv)
	for _, el := range parts {
		namespaced, err = namespaced.Sub(el)
		if err != nil {
			return nil, fmt.Errorf("when getting sub client: %w", err)
		}
	}
	return namespaced, nil
}

// bootstrap performs a procedure to resolve the following bootstrap problem:
// in order to start an etcd server for consensus, we need it to serve over TLS.
// However, these TLS certificates also need to be stored in etcd so that
// further certificates can be issued for new nodes.
//
// This was previously solved by using a special PKI/TLS management system that
// could first create certificates and keys in memory and only then commit them
// to etcd. However, this ended up being somewhat brittle in the face of startup
// sequencing issues, so we're now going with a different approach.
//
// This function first starts an etcd instance without any PKI/TLS support and
// without listening on any external port for peer traffic. Once the instance is
// running, it uses the standard Metropolis pki library to create all required
// data directly in the running etcd instance. It then writes all required
// startup data (node private key, member certificate, CA certificate) to disk,
// so that a 'full' etcd instance can be started.
func (s *Service) bootstrap(ctx context.Context) error {
	supervisor.Logger(ctx).Infof("Bootstrapping PKI: starting etcd...")

	cfg := s.config.build(false)
	// This will make etcd create data directories and create a fully new cluster if
	// needed. If we're restarting due to an error, the old cluster data will still
	// exist.
	cfg.ClusterState = "new"

	// Start the bootstrap etcd instance...
	server, err := embed.StartEtcd(cfg)
	if err != nil {
		return fmt.Errorf("failed to start bootstrap etcd: %w", err)
	}
	defer server.Close()

	// ... wait for it to run ...
	select {
	case <-server.Server.ReadyNotify():
	case <-ctx.Done():
		return errors.New("timed out waiting for etcd to become ready")
	}

	// ... create a client to it ...
	cl, err := s.config.localClient()
	if err != nil {
		return fmt.Errorf("when getting bootstrap client: %w", err)
	}

	// ... and build PKI there. This is idempotent, so we will never overwrite
	// anything that's already in the cluster, instead just retrieve it.
	supervisor.Logger(ctx).Infof("Bootstrapping PKI: etcd running, building PKI...")
	clPKI, err := clientFor(cl, "namespaced", "etcd-pki")
	if err != nil {
		return fmt.Errorf("when getting pki client: %w", err)
	}
	defer clPKI.Close()
	caCert, err := s.ca.Ensure(ctx, clPKI)
	if err != nil {
		return fmt.Errorf("failed to ensure CA certificate: %w", err)
	}

	// If we're running with a test-overridden external address (eg. localhost), we
	// need to also make that part of the member certificate.
	var extraNames []string
	if external := s.config.testOverrides.externalAddress; external != "" {
		extraNames = []string{external}
	}
	memberTemplate := pki.Certificate{
		Name:      identity.NodeID(s.config.nodePublicKey()),
		Namespace: &pkiNamespace,
		Issuer:    s.ca,
		Template:  pkiPeerCertificate(s.config.nodePublicKey(), extraNames),
		Mode:      pki.CertificateExternal,
		PublicKey: s.config.nodePublicKey(),
	}
	memberCert, err := memberTemplate.Ensure(ctx, clPKI)
	if err != nil {
		return fmt.Errorf("failed to ensure member certificate: %w", err)
	}

	// Retrieve CRL.
	crlW := s.ca.WatchCRL(clPKI)
	crl, err := crlW.Get(ctx)
	if err != nil {
		return fmt.Errorf("failed to retrieve initial CRL: %w", err)
	}

	// We have everything we need. Write things to disk.
	supervisor.Logger(ctx).Infof("Bootstrapping PKI: certificates issued, writing to disk...")

	if err := s.config.Data.PeerPKI.WriteAll(memberCert, s.config.NodePrivateKey, caCert); err != nil {
		return fmt.Errorf("failed to write bootstrapped certificates: %w", err)
	}
	if err := s.config.Data.PeerCRL.Write(crl.Raw, 0400); err != nil {
		return fmt.Errorf("failed to write CRL: %w", err)
	}

	// Stop the server synchronously (blocking until it's fully shut down), and
	// return. The caller can now run the 'full' etcd instance with PKI.
	supervisor.Logger(ctx).Infof("Bootstrapping PKI: done, stopping server...")
	server.Close()
	return ctx.Err()
}

// autopromoter is a runnable which repeatedly attempts to promote etcd learners
// in the cluster to full followers. This is needed to bring any new cluster
// members (which are always added as learners) to full membership and make them
// part of the etcd quorum.
func (s *Service) autopromoter(ctx context.Context) error {
	autopromote := func(ctx context.Context, cl *clientv3.Client) {
		// Only autopromote if our endpoint is a leader. This is a bargain bin version
		// of leader election: it's simple and cheap, but not very reliable. The most
		// obvious failure mode is that the instance we contacted isn't a leader by the
		// time we promote a member, but that's fine - the promotion is idempotent. What
		// we really use the 'leader election' here for isn't consistency, but to
		// prevent the cluster from being hammered by spurious promotion requests from
		// every etcd member.
		status, err := cl.Status(ctx, cl.Endpoints()[0])
		if err != nil {
			supervisor.Logger(ctx).Warningf("Failed to get endpoint status: %v", err)
			return
		}
		if status.Leader != status.Header.MemberId {
			return
		}

		members, err := cl.MemberList(ctx)
		if err != nil {
			supervisor.Logger(ctx).Warningf("Failed to list members: %v", err)
			return
		}
		for _, member := range members.Members {
			if !member.IsLearner {
				continue
			}
			// Always call MemberPromote, since the metadata necessary to decide whether we
			// should is private to etcd. Luckily etcd already does consistency checks
			// internally and will refuse to promote nodes that aren't connected or are
			// still behind on transactions.
			if _, err := cl.MemberPromote(ctx, member.ID); err != nil {
				supervisor.Logger(ctx).Infof("Failed to promote consensus node %s: %v", member.Name, err)
			} else {
				supervisor.Logger(ctx).Infof("Promoted new consensus node %s", member.Name)
			}
		}
	}
| Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 511 | |
| Serge Bazanski | 37110c3 | 2023-03-01 13:57:27 +0000 | [diff] [blame] | 512 | w := s.value.Watch() |
| Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 513 | for { |
| Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 514 | st, err := w.Get(ctx) |
| 515 | if err != nil { |
| 516 | return fmt.Errorf("status get failed: %w", err) |
| Lorenz Brun | 52f7f29 | 2020-06-24 16:42:02 +0200 | [diff] [blame] | 517 | } |
| Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame] | 518 | t := time.NewTicker(5 * time.Second) |
| 519 | for { |
| 520 | autopromote(ctx, st.cl) |
| 521 | select { |
| 522 | case <-ctx.Done(): |
| 523 | t.Stop() |
| 524 | return ctx.Err() |
| 525 | case <-t.C: |
| Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 526 | } |
| 527 | } |
| 528 | } |
| Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 529 | } |
| 530 | |
| Serge Bazanski | 37110c3 | 2023-03-01 13:57:27 +0000 | [diff] [blame] | 531 | func (s *Service) Watch() event.Watcher[*Status] { |
| 532 | return s.value.Watch() |
| 533 | } |

// selfupdater is a runnable that performs a one-shot (once per Service Run,
// thus once for each configuration) update of the node's peer URL in etcd. This
// is currently only really needed because the first node in the cluster
// bootstraps itself without any peer URLs at first, and this allows it to then
// add the peer URLs afterwards. Instead of a runnable, this might as well have
// been part of the bootstrap logic, but making it a restartable runnable is
// more robust.
func (s *Service) selfupdater(ctx context.Context) error {
	supervisor.Signal(ctx, supervisor.SignalHealthy)
	w := s.value.Watch()
	for {
		st, err := w.Get(ctx)
		if err != nil {
			return fmt.Errorf("failed to get status: %w", err)
		}

		if st.localPeerURL != "" {
			supervisor.Logger(ctx).Infof("Updating local peer URL...")
			peerURL := st.localPeerURL
			if _, err := st.cl.MemberUpdate(ctx, st.localMemberID, []string{peerURL}); err != nil {
				supervisor.Logger(ctx).Warningf("failed to update member: %v", err)
				time.Sleep(1 * time.Second)
				continue
			}
		} else {
			supervisor.Logger(ctx).Infof("No local peer URL, not updating.")
		}

		supervisor.Signal(ctx, supervisor.SignalDone)
		return nil
	}
}