Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 1 | // Copyright 2020 The Monogon Project Authors. |
| 2 | // |
| 3 | // SPDX-License-Identifier: Apache-2.0 |
| 4 | // |
| 5 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | // you may not use this file except in compliance with the License. |
| 7 | // You may obtain a copy of the License at |
| 8 | // |
| 9 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | // |
| 11 | // Unless required by applicable law or agreed to in writing, software |
| 12 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | // See the License for the specific language governing permissions and |
| 15 | // limitations under the License. |
| 16 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 17 | // Package consensus implements a runnable that manages an etcd instance which |
| 18 | // forms part of a Metropolis etcd cluster. This cluster is a foundational |
| 19 | // building block of Metropolis and its startup/management sequencing needs to |
| 20 | // be as robust as possible. |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 21 | // |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 22 | // Cluster Structure |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 23 | // |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 24 | // Each etcd instance listens for two kinds of traffic: |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 25 | // |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 26 | // 1. Peer traffic over TLS on a TCP port of the node's main interface. This is |
| 27 | // where other etcd instances connect to to exchange peer traffic, perform |
| 28 | // transactions and build quorum. The TLS credentials are stored in a PKI that |
| 29 | // is managed internally by the consensus runnable, with its state stored in |
| 30 | // etcd itself. |
| 31 | // |
| 32 | // 2. Client traffic over a local domain socket, with access control based on |
| 33 | // standard Linux user/group permissions. Currently this allows any code running |
| 34 | // as root on the host namespace full access to the etcd cluster. |
| 35 | // |
// This means that if code running on a node wishes to perform etcd
// transactions, it must also run an etcd instance. This colocation of all
// direct etcd access and the etcd instances themselves effectively delegates
// all Metropolis control plane functionality to whatever subset of nodes is
// running consensus and all code that connects to etcd directly (the Curator).
| 41 | // |
// For example, if nodes foo and baz are part of the control plane, but node
// worker is not:
| 44 | // |
| 45 | // .---------------------. |
| 46 | // | node-foo | |
| 47 | // |---------------------| |
| 48 | // | .--------------------. |
| 49 | // | | etcd |<---etcd/TLS--. (node.ConsensusPort) |
| 50 | // | '--------------------' | |
| 51 | // | ^ Domain Socket | | |
| 52 | // | | etcd/plain | | |
| 53 | // | .--------------------. | |
| 54 | // | | curator |<---gRPC/TLS----. (node.CuratorServicePort) |
| 55 | // | '--------------------' | | |
| 56 | // | ^ Domain Socket | | | |
| 57 | // | | gRPC/plain | | | |
| 58 | // | .-----------------. | | | |
| 59 | // | | node logic | | | | |
| 60 | // | '-----------------' | | | |
| 61 | // '---------------------' | | |
| 62 | // | | |
| 63 | // .---------------------. | | |
| 64 | // | node-baz | | | |
| 65 | // |---------------------| | | |
| 66 | // | .--------------------. | | |
| 67 | // | | etcd |<-------------' | |
| 68 | // | '--------------------' | |
| 69 | // | ^ Domain Socket | | |
| 70 | // | | gRPC/plain | | |
| 71 | // | .--------------------. | |
| 72 | // | | curator |<---gRPC/TLS----: |
| 73 | // | '--------------------' | |
| 74 | // | ... | | |
| 75 | // '---------------------' | |
| 76 | // | |
| 77 | // .---------------------. | |
| 78 | // | node-worker | | |
| 79 | // |---------------------| | |
| 80 | // | .-----------------. | | |
| 81 | // | | node logic |-------------------' |
| 82 | // | '-----------------' | |
| 83 | // '---------------------' |
| 84 | // |
| 85 | |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 86 | package consensus |
| 87 | |
| 88 | import ( |
| 89 | "context" |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 90 | "crypto/ed25519" |
| 91 | "crypto/x509" |
| 92 | "crypto/x509/pkix" |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 93 | "fmt" |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 94 | "math/big" |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 95 | "time" |
| 96 | |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 97 | "go.etcd.io/etcd/clientv3" |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 98 | "go.etcd.io/etcd/embed" |
Hendrik Hofstadt | 8efe51e | 2020-02-28 12:53:41 +0100 | [diff] [blame] | 99 | |
Serge Bazanski | a105db5 | 2021-04-12 19:57:46 +0200 | [diff] [blame] | 100 | "source.monogon.dev/metropolis/node/core/consensus/client" |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 101 | "source.monogon.dev/metropolis/node/core/identity" |
| 102 | "source.monogon.dev/metropolis/pkg/event/memory" |
Serge Bazanski | 50009e0 | 2021-07-07 14:35:27 +0200 | [diff] [blame] | 103 | "source.monogon.dev/metropolis/pkg/logtree/unraw" |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 104 | "source.monogon.dev/metropolis/pkg/pki" |
Serge Bazanski | 31370b0 | 2021-01-07 16:31:14 +0100 | [diff] [blame] | 105 | "source.monogon.dev/metropolis/pkg/supervisor" |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 106 | ) |
| 107 | |
var (
	// pkiNamespace is the etcd key prefix under which the consensus-internal
	// PKI state (CA certificate, member certificates, CRL) is stored.
	pkiNamespace = pki.Namespaced("/pki/")
)
| 111 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 112 | func pkiCA() *pki.Certificate { |
| 113 | return &pki.Certificate{ |
| 114 | Name: "CA", |
| 115 | Namespace: &pkiNamespace, |
| 116 | Issuer: pki.SelfSigned, |
| 117 | Template: x509.Certificate{ |
| 118 | SerialNumber: big.NewInt(1), |
| 119 | Subject: pkix.Name{ |
| 120 | CommonName: "Metropolis etcd CA Certificate", |
| 121 | }, |
| 122 | IsCA: true, |
| 123 | KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature, |
| 124 | ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageOCSPSigning}, |
| 125 | }, |
| 126 | } |
| 127 | } |
| 128 | |
| 129 | func pkiPeerCertificate(pubkey ed25519.PublicKey, extraNames []string) x509.Certificate { |
| 130 | return x509.Certificate{ |
| 131 | Subject: pkix.Name{ |
| 132 | CommonName: identity.NodeID(pubkey), |
| 133 | }, |
| 134 | KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, |
| 135 | ExtKeyUsage: []x509.ExtKeyUsage{ |
| 136 | x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, |
| 137 | }, |
| 138 | DNSNames: append(extraNames, identity.NodeID(pubkey)), |
| 139 | } |
| 140 | } |
| 141 | |
// Service is the etcd cluster member service. See package-level documentation
// for more information.
type Service struct {
	// config as passed to New. New stores a copy of the caller's Config, and
	// the service reads it for the lifetime of each Run.
	config *Config

	// value publishes the current Status of the running etcd member; it is set
	// from Run once the member is ready and again when the server stops.
	value memory.Value
	// ca is the handle to the cluster CA certificate, re-created at the start
	// of every Run to avoid carrying PKI state across restarts.
	ca *pki.Certificate
}
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 150 | |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 151 | func New(config Config) *Service { |
| 152 | return &Service{ |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 153 | config: &config, |
| 154 | } |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 155 | } |
| 156 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 157 | // Run is a Supervisor runnable that starts the etcd member service. It will |
| 158 | // become healthy once the member joins the cluster successfully. |
| 159 | func (s *Service) Run(ctx context.Context) error { |
| 160 | // Always re-create CA to make sure we don't have PKI state from previous runs. |
| 161 | // |
| 162 | // TODO(q3k): make the PKI library immune to this misuse. |
| 163 | s.ca = pkiCA() |
Lorenz Brun | 52f7f29 | 2020-06-24 16:42:02 +0200 | [diff] [blame] | 164 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 165 | // Create log converter. This will ingest etcd logs and pipe them out to this |
| 166 | // runnable's leveled logging facilities. |
| 167 | // |
| 168 | // TODO(q3k): add support for streaming to a sub-logger in the tree to get |
| 169 | // cleaner logs. |
Serge Bazanski | 50009e0 | 2021-07-07 14:35:27 +0200 | [diff] [blame] | 170 | converter := unraw.Converter{ |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 171 | Parser: parseEtcdLogEntry, |
Serge Bazanski | 50009e0 | 2021-07-07 14:35:27 +0200 | [diff] [blame] | 172 | MaximumLineLength: 8192, |
| 173 | LeveledLogger: supervisor.Logger(ctx), |
| 174 | } |
| 175 | fifoPath := s.config.Ephemeral.ServerLogsFIFO.FullPath() |
| 176 | pipe, err := converter.NamedPipeReader(fifoPath) |
| 177 | if err != nil { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 178 | return fmt.Errorf("when creating pipe reader: %w", err) |
Serge Bazanski | 50009e0 | 2021-07-07 14:35:27 +0200 | [diff] [blame] | 179 | } |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 180 | if err := supervisor.Run(ctx, "piper", pipe); err != nil { |
| 181 | return fmt.Errorf("when starting log piper: %w", err) |
Serge Bazanski | 50009e0 | 2021-07-07 14:35:27 +0200 | [diff] [blame] | 182 | } |
| 183 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 184 | // Create autopromoter, which will automatically promote all learners to full |
| 185 | // etcd members. |
| 186 | if err := supervisor.Run(ctx, "autopromoter", s.autopromoter); err != nil { |
| 187 | return fmt.Errorf("when starting autopromtoer: %w", err) |
| 188 | } |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 189 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 190 | // Create selfupdater, which will perform a one-shot update of this member's |
| 191 | // peer address in etcd. |
| 192 | if err := supervisor.Run(ctx, "selfupadater", s.selfupdater); err != nil { |
| 193 | return fmt.Errorf("when starting selfupdater: %w", err) |
| 194 | } |
| 195 | |
| 196 | // Prepare cluster PKI credentials. |
| 197 | ppki := s.config.Data.PeerPKI |
| 198 | jc := s.config.JoinCluster |
| 199 | if jc != nil { |
| 200 | // For nodes that join an existing cluster, or re-join it, always write whatever |
| 201 | // we've been given on startup. |
| 202 | if err := ppki.WriteAll(jc.NodeCertificate.Raw, s.config.NodePrivateKey, jc.CACertificate.Raw); err != nil { |
| 203 | return fmt.Errorf("when writing credentials for join: %w", err) |
| 204 | } |
| 205 | if err := s.config.Data.PeerCRL.Write(jc.InitialCRL.Raw, 0400); err != nil { |
| 206 | return fmt.Errorf("when writing CRL for join: %w", err) |
| 207 | } |
| 208 | } else { |
| 209 | // For other nodes, we should already have credentials from a previous join, or |
| 210 | // a previous bootstrap. If none exist, assume we need to bootstrap these |
| 211 | // credentials. |
| 212 | // |
| 213 | // TODO(q3k): once we have node join (ie. node restart from disk) flow, add a |
| 214 | // special configuration marker to prevent spurious bootstraps. |
| 215 | absent, err := ppki.AllAbsent() |
| 216 | if err != nil { |
| 217 | return fmt.Errorf("when checking for PKI file absence: %w", err) |
| 218 | } |
| 219 | if absent { |
| 220 | if err := s.bootstrap(ctx, fifoPath); err != nil { |
| 221 | return fmt.Errorf("bootstrap failed: %w", err) |
| 222 | } |
| 223 | } else { |
| 224 | supervisor.Logger(ctx).Info("PKI data present, not bootstrapping.") |
| 225 | } |
| 226 | } |
| 227 | |
| 228 | // Start etcd ... |
| 229 | cfg := s.config.build(true) |
| 230 | server, err := embed.StartEtcd(cfg) |
| 231 | if err != nil { |
| 232 | return fmt.Errorf("when starting etcd: %w", err) |
| 233 | } |
| 234 | |
| 235 | // ... wait for server to be ready... |
| 236 | select { |
| 237 | case <-ctx.Done(): |
| 238 | return ctx.Err() |
| 239 | case <-server.Server.ReadyNotify(): |
| 240 | } |
| 241 | |
| 242 | // ... build a client to its' socket... |
| 243 | cl, err := s.config.localClient() |
| 244 | if err != nil { |
| 245 | return fmt.Errorf("getting local client failed: %w", err) |
| 246 | } |
| 247 | |
| 248 | // ... and wait until we're not a learner anymore. |
| 249 | for { |
| 250 | members, err := cl.MemberList(ctx) |
| 251 | if err != nil { |
| 252 | supervisor.Logger(ctx).Warningf("MemberList failed: %v", err) |
| 253 | time.Sleep(time.Second) |
| 254 | continue |
| 255 | } |
| 256 | |
| 257 | isMember := false |
| 258 | for _, member := range members.Members { |
| 259 | if member.ID != uint64(server.Server.ID()) { |
| 260 | continue |
| 261 | } |
| 262 | if !member.IsLearner { |
| 263 | isMember = true |
| 264 | break |
| 265 | } |
| 266 | } |
| 267 | if isMember { |
| 268 | break |
| 269 | } |
| 270 | supervisor.Logger(ctx).Warningf("Still a learner, waiting...") |
| 271 | time.Sleep(time.Second) |
| 272 | } |
| 273 | |
| 274 | // All done! Report status. |
| 275 | supervisor.Logger(ctx).Infof("etcd server ready") |
| 276 | |
| 277 | st := &Status{ |
| 278 | localPeerURL: cfg.APUrls[0].String(), |
| 279 | localMemberID: uint64(server.Server.ID()), |
| 280 | cl: cl, |
| 281 | ca: s.ca, |
| 282 | } |
| 283 | s.value.Set(st) |
| 284 | |
| 285 | // Wait until server dies for whatever reason, update status when that |
| 286 | // happens. |
| 287 | supervisor.Signal(ctx, supervisor.SignalHealthy) |
| 288 | select { |
| 289 | case err = <-server.Err(): |
| 290 | err = fmt.Errorf("server returned error: %w", err) |
| 291 | case <-ctx.Done(): |
| 292 | server.Close() |
| 293 | err = ctx.Err() |
| 294 | } |
| 295 | st.stopped = true |
| 296 | s.value.Set(st) |
| 297 | return err |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 298 | } |
| 299 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 300 | func clientFor(kv *clientv3.Client, parts ...string) (client.Namespaced, error) { |
| 301 | var err error |
| 302 | namespaced := client.NewLocal(kv) |
| 303 | for _, el := range parts { |
| 304 | namespaced, err = namespaced.Sub(el) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 305 | if err != nil { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 306 | return nil, fmt.Errorf("when getting sub client: %w", err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 307 | } |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 308 | |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 309 | } |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 310 | return namespaced, nil |
| 311 | } |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 312 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 313 | // bootstrap performs a procedure to resolve the following bootstrap problems: |
| 314 | // in order to start an etcd server for consensus, we need it to serve over TLS. |
| 315 | // However, these TLS certificates also need to be stored in etcd so that |
| 316 | // further certificates can be issued for new nodes. |
| 317 | // |
| 318 | // This was previously solved by a using a special PKI/TLS management system that |
| 319 | // could first create certificates and keys in memory, then only commit them to |
| 320 | // etcd. However, this ended up being somewhat brittle in the face of startup |
| 321 | // sequencing issues, so we're now going with a different approach. |
| 322 | // |
| 323 | // This function starts an etcd instance first without any PKI/TLS support, |
| 324 | // without listening on any external port for peer traffic. Once the instance is |
| 325 | // running, it uses the standard metropolis pki library to create all required |
| 326 | // data directly in the running etcd instance. It then writes all required |
| 327 | // startup data (node private key, member certificate, CA certificate) to disk, |
| 328 | // so that a 'full' etcd instance can be started. |
| 329 | func (s *Service) bootstrap(ctx context.Context, fifoPath string) error { |
| 330 | supervisor.Logger(ctx).Infof("Bootstrapping PKI: starting etcd...") |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 331 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 332 | cfg := s.config.build(false) |
| 333 | // This will make etcd create data directories and create a fully new cluster if |
| 334 | // needed. If we're restarting due to an error, the old cluster data will still |
| 335 | // exist. |
| 336 | cfg.ClusterState = "new" |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 337 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 338 | // Start the bootstrap etcd instance... |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 339 | server, err := embed.StartEtcd(cfg) |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 340 | if err != nil { |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 341 | return fmt.Errorf("failed to start etcd: %w", err) |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 342 | } |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 343 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 344 | // ... wait for it to run ... |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 345 | select { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 346 | case <-server.Server.ReadyNotify(): |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 347 | case <-ctx.Done(): |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 348 | return fmt.Errorf("when waiting for bootstrap etcd: %w", err) |
Lorenz Brun | 52f7f29 | 2020-06-24 16:42:02 +0200 | [diff] [blame] | 349 | } |
| 350 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 351 | // ... create a client to it ... |
| 352 | cl, err := s.config.localClient() |
Lorenz Brun | 52f7f29 | 2020-06-24 16:42:02 +0200 | [diff] [blame] | 353 | if err != nil { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 354 | return fmt.Errorf("when getting bootstrap client: %w", err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 355 | } |
| 356 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 357 | // ... and build PKI there. This is idempotent, so we will never override |
| 358 | // anything that's already in the cluster, instead just retrieve it. |
| 359 | supervisor.Logger(ctx).Infof("Bootstrapping PKI: etcd running, building PKI...") |
| 360 | clPKI, err := clientFor(cl, "namespaced", "etcd-pki") |
| 361 | if err != nil { |
| 362 | return fmt.Errorf("when getting pki client: %w", err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 363 | } |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 364 | defer clPKI.Close() |
| 365 | caCert, err := s.ca.Ensure(ctx, clPKI) |
| 366 | if err != nil { |
| 367 | return fmt.Errorf("failed to ensure CA certificate: %w", err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 368 | } |
| 369 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 370 | // If we're running with a test overridden external address (eg. localhost), we |
| 371 | // need to also make that part of the member certificate. |
| 372 | var extraNames []string |
| 373 | if external := s.config.testOverrides.externalAddress; external != "" { |
| 374 | extraNames = []string{external} |
| 375 | } |
| 376 | memberTemplate := pki.Certificate{ |
| 377 | Name: identity.NodeID(s.config.nodePublicKey()), |
| 378 | Namespace: &pkiNamespace, |
| 379 | Issuer: s.ca, |
| 380 | Template: pkiPeerCertificate(s.config.nodePublicKey(), extraNames), |
| 381 | Mode: pki.CertificateExternal, |
| 382 | PublicKey: s.config.nodePublicKey(), |
| 383 | } |
| 384 | memberCert, err := memberTemplate.Ensure(ctx, clPKI) |
| 385 | if err != nil { |
| 386 | return fmt.Errorf("failed to ensure member certificate: %w", err) |
| 387 | } |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 388 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 389 | // Retrieve CRL. |
| 390 | crlW := s.ca.WatchCRL(clPKI) |
| 391 | crl, err := crlW.Get(ctx) |
| 392 | if err != nil { |
| 393 | return fmt.Errorf("failed to retrieve initial CRL: %w", err) |
| 394 | } |
| 395 | |
| 396 | // We have everything we need. Write things to disk. |
| 397 | supervisor.Logger(ctx).Infof("Bootstrapping PKI: certificates issued, writing to disk...") |
| 398 | |
| 399 | if err := s.config.Data.PeerPKI.WriteAll(memberCert, s.config.NodePrivateKey, caCert); err != nil { |
| 400 | return fmt.Errorf("failed to write bootstrapped certificates: %w", err) |
| 401 | } |
| 402 | if err := s.config.Data.PeerCRL.Write(crl.Raw, 0400); err != nil { |
| 403 | return fmt.Errorf("failed tow rite CRL: %w", err) |
| 404 | } |
| 405 | |
| 406 | // Stop the server synchronously (blocking until it's fully shutdown), and |
| 407 | // return. The caller can now run the 'full' etcd instance with PKI. |
| 408 | supervisor.Logger(ctx).Infof("Bootstrapping PKI: done, stopping server...") |
| 409 | server.Close() |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 410 | return ctx.Err() |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 411 | } |
| 412 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 413 | // autopromoter is a runnable which repeatedly attempts to promote etcd learners |
| 414 | // in the cluster to full followers. This is needed to bring any new cluster |
| 415 | // members (which are always added as learners) to full membership and make them |
| 416 | // part of the etcd quorum. |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 417 | func (s *Service) autopromoter(ctx context.Context) error { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 418 | autopromote := func(ctx context.Context, cl *clientv3.Client) { |
| 419 | // Only autopromote if our endpoint is a leader. This is a bargain bin version |
| 420 | // of leader election: it's simple and cheap, but not very reliable. The most |
| 421 | // obvious failure mode is that the instance we contacted isn't a leader by the |
| 422 | // time we promote a member, but that's fine - the promotion is idempotent. What |
| 423 | // we really use the 'leader election' here for isn't for consistency, but to |
| 424 | // prevent the cluster from being hammered by spurious leadership promotion |
| 425 | // requests from every etcd member. |
| 426 | status, err := cl.Status(ctx, cl.Endpoints()[0]) |
| 427 | if err != nil { |
| 428 | supervisor.Logger(ctx).Warningf("Failed to get endpoint status: %v", err) |
| 429 | } |
| 430 | if status.Leader != status.Header.MemberId { |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 431 | return |
| 432 | } |
| 433 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 434 | members, err := cl.MemberList(ctx) |
| 435 | if err != nil { |
| 436 | supervisor.Logger(ctx).Warningf("Failed to list members: %v", err) |
| 437 | return |
| 438 | } |
| 439 | for _, member := range members.Members { |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 440 | if !member.IsLearner { |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 441 | continue |
| 442 | } |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 443 | // Always call PromoteMember since the metadata necessary to decide if we should |
| 444 | // is private. Luckily etcd already does consistency checks internally and will |
| 445 | // refuse to promote nodes that aren't connected or are still behind on |
| 446 | // transactions. |
| 447 | if _, err := cl.MemberPromote(ctx, member.ID); err != nil { |
Serge Bazanski | c735967 | 2020-10-30 16:38:57 +0100 | [diff] [blame] | 448 | supervisor.Logger(ctx).Infof("Failed to promote consensus node %s: %v", member.Name, err) |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 449 | } else { |
Serge Bazanski | c735967 | 2020-10-30 16:38:57 +0100 | [diff] [blame] | 450 | supervisor.Logger(ctx).Infof("Promoted new consensus node %s", member.Name) |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 451 | } |
| 452 | } |
| 453 | } |
Lorenz Brun | a4ea9d0 | 2019-10-31 11:40:30 +0100 | [diff] [blame] | 454 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 455 | w := s.Watch() |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 456 | for { |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 457 | st, err := w.Get(ctx) |
| 458 | if err != nil { |
| 459 | return fmt.Errorf("status get failed: %w", err) |
Lorenz Brun | 52f7f29 | 2020-06-24 16:42:02 +0200 | [diff] [blame] | 460 | } |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 461 | t := time.NewTicker(5 * time.Second) |
| 462 | for { |
| 463 | autopromote(ctx, st.cl) |
| 464 | select { |
| 465 | case <-ctx.Done(): |
| 466 | t.Stop() |
| 467 | return ctx.Err() |
| 468 | case <-t.C: |
Serge Bazanski | cb883e2 | 2020-07-06 17:47:55 +0200 | [diff] [blame] | 469 | } |
| 470 | } |
| 471 | } |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 472 | } |
| 473 | |
Serge Bazanski | f05e80a | 2021-10-12 11:53:34 +0200 | [diff] [blame^] | 474 | // selfupdater is a runnable that performs a one-shot (once per Service Run, |
| 475 | // thus once for each configuration) update of the node's Peer URL in etcd. This |
| 476 | // is currently only really needed because the first node in the cluster |
| 477 | // bootstraps itself without any peer URLs at first, and this allows it to then |
| 478 | // add the peer URLs afterwards. Instead of a runnable, this might as well have |
| 479 | // been part of the bootstarp logic, but making it a restartable runnable is |
| 480 | // more robust. |
| 481 | func (s *Service) selfupdater(ctx context.Context) error { |
| 482 | supervisor.Signal(ctx, supervisor.SignalHealthy) |
| 483 | w := s.Watch() |
| 484 | for { |
| 485 | st, err := w.Get(ctx) |
| 486 | if err != nil { |
| 487 | return fmt.Errorf("failed to get status: %w", err) |
| 488 | } |
| 489 | |
| 490 | peerURL := st.localPeerURL |
| 491 | if _, err := st.cl.MemberUpdate(ctx, st.localMemberID, []string{peerURL}); err != nil { |
| 492 | supervisor.Logger(ctx).Warningf("failed to update member: %v", err) |
| 493 | time.Sleep(1 * time.Second) |
| 494 | continue |
| 495 | } |
| 496 | |
| 497 | supervisor.Signal(ctx, supervisor.SignalDone) |
| 498 | return nil |
Serge Bazanski | a105db5 | 2021-04-12 19:57:46 +0200 | [diff] [blame] | 499 | } |
Hendrik Hofstadt | 0d7c91e | 2019-10-23 21:44:47 +0200 | [diff] [blame] | 500 | } |