// Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package consensus implements a runnable that manages an etcd instance which
// forms part of a Metropolis etcd cluster. This cluster is a foundational
// building block of Metropolis and its startup/management sequencing needs to
// be as robust as possible.
//
// Cluster Structure
//
// Each etcd instance listens for two kinds of traffic:
//
// 1. Peer traffic over TLS on a TCP port of the node's main interface. This is
// where other etcd instances connect to exchange peer traffic, perform
// transactions and build quorum. The TLS credentials are stored in a PKI that
// is managed internally by the consensus runnable, with its state stored in
// etcd itself.
//
// 2. Client traffic over a local domain socket, with access control based on
// standard Linux user/group permissions. Currently this grants any code running
// as root in the host namespace full access to the etcd cluster.
//
// This means that if code running on a node wishes to perform etcd
// transactions, it must also run an etcd instance. This colocation of all
// direct etcd access and the etcd instances themselves effectively delegates
// all Metropolis control plane functionality to whatever subset of nodes is
// running consensus and all code that connects to etcd directly (the Curator).
//
// For example, if nodes foo and baz are part of the control plane, but
// node-worker is not:
//
// .---------------------.
// | node-foo            |
// |---------------------|
// | .--------------------.
// | | etcd               |<---etcd/TLS--.  (node.ConsensusPort)
// | '--------------------'              |
// |   ^ Domain Socket   |               |
// |   | etcd/plain      |               |
// | .--------------------.              |
// | | curator            |<---gRPC/TLS----.  (node.CuratorServicePort)
// | '--------------------'              | |
// |   ^ Domain Socket   |               | |
// |   | gRPC/plain      |               | |
// | .-----------------. |               | |
// | | node logic      | |               | |
// | '-----------------' |               | |
// '---------------------'               | |
//                                       | |
// .---------------------.               | |
// | node-baz            |               | |
// |---------------------|               | |
// | .--------------------.              | |
// | | etcd               |<-------------' |
// | '--------------------'                |
// |   ^ Domain Socket   |                 |
// |   | etcd/plain      |                 |
// | .--------------------.                |
// | | curator            |<---gRPC/TLS----:
// | '--------------------'                |
// | ...                 |                 |
// '---------------------'                 |
//                                         |
// .---------------------.                 |
// | node-worker         |                 |
// |---------------------|                 |
// | .-----------------. |                 |
// | | node logic      |-------------------'
// | '-----------------' |
// '---------------------'
//
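//
// Usage
//
// A minimal sketch of how a caller might wire this service up, assuming a
// Config assembled elsewhere (for example by the code that decides this node
// should run the control plane):
//
//   svc := consensus.New(cfg)
//   if err := supervisor.Run(ctx, "consensus", svc.Run); err != nil {
//       // handle error
//   }
//
//   // Wait for the member to come up and retrieve its Status.
//   w := svc.Watch()
//   st, err := w.Get(ctx)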
package consensus

import (
	"context"
	"crypto/ed25519"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/embed"

	"source.monogon.dev/metropolis/node/core/consensus/client"
	"source.monogon.dev/metropolis/node/core/identity"
	"source.monogon.dev/metropolis/pkg/event/memory"
	"source.monogon.dev/metropolis/pkg/logtree/unraw"
	"source.monogon.dev/metropolis/pkg/pki"
	"source.monogon.dev/metropolis/pkg/supervisor"
)

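// pkiNamespace is the etcd key prefix under which the cluster-internal PKI
// (the etcd CA and peer certificates) keeps its state.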
var (
	pkiNamespace = pki.Namespaced("/pki/")
)

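// pkiCA returns the Certificate template for the self-signed CA used to issue
// etcd peer certificates, rooted in the cluster's pkiNamespace.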
func pkiCA() *pki.Certificate {
	return &pki.Certificate{
		Name:      "CA",
		Namespace: &pkiNamespace,
		Issuer:    pki.SelfSigned,
		Template: x509.Certificate{
			SerialNumber: big.NewInt(1),
			Subject: pkix.Name{
				CommonName: "Metropolis etcd CA Certificate",
			},
			IsCA:        true,
			KeyUsage:    x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature,
			ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageOCSPSigning},
		},
	}
}

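// pkiPeerCertificate returns an x509 template for an etcd peer certificate
// issued to a node with the given public key. The node's ID is used as the
// CommonName and as a DNS SAN, with extraNames added as additional DNS SANs.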
func pkiPeerCertificate(pubkey ed25519.PublicKey, extraNames []string) x509.Certificate {
	return x509.Certificate{
		Subject: pkix.Name{
			CommonName: identity.NodeID(pubkey),
		},
		KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage: []x509.ExtKeyUsage{
			x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth,
		},
		DNSNames: append(extraNames, identity.NodeID(pubkey)),
	}
}

// Service is the etcd cluster member service. See package-level documentation
// for more information.
type Service struct {
	config *Config

	// value is the event Value on which the current Status of the service is
	// published, as consumed via Watch.
	value memory.Value
	// ca is the PKI CA used to issue etcd peer certificates, with its state
	// kept in etcd itself.
	ca *pki.Certificate
}

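// New creates a Service from the given Config. The returned Service must then
// be started by running its Run method, typically as a supervisor runnable.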
func New(config Config) *Service {
	return &Service{
		config: &config,
	}
}

// Run is a Supervisor runnable that starts the etcd member service. It will
// become healthy once the member joins the cluster successfully.
func (s *Service) Run(ctx context.Context) error {
	// Always re-create CA to make sure we don't have PKI state from previous runs.
	//
	// TODO(q3k): make the PKI library immune to this misuse.
	s.ca = pkiCA()

	// Create log converter. This will ingest etcd logs and pipe them out to this
	// runnable's leveled logging facilities.
	//
	// TODO(q3k): add support for streaming to a sub-logger in the tree to get
	// cleaner logs.
	converter := unraw.Converter{
		Parser:            parseEtcdLogEntry,
		MaximumLineLength: 8192,
		LeveledLogger:     supervisor.Logger(ctx),
	}
	fifoPath := s.config.Ephemeral.ServerLogsFIFO.FullPath()
	pipe, err := converter.NamedPipeReader(fifoPath)
	if err != nil {
		return fmt.Errorf("when creating pipe reader: %w", err)
	}
	if err := supervisor.Run(ctx, "piper", pipe); err != nil {
		return fmt.Errorf("when starting log piper: %w", err)
	}

	// Create autopromoter, which will automatically promote all learners to full
	// etcd members.
	if err := supervisor.Run(ctx, "autopromoter", s.autopromoter); err != nil {
		return fmt.Errorf("when starting autopromoter: %w", err)
	}

	// Create selfupdater, which will perform a one-shot update of this member's
	// peer address in etcd.
	if err := supervisor.Run(ctx, "selfupdater", s.selfupdater); err != nil {
		return fmt.Errorf("when starting selfupdater: %w", err)
	}

	// Prepare cluster PKI credentials.
	ppki := s.config.Data.PeerPKI
	jc := s.config.JoinCluster
	if jc != nil {
		// For nodes that join an existing cluster, or re-join it, always write whatever
		// we've been given on startup.
		if err := ppki.WriteAll(jc.NodeCertificate.Raw, s.config.NodePrivateKey, jc.CACertificate.Raw); err != nil {
			return fmt.Errorf("when writing credentials for join: %w", err)
		}
		if err := s.config.Data.PeerCRL.Write(jc.InitialCRL.Raw, 0400); err != nil {
			return fmt.Errorf("when writing CRL for join: %w", err)
		}
	} else {
		// For other nodes, we should already have credentials from a previous join, or
		// a previous bootstrap. If none exist, assume we need to bootstrap these
		// credentials.
		//
		// TODO(q3k): once we have node join (ie. node restart from disk) flow, add a
		// special configuration marker to prevent spurious bootstraps.
		absent, err := ppki.AllAbsent()
		if err != nil {
			return fmt.Errorf("when checking for PKI file absence: %w", err)
		}
		if absent {
			if err := s.bootstrap(ctx, fifoPath); err != nil {
				return fmt.Errorf("bootstrap failed: %w", err)
			}
		} else {
			supervisor.Logger(ctx).Info("PKI data present, not bootstrapping.")
		}
	}

	// Start etcd ...
	cfg := s.config.build(true)
	server, err := embed.StartEtcd(cfg)
	if err != nil {
		return fmt.Errorf("when starting etcd: %w", err)
	}

	// ... wait for server to be ready...
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-server.Server.ReadyNotify():
	}

	// ... build a client to its socket...
	cl, err := s.config.localClient()
	if err != nil {
		return fmt.Errorf("getting local client failed: %w", err)
	}

	// ... and wait until we're not a learner anymore.
	for {
		members, err := cl.MemberList(ctx)
		if err != nil {
			supervisor.Logger(ctx).Warningf("MemberList failed: %v", err)
			time.Sleep(time.Second)
			continue
		}

		isMember := false
		for _, member := range members.Members {
			if member.ID != uint64(server.Server.ID()) {
				continue
			}
			if !member.IsLearner {
				isMember = true
				break
			}
		}
		if isMember {
			break
		}
		supervisor.Logger(ctx).Warningf("Still a learner, waiting...")
		time.Sleep(time.Second)
	}

	// All done! Report status.
	supervisor.Logger(ctx).Infof("etcd server ready")

	st := &Status{
		localPeerURL:  cfg.APUrls[0].String(),
		localMemberID: uint64(server.Server.ID()),
		cl:            cl,
		ca:            s.ca,
	}
	s.value.Set(st)

	// Wait until server dies for whatever reason, update status when that
	// happens.
	supervisor.Signal(ctx, supervisor.SignalHealthy)
	select {
	case err = <-server.Err():
		err = fmt.Errorf("server returned error: %w", err)
	case <-ctx.Done():
		server.Close()
		err = ctx.Err()
	}
	st.stopped = true
	s.value.Set(st)
	return err
}

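// clientFor wraps the given etcd client into a namespaced client rooted at the
// prefix built from the given path parts.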
func clientFor(kv *clientv3.Client, parts ...string) (client.Namespaced, error) {
	var err error
	namespaced := client.NewLocal(kv)
	for _, el := range parts {
		namespaced, err = namespaced.Sub(el)
		if err != nil {
			return nil, fmt.Errorf("when getting sub client: %w", err)
		}
	}
	return namespaced, nil
}

// bootstrap performs a procedure to resolve the following bootstrap problem:
// in order to start an etcd server for consensus, we need it to serve over TLS.
// However, these TLS certificates also need to be stored in etcd so that
// further certificates can be issued for new nodes.
//
// This was previously solved by using a special PKI/TLS management system that
// could first create certificates and keys in memory, then only commit them to
// etcd. However, this ended up being somewhat brittle in the face of startup
// sequencing issues, so we're now going with a different approach.
//
// This function starts an etcd instance first without any PKI/TLS support,
// without listening on any external port for peer traffic. Once the instance is
// running, it uses the standard Metropolis pki library to create all required
// data directly in the running etcd instance. It then writes all required
// startup data (node private key, member certificate, CA certificate) to disk,
// so that a 'full' etcd instance can be started.
func (s *Service) bootstrap(ctx context.Context, fifoPath string) error {
	supervisor.Logger(ctx).Infof("Bootstrapping PKI: starting etcd...")

	cfg := s.config.build(false)
	// This will make etcd create data directories and create a fully new cluster if
	// needed. If we're restarting due to an error, the old cluster data will still
	// exist.
	cfg.ClusterState = "new"

	// Start the bootstrap etcd instance...
	server, err := embed.StartEtcd(cfg)
	if err != nil {
		return fmt.Errorf("failed to start etcd: %w", err)
	}

	// ... wait for it to run ...
	select {
	case <-server.Server.ReadyNotify():
	case <-ctx.Done():
		return fmt.Errorf("when waiting for bootstrap etcd: %w", ctx.Err())
	}

	// ... create a client to it ...
	cl, err := s.config.localClient()
	if err != nil {
		return fmt.Errorf("when getting bootstrap client: %w", err)
	}

	// ... and build PKI there. This is idempotent, so we will never override
	// anything that's already in the cluster, instead just retrieve it.
	supervisor.Logger(ctx).Infof("Bootstrapping PKI: etcd running, building PKI...")
	clPKI, err := clientFor(cl, "namespaced", "etcd-pki")
	if err != nil {
		return fmt.Errorf("when getting pki client: %w", err)
	}
	defer clPKI.Close()
	caCert, err := s.ca.Ensure(ctx, clPKI)
	if err != nil {
		return fmt.Errorf("failed to ensure CA certificate: %w", err)
	}

	// If we're running with a test overridden external address (eg. localhost), we
	// need to also make that part of the member certificate.
	var extraNames []string
	if external := s.config.testOverrides.externalAddress; external != "" {
		extraNames = []string{external}
	}
	memberTemplate := pki.Certificate{
		Name:      identity.NodeID(s.config.nodePublicKey()),
		Namespace: &pkiNamespace,
		Issuer:    s.ca,
		Template:  pkiPeerCertificate(s.config.nodePublicKey(), extraNames),
		Mode:      pki.CertificateExternal,
		PublicKey: s.config.nodePublicKey(),
	}
	memberCert, err := memberTemplate.Ensure(ctx, clPKI)
	if err != nil {
		return fmt.Errorf("failed to ensure member certificate: %w", err)
	}

	// Retrieve CRL.
	crlW := s.ca.WatchCRL(clPKI)
	crl, err := crlW.Get(ctx)
	if err != nil {
		return fmt.Errorf("failed to retrieve initial CRL: %w", err)
	}

	// We have everything we need. Write things to disk.
	supervisor.Logger(ctx).Infof("Bootstrapping PKI: certificates issued, writing to disk...")

	if err := s.config.Data.PeerPKI.WriteAll(memberCert, s.config.NodePrivateKey, caCert); err != nil {
		return fmt.Errorf("failed to write bootstrapped certificates: %w", err)
	}
	if err := s.config.Data.PeerCRL.Write(crl.Raw, 0400); err != nil {
		return fmt.Errorf("failed to write CRL: %w", err)
	}

	// Stop the server synchronously (blocking until it's fully shut down), and
	// return. The caller can now run the 'full' etcd instance with PKI.
	supervisor.Logger(ctx).Infof("Bootstrapping PKI: done, stopping server...")
	server.Close()
	return ctx.Err()
}

// autopromoter is a runnable which repeatedly attempts to promote etcd learners
// in the cluster to full followers. This is needed to bring any new cluster
// members (which are always added as learners) to full membership and make them
// part of the etcd quorum.
func (s *Service) autopromoter(ctx context.Context) error {
	autopromote := func(ctx context.Context, cl *clientv3.Client) {
		// Only autopromote if our endpoint is a leader. This is a bargain bin version
		// of leader election: it's simple and cheap, but not very reliable. The most
		// obvious failure mode is that the instance we contacted isn't a leader by the
		// time we promote a member, but that's fine - the promotion is idempotent. What
		// we really use the 'leader election' here for isn't for consistency, but to
		// prevent the cluster from being hammered by spurious learner promotion
		// requests from every etcd member.
		status, err := cl.Status(ctx, cl.Endpoints()[0])
		if err != nil {
			supervisor.Logger(ctx).Warningf("Failed to get endpoint status: %v", err)
			return
		}
		if status.Leader != status.Header.MemberId {
			return
		}

		members, err := cl.MemberList(ctx)
		if err != nil {
			supervisor.Logger(ctx).Warningf("Failed to list members: %v", err)
			return
		}
		for _, member := range members.Members {
			if !member.IsLearner {
				continue
			}
			// Always call PromoteMember since the metadata necessary to decide if we should
			// is private. Luckily etcd already does consistency checks internally and will
			// refuse to promote nodes that aren't connected or are still behind on
			// transactions.
			if _, err := cl.MemberPromote(ctx, member.ID); err != nil {
				supervisor.Logger(ctx).Infof("Failed to promote consensus node %s: %v", member.Name, err)
			} else {
				supervisor.Logger(ctx).Infof("Promoted new consensus node %s", member.Name)
			}
		}
	}

	w := s.Watch()
	for {
		st, err := w.Get(ctx)
		if err != nil {
			return fmt.Errorf("status get failed: %w", err)
		}
		t := time.NewTicker(5 * time.Second)
		for {
			autopromote(ctx, st.cl)
			select {
			case <-ctx.Done():
				t.Stop()
				return ctx.Err()
			case <-t.C:
			}
		}
	}
}

// selfupdater is a runnable that performs a one-shot (once per Service Run,
// thus once for each configuration) update of the node's Peer URL in etcd. This
// is currently only really needed because the first node in the cluster
// bootstraps itself without any peer URLs at first, and this allows it to then
// add the peer URLs afterwards. Instead of a runnable, this might as well have
// been part of the bootstrap logic, but making it a restartable runnable is
// more robust.
func (s *Service) selfupdater(ctx context.Context) error {
	supervisor.Signal(ctx, supervisor.SignalHealthy)
	w := s.Watch()
	for {
		st, err := w.Get(ctx)
		if err != nil {
			return fmt.Errorf("failed to get status: %w", err)
		}

		if st.localPeerURL != "" {
			supervisor.Logger(ctx).Infof("Updating local peer URL...")
			peerURL := st.localPeerURL
			if _, err := st.cl.MemberUpdate(ctx, st.localMemberID, []string{peerURL}); err != nil {
				supervisor.Logger(ctx).Warningf("failed to update member: %v", err)
				time.Sleep(1 * time.Second)
				continue
			}
		} else {
			supervisor.Logger(ctx).Infof("No local peer URL, not updating.")
		}

		supervisor.Signal(ctx, supervisor.SignalDone)
		return nil
	}
}