// Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"fmt"
	"log"
	"math/big"
	"net"
	"os"
	"os/signal"
	"runtime/debug"
	"time"

	"golang.org/x/sys/unix"
	"google.golang.org/grpc"

	common "source.monogon.dev/metropolis/node"
	"source.monogon.dev/metropolis/node/core/cluster"
	"source.monogon.dev/metropolis/node/core/curator"
	"source.monogon.dev/metropolis/node/core/localstorage"
	"source.monogon.dev/metropolis/node/core/localstorage/declarative"
	"source.monogon.dev/metropolis/node/core/network"
	"source.monogon.dev/metropolis/node/core/roleserve"
	timesvc "source.monogon.dev/metropolis/node/core/time"
	"source.monogon.dev/metropolis/node/kubernetes/pki"
	"source.monogon.dev/metropolis/pkg/logtree"
	"source.monogon.dev/metropolis/pkg/supervisor"
	"source.monogon.dev/metropolis/pkg/tpm"
	apb "source.monogon.dev/metropolis/proto/api"
)

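// main is the entry point of the Metropolis node init process (PID 1). It
// sets up logging, mounts, local storage and the network, starts the cluster
// services under a supervisor tree, and then stays resident to handle
// signals and reap orphaned processes.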
func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("Init panicked:", r)
			debug.PrintStack()
		}
		unix.Sync()
		// TODO(lorenz): Switch this to Reboot when init panics are less likely.
		// Best effort, nothing we can do if this fails except printing the
		// error to the console.
		if err := unix.Reboot(unix.LINUX_REBOOT_CMD_POWER_OFF); err != nil {
			panic(fmt.Sprintf("failed to halt node: %v\n", err))
		}
	}()

	// Set up logger for Metropolis. Currently logs everything to stderr.
	lt := logtree.New()
	reader, err := lt.Read("", logtree.WithChildren(), logtree.WithStream())
	if err != nil {
		panic(fmt.Errorf("could not set up root log reader: %v", err))
	}
	go func() {
		for {
			p := <-reader.Stream
			fmt.Fprintf(os.Stderr, "%s\n", p.String())
		}
	}()

	// Initial logger. Used until we get to a supervisor.
	logger := lt.MustLeveledFor("init")

	// Set up basic mounts.
	err = setupMounts(logger)
	if err != nil {
		panic(fmt.Errorf("could not set up basic mounts: %w", err))
	}

	// The Linux kernel default is 4096, which is far too low. Raise it to 1M,
	// which is what gVisor suggests.
	if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &unix.Rlimit{Cur: 1048576, Max: 1048576}); err != nil {
		logger.Fatalf("Failed to raise rlimits: %v", err)
	}

	logger.Info("Starting Metropolis node init")

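	// As PID 1 we receive every signal delivered to init, including SIGCHLD
	// for orphaned processes that get reparented to us; those are handled in
	// the select loop at the bottom of main().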
	signalChannel := make(chan os.Signal, 2)
	signal.Notify(signalChannel)

	if err := tpm.Initialize(logger); err != nil {
		logger.Fatalf("Failed to initialize TPM 2.0: %v", err)
	}

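	// Instantiate the network and time services. They are only constructed
	// here; both are started later under the supervisor.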
	networkSvc := network.New()
	timeSvc := timesvc.New()

	// This function initializes a headless Delve if this is a debug build,
	// or does nothing if it's not.
	initializeDebugger(networkSvc)

	// Prepare local storage.
	root := &localstorage.Root{}
	if err := declarative.PlaceFS(root, "/"); err != nil {
		panic(fmt.Errorf("when placing root FS: %w", err))
	}

	// trapdoor is a channel used to signal to the init service that a very
	// low-level, unrecoverable failure occurred. This causes a GURU MEDITATION
	// ERROR visible to the end user.
	trapdoor := make(chan struct{})

	// Make context for supervisor. We cancel it when we reach the trapdoor.
	ctxS, ctxC := context.WithCancel(context.Background())

	// Start root initialization code as a supervisor one-shot runnable. This
	// means waiting for the network, starting the cluster manager, and then
	// starting all services related to the node's roles.
	// TODO(q3k): move this to a separate 'init' service.
	supervisor.New(ctxS, func(ctx context.Context) error {
		// Start storage and network - we need this to get anything else done.
		if err := root.Start(ctx); err != nil {
			return fmt.Errorf("cannot start root FS: %w", err)
		}
		if err := supervisor.Run(ctx, "network", networkSvc.Run); err != nil {
			return fmt.Errorf("when starting network: %w", err)
		}
		if err := supervisor.Run(ctx, "time", timeSvc.Run); err != nil {
			return fmt.Errorf("when starting time: %w", err)
		}

		// Start cluster manager. This kicks off cluster membership machinery,
		// which will either start a new cluster, enroll into one or join one.
		m := cluster.NewManager(root, networkSvc)
		if err := supervisor.Run(ctx, "enrolment", m.Run); err != nil {
			return fmt.Errorf("when starting enrolment: %w", err)
		}

		// Wait until the node finds a home in the new cluster.
		watcher := m.Watch()
		status, err := watcher.GetHome(ctx)
		if err != nil {
			close(trapdoor)
			return fmt.Errorf("node couldn't find home in new cluster, aborting: %w", err)
		}

		// Here starts some hairy stopgap code. In the future, not all nodes will have
		// direct access to etcd (ie. the ability to retrieve an etcd client via
		// status.ConsensusClient).
		// However, we are not ready to implement this yet, as that would require
		// moving more logic into the curator (eg. some of the Kubernetes PKI logic).
		//
		// For now, we keep Kubernetes PKI initialization logic here, and just assume
		// that every node will have direct access to etcd.

		// Retrieve namespaced etcd KV clients for the two main direct etcd users:
		//  - Curator
		//  - Kubernetes PKI
		ckv, err := status.ConsensusClient(cluster.ConsensusUserCurator)
		if err != nil {
			close(trapdoor)
			return fmt.Errorf("failed to retrieve consensus curator client: %w", err)
		}
		kkv, err := status.ConsensusClient(cluster.ConsensusUserKubernetesPKI)
		if err != nil {
			close(trapdoor)
			return fmt.Errorf("failed to retrieve consensus kubernetes PKI client: %w", err)
		}

		// TODO(q3k): restart curator on credentials change?

		// Start cluster curator. The cluster curator is responsible for lifecycle
		// management of the cluster.
		// In the future, this will only be started on nodes that run etcd.
		c := curator.New(curator.Config{
			Etcd:   ckv,
			NodeID: status.Credentials.ID(),
			// TODO(q3k): make this configurable?
			LeaderTTL:            time.Second * 5,
			Directory:            &root.Ephemeral.Curator,
			ServerCredentials:    status.Credentials.TLSCredentials(),
			ClusterCACertificate: status.Credentials.ClusterCA(),
		})
		if err := supervisor.Run(ctx, "curator", c.Run); err != nil {
			close(trapdoor)
			return fmt.Errorf("when starting curator: %w", err)
		}

		// We are now in a cluster. We can thus access our 'node' object and
		// start all services that we should be running.
		logger.Info("Enrolment success, continuing startup.")

		// Ensure Kubernetes PKI objects exist in etcd. In the future, this logic will
		// be implemented in the curator.
		kpki := pki.New(lt.MustLeveledFor("pki.kubernetes"), kkv)
		if err := kpki.EnsureAll(ctx); err != nil {
			close(trapdoor)
			return fmt.Errorf("failed to ensure kubernetes PKI present: %w", err)
		}

		// Start the role service. The role service connects to the curator and runs
		// all node-specific role code (eg. Kubernetes services).
		supervisor.Logger(ctx).Infof("Starting role service...")
		rs := roleserve.New(roleserve.Config{
			CuratorDial: c.DialCluster,
			StorageRoot: root,
			Network:     networkSvc,
			KPKI:        kpki,
			NodeID:      status.Credentials.ID(),
		})
		if err := supervisor.Run(ctx, "role", rs.Run); err != nil {
			close(trapdoor)
			return fmt.Errorf("failed to start role service: %w", err)
		}

		// Start the node debug service.
		supervisor.Logger(ctx).Infof("Starting debug service...")
		dbg := &debugService{
			roleserve:       rs,
			logtree:         lt,
			traceLock:       make(chan struct{}, 1),
			ephemeralVolume: &root.Ephemeral.Containerd,
		}
		dbgSrv := grpc.NewServer()
		apb.RegisterNodeDebugServiceServer(dbgSrv, dbg)
		dbgLis, err := net.Listen("tcp", fmt.Sprintf(":%d", common.DebugServicePort))
		if err != nil {
			return fmt.Errorf("failed to listen on debug service: %w", err)
		}
		if err := supervisor.Run(ctx, "debug", supervisor.GRPCServer(dbgSrv, dbgLis, false)); err != nil {
			return fmt.Errorf("failed to start debug service: %w", err)
		}

		supervisor.Signal(ctx, supervisor.SignalHealthy)
		supervisor.Signal(ctx, supervisor.SignalDone)
		return nil
	}, supervisor.WithExistingLogtree(lt))

	// We're PID 1, so orphaned processes get reparented to us to clean up.
	for {
		select {
		case <-trapdoor:
			// If the trapdoor got closed, we got stuck early enough in the
			// boot process that we can't do anything about it. Display a
			// generic error message until we handle error conditions better.
			ctxC()
			log.Printf(" #########################")
			log.Printf(" # GURU MEDITATION ERROR #")
			log.Printf(" #########################")
			log.Printf("")
			log.Printf("Metropolis encountered an uncorrectable error and this node must be")
			log.Printf("restarted.")
			log.Printf("")
			log.Printf("(Error condition: init trapdoor closed)")
			log.Printf("")
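			// Hang forever; per the message above, the node has to be
			// restarted manually to recover.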
			select {}

		case sig := <-signalChannel:
			switch sig {
			case unix.SIGCHLD:
				var status unix.WaitStatus
				var rusage unix.Rusage
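				// Reap all pending children without blocking: Wait4 with
				// WNOHANG returns 0 once no exited children are left.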
				for {
					res, err := unix.Wait4(-1, &status, unix.WNOHANG, &rusage)
					if err != nil && err != unix.ECHILD {
						logger.Errorf("Failed to wait on orphaned child: %v", err)
						break
					}
					if res <= 0 {
						break
					}
				}
			case unix.SIGURG:
				// Go 1.14 introduced asynchronous preemption, which uses
				// SIGURG. In order not to break backwards compatibility in
				// the unlikely case of an application actually using SIGURG
				// on its own, the Go runtime does not filter it out.
				// (https://github.com/golang/go/issues/37942)
				logger.V(5).Info("Ignoring SIGURG")
			// TODO(lorenz): We can probably get more than just SIGCHLD as init, but I can't think
			// of any others right now, just log them in case we hit any of them.
			default:
				logger.Warningf("Got unexpected signal %s", sig.String())
			}
		}
	}
}

// nodeCertificate creates a node key/certificate for a foreign node. This is
// duplicated code with localstorage's PKIDirectory EnsureSelfSigned, but is
// temporary (and specific to 'golden tickets').
func (s *debugService) nodeCertificate() (cert, key []byte, err error) {
	pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		err = fmt.Errorf("failed to generate key: %w", err)
		return
	}

	key, err = x509.MarshalPKCS8PrivateKey(privKey)
	if err != nil {
		err = fmt.Errorf("failed to marshal key: %w", err)
		return
	}

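	// Generate a random 127-bit serial number. X.509 serial numbers must be
	// positive and at most 20 octets long (RFC 5280), which this satisfies.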
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 127)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		err = fmt.Errorf("failed to generate serial number: %w", err)
		return
	}

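	// CertificateForNode provides the X.509 template used for node
	// certificates; only the serial number still needs to be filled in here.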
	template := localstorage.CertificateForNode(pubKey)
	template.SerialNumber = serialNumber

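	// Passing the template as its own parent makes this a self-signed
	// certificate.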
	cert, err = x509.CreateCertificate(rand.Reader, &template, &template, pubKey, privKey)
	if err != nil {
		err = fmt.Errorf("could not sign certificate: %w", err)
		return
	}
	return
}