// Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"fmt"
	"log"
	"math/big"
	"net"
	"os"
	"os/signal"
	"runtime/debug"

	"golang.org/x/sys/unix"
	"google.golang.org/grpc"

	common "source.monogon.dev/metropolis/node"
	"source.monogon.dev/metropolis/node/core/cluster"
	"source.monogon.dev/metropolis/node/core/localstorage"
	"source.monogon.dev/metropolis/node/core/localstorage/declarative"
	"source.monogon.dev/metropolis/node/core/network"
	"source.monogon.dev/metropolis/node/kubernetes"
	"source.monogon.dev/metropolis/node/kubernetes/containerd"
	"source.monogon.dev/metropolis/node/kubernetes/pki"
	"source.monogon.dev/metropolis/pkg/logtree"
	"source.monogon.dev/metropolis/pkg/supervisor"
	"source.monogon.dev/metropolis/pkg/tpm"
	apb "source.monogon.dev/metropolis/proto/api"
)

var (
	// kubernetesConfig is the static/global part of the Kubernetes service
	// configuration. In the future, this might be configurable by loading it
	// from the EnrolmentConfig. For now, it's static and the same across all
	// clusters.
	kubernetesConfig = kubernetes.Config{
		ServiceIPRange: net.IPNet{ // TODO(q3k): Decide if configurable / final value
			IP:   net.IP{10, 0, 255, 1},
			Mask: net.IPMask{0xff, 0xff, 0xff, 0x00}, // /24, but Go stores it as a literal mask
		},
		ClusterNet: net.IPNet{
			IP:   net.IP{10, 0, 0, 0},
			Mask: net.IPMask{0xff, 0xff, 0x00, 0x00}, // /16
		},
	}
)

func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("Init panicked:", r)
			debug.PrintStack()
		}
		unix.Sync()
		// TODO(lorenz): Switch this to Reboot when init panics are less likely.
		// Best effort; there is nothing we can do if this fails except printing
		// the error to the console.
		if err := unix.Reboot(unix.LINUX_REBOOT_CMD_POWER_OFF); err != nil {
			panic(fmt.Sprintf("failed to halt node: %v\n", err))
		}
	}()

	// Set up logger for Metropolis. Currently logs everything to stderr.
	lt := logtree.New()
	reader, err := lt.Read("", logtree.WithChildren(), logtree.WithStream())
	if err != nil {
		panic(fmt.Errorf("could not set up root log reader: %v", err))
	}
	go func() {
		for {
			p := <-reader.Stream
			fmt.Fprintf(os.Stderr, "%s\n", p.String())
		}
	}()

	// Initial logger. Used until we get to a supervisor.
	logger := lt.MustLeveledFor("init")

	// Set up basic mounts.
	err = setupMounts(logger)
	if err != nil {
		panic(fmt.Errorf("could not set up basic mounts: %w", err))
	}

	// The Linux kernel default for RLIMIT_NOFILE is 4096, which is far too low.
	// Raise it to 1M, which is what gVisor suggests.
	if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &unix.Rlimit{Cur: 1048576, Max: 1048576}); err != nil {
		logger.Fatalf("Failed to raise rlimits: %v", err)
	}

	logger.Info("Starting Metropolis node init")

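	// As PID 1, this process receives every signal delivered to init. Subscribe
	// to all of them here; the interesting ones (notably SIGCHLD, used to reap
	// orphaned processes) are handled in the loop at the bottom of this function.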
	signalChannel := make(chan os.Signal, 2)
	signal.Notify(signalChannel)

	if err := tpm.Initialize(logger); err != nil {
		logger.Fatalf("Failed to initialize TPM 2.0: %v", err)
	}

	networkSvc := network.New()

	// Initialize a headless Delve debugger if this is a debug build; otherwise
	// this is a no-op.
	initializeDebugger(networkSvc)

	// Prepare local storage.
	root := &localstorage.Root{}
	if err := declarative.PlaceFS(root, "/"); err != nil {
		panic(fmt.Errorf("when placing root FS: %w", err))
	}

	// trapdoor is a channel used to signal to the init service that a very
	// low-level, unrecoverable failure occurred. This causes a GURU MEDITATION
	// ERROR visible to the end user.
	trapdoor := make(chan struct{})

	// Make context for supervisor. We cancel it when we reach the trapdoor.
	ctxS, ctxC := context.WithCancel(context.Background())

	// Start root initialization code as a supervisor one-shot runnable. This
	// means waiting for the network, starting the cluster manager, and then
	// starting all services related to the node's roles.
	// TODO(q3k): move this to a separate 'init' service.
	supervisor.New(ctxS, func(ctx context.Context) error {
		logger := supervisor.Logger(ctx)

		// Start storage and network - we need this to get anything else done.
		if err := root.Start(ctx); err != nil {
			return fmt.Errorf("cannot start root FS: %w", err)
		}
		if err := supervisor.Run(ctx, "network", networkSvc.Run); err != nil {
			return fmt.Errorf("when starting network: %w", err)
		}

		// Start cluster manager. This kicks off cluster membership machinery,
		// which will either start a new cluster, enroll into one or join one.
		m := cluster.NewManager(root, networkSvc)
		if err := supervisor.Run(ctx, "enrolment", m.Run); err != nil {
			return fmt.Errorf("when starting enrolment: %w", err)
		}

		// Wait until the node finds a home in the new cluster.
		watcher := m.Watch()
		status, err := watcher.GetHome(ctx)
		if err != nil {
			close(trapdoor)
			return fmt.Errorf("node couldn't find home in new cluster, aborting: %w", err)
		}

		// We are now in a cluster. We can thus access our 'node' object and
		// start all services that we should be running.

		logger.Info("Enrolment success, continuing startup.")
		logger.Info(fmt.Sprintf("This node (%s) has roles:", status.Node.String()))
		if cm := status.Node.ConsensusMember(); cm != nil {
			// There's no need to start anything when we are a consensus
			// member - the cluster manager does this for us if necessary (as
			// creating/enrolling/joining a cluster is pretty tied into cluster
			// lifecycle management).
			logger.Info(" - etcd consensus member")
		}
		if kw := status.Node.KubernetesWorker(); kw != nil {
			logger.Info(" - kubernetes worker")
		}

		// If we're supposed to be a kubernetes worker, start kubernetes
		// services and containerd. In the future, this might be split further
		// into kubernetes control plane and data plane roles.
		// TODO(q3k): watch on cluster status updates to start/stop kubernetes
		// service.
		var containerdSvc *containerd.Service
		var kubeSvc *kubernetes.Service
		if kw := status.Node.KubernetesWorker(); kw != nil {
			logger.Info("Starting Kubernetes worker services...")

			kv, err := status.ConsensusClient(cluster.ConsensusUserKubernetesPKI)
			if err != nil {
				return fmt.Errorf("failed to retrieve consensus kubernetes PKI client: %w", err)
			}

			// Ensure Kubernetes PKI objects exist in etcd.
			kpki := pki.New(lt.MustLeveledFor("pki.kubernetes"), kv)
			if err := kpki.EnsureAll(ctx); err != nil {
				return fmt.Errorf("failed to ensure kubernetes PKI present: %w", err)
			}

			containerdSvc = &containerd.Service{
				EphemeralVolume: &root.Ephemeral.Containerd,
			}
			if err := supervisor.Run(ctx, "containerd", containerdSvc.Run); err != nil {
				return fmt.Errorf("failed to start containerd service: %w", err)
			}

			kubernetesConfig.KPKI = kpki
			kubernetesConfig.Root = root
			kubernetesConfig.Network = networkSvc
			kubeSvc = kubernetes.New(kubernetesConfig)
			if err := supervisor.Run(ctx, "kubernetes", kubeSvc.Run); err != nil {
				return fmt.Errorf("failed to start kubernetes service: %w", err)
			}

		}

		// Start the node debug service.
		dbg := &debugService{
			cluster:    m,
			logtree:    lt,
			kubernetes: kubeSvc,
			traceLock:  make(chan struct{}, 1),
		}
		dbgSrv := grpc.NewServer()
		apb.RegisterNodeDebugServiceServer(dbgSrv, dbg)
		dbgLis, err := net.Listen("tcp", fmt.Sprintf(":%d", common.DebugServicePort))
		if err != nil {
			return fmt.Errorf("failed to listen on debug service: %w", err)
		}
		if err := supervisor.Run(ctx, "debug", supervisor.GRPCServer(dbgSrv, dbgLis, false)); err != nil {
			return fmt.Errorf("failed to start debug service: %w", err)
		}

		supervisor.Signal(ctx, supervisor.SignalHealthy)
		supervisor.Signal(ctx, supervisor.SignalDone)
		return nil
	}, supervisor.WithExistingLogtree(lt))

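	// supervisor.New starts the root runnable asynchronously and returns; the
	// rest of main() acts as init's signal-handling and reaping loop.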
	// We're PID 1, so orphaned processes get reparented to us and we have to
	// reap them.
	for {
		select {
		case <-trapdoor:
			// If the trapdoor got closed, we got stuck early enough in the
			// boot process that we can't do anything about it. Display a
			// generic error message until we handle error conditions better.
			ctxC()
			log.Printf(" #########################")
			log.Printf(" # GURU MEDITATION ERROR #")
			log.Printf(" #########################")
			log.Printf("")
			log.Printf("Metropolis encountered an uncorrectable error and this node must be")
			log.Printf("restarted.")
			log.Printf("")
			log.Printf("(Error condition: init trapdoor closed)")
			log.Printf("")
			// Block forever, leaving the message above on the console.
			select {}

		case sig := <-signalChannel:
			switch sig {
			case unix.SIGCHLD:
				var status unix.WaitStatus
				var rusage unix.Rusage
				for {
					res, err := unix.Wait4(-1, &status, unix.WNOHANG, &rusage)
					if err != nil && err != unix.ECHILD {
						logger.Errorf("Failed to wait on orphaned child: %v", err)
						break
					}
					if res <= 0 {
						break
					}
				}
			case unix.SIGURG:
				// Go 1.14 introduced asynchronous preemption, which uses
				// SIGURG. In order not to break backwards compatibility in the
				// unlikely case of an application actually using SIGURG on its
				// own, the Go runtime does not filter these signals.
				// (https://github.com/golang/go/issues/37942)
				logger.V(5).Info("Ignoring SIGURG")
			// TODO(lorenz): We can probably get more than just SIGCHLD as init, but I can't think
			// of any others right now, just log them in case we hit any of them.
			default:
				logger.Warningf("Got unexpected signal %s", sig.String())
			}
		}
	}
}

// nodeCertificate creates a node key/certificate for a foreign node. This
// duplicates code from localstorage's PKIDirectory EnsureSelfSigned, but is
// temporary (and specific to 'golden tickets').
func (s *debugService) nodeCertificate() (cert, key []byte, err error) {
	pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		err = fmt.Errorf("failed to generate key: %w", err)
		return
	}

	key, err = x509.MarshalPKCS8PrivateKey(privKey)
	if err != nil {
		err = fmt.Errorf("failed to marshal key: %w", err)
		return
	}

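	// Pick a random certificate serial number in [0, 2^127).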
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 127)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		err = fmt.Errorf("failed to generate serial number: %w", err)
		return
	}

	template := localstorage.CertificateForNode(pubKey)
	template.SerialNumber = serialNumber

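	// The template is passed as both subject and issuer, making this a
	// self-signed certificate.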
	cert, err = x509.CreateCertificate(rand.Reader, &template, &template, pubKey, privKey)
	if err != nil {
		err = fmt.Errorf("could not sign certificate: %w", err)
		return
	}
	return
}