core/internal/cluster: add new single-node cluster code
This adds a cluster library that consists of:
- a Node object that can be loaded from and saved into etcd,
  representing a node of the cluster that can have different 'role
  tags' assigned to it
- a cluster Manager that is responsible for bringing up the local node
  into a cluster (by creating a new cluster, enrolling into one, or
  joining one)
This also gets wired into core/cmd/init, and as such completes a chunk
of The Refactor.

Test Plan: covered by existing e2e tests.
X-Origin-Diff: phab/D590
GitOrigin-RevId: e88022164e4353249b29fc16849a02805f15dd49
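For reviewers skimming the diff, the way init consumes the new cluster
library is roughly the following (condensed from the main.go changes
below; error wrapping and the surrounding supervisor runnable are
elided, so this is an illustration rather than a self-contained
program):

    // Start the cluster manager under the supervisor. It will create a new
    // cluster, enroll into one, or join one.
    m := cluster.NewManager(root, networkSvc)
    if err := supervisor.Run(ctx, "enrolment", m.Run); err != nil {
        return fmt.Errorf("when starting enrolment: %w", err)
    }
    // Block until the manager has settled (or failed).
    if !m.WaitFinished() {
        return fmt.Errorf("enrolment failed, aborting")
    }
    // We are now in a cluster; the local Node object and its role tags are
    // available, as is etcd access for services that need it.
    node := m.Node()
    if node.ConsensusMember() != nil {
        // Nothing to start here - the cluster manager runs etcd itself.
    }
    if node.KubernetesWorker() != nil {
        // Start containerd and Kubernetes, using m.ConsensusKV(...) for
        // cluster-wide data such as the Kubernetes PKI.
    }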
diff --git a/core/cmd/init/main.go b/core/cmd/init/main.go
index f5b09f8..0dc7d5e 100644
--- a/core/cmd/init/main.go
+++ b/core/cmd/init/main.go
@@ -19,23 +19,42 @@
import (
"context"
"fmt"
+ "log"
+ "net"
"os"
"os/signal"
"runtime/debug"
- "git.monogon.dev/source/nexantic.git/core/internal/common/supervisor"
- "git.monogon.dev/source/nexantic.git/core/internal/network"
- "git.monogon.dev/source/nexantic.git/core/internal/node"
- "git.monogon.dev/source/nexantic.git/core/internal/storage"
- "git.monogon.dev/source/nexantic.git/core/pkg/tpm"
-
"go.uber.org/zap"
"golang.org/x/sys/unix"
+ "google.golang.org/grpc"
+
+ "git.monogon.dev/source/nexantic.git/core/internal/cluster"
+ "git.monogon.dev/source/nexantic.git/core/internal/common"
+ "git.monogon.dev/source/nexantic.git/core/internal/common/supervisor"
+ "git.monogon.dev/source/nexantic.git/core/internal/containerd"
+ "git.monogon.dev/source/nexantic.git/core/internal/kubernetes"
+ "git.monogon.dev/source/nexantic.git/core/internal/kubernetes/pki"
+ "git.monogon.dev/source/nexantic.git/core/internal/localstorage"
+ "git.monogon.dev/source/nexantic.git/core/internal/localstorage/declarative"
+ "git.monogon.dev/source/nexantic.git/core/internal/network"
+ "git.monogon.dev/source/nexantic.git/core/pkg/tpm"
+ apb "git.monogon.dev/source/nexantic.git/core/proto/api"
)
-const (
- apiPort = 7833
- consensusPort = 7834
+var (
+ // kubernetesConfig is the static/global part of the Kubernetes service configuration. In the future, this might
+ // be configurable by loading it from the EnrolmentConfig. For now, it's static and the same across all clusters.
+ kubernetesConfig = kubernetes.Config{
+ ServiceIPRange: net.IPNet{ // TODO(q3k): Decide if configurable / final value
+ IP: net.IP{192, 168, 188, 0},
+ Mask: net.IPMask{0xff, 0xff, 0xff, 0x00}, // /24, but Go stores as a literal mask
+ },
+ ClusterNet: net.IPNet{
+ IP: net.IP{10, 0, 0, 0},
+ Mask: net.IPMask{0xff, 0xff, 0x00, 0x00}, // /16
+ },
+ }
)
func main() {
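A note on the mask literals in the kubernetesConfig hunk above: they are
simply the expanded form of what net.CIDRMask produces for /24 and /16
over IPv4. The following standalone snippet (illustration only, not part
of this change) shows the equivalence using just the standard library:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // Same ranges as kubernetesConfig above, written with net.CIDRMask.
        service := net.IPNet{IP: net.IP{192, 168, 188, 0}, Mask: net.CIDRMask(24, 32)}
        cluster := net.IPNet{IP: net.IP{10, 0, 0, 0}, Mask: net.CIDRMask(16, 32)}
        fmt.Println(service.String()) // 192.168.188.0/24
        fmt.Println(cluster.String()) // 10.0.0.0/16
    }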
@@ -76,70 +95,176 @@
logger.Panic("Failed to initialize TPM 2.0", zap.Error(err))
}
- storageManager, err := storage.Initialize(logger.With(zap.String("component", "storage")))
- if err != nil {
- panic(fmt.Errorf("could not initialize storage: %w", err))
- }
-
networkSvc := network.New(network.Config{})
- if err != nil {
- panic(err)
- }
// This function initializes a headless Delve if this is a debug build or does nothing if it's not
initializeDebugger(networkSvc)
- supervisor.New(context.Background(), logger, func(ctx context.Context) error {
+ // Prepare local storage.
+ root := &localstorage.Root{}
+ if err := declarative.PlaceFS(root, "/"); err != nil {
+ panic(fmt.Errorf("when placing root FS: %w", err))
+ }
+
+ // trapdoor is a channel used to signal to the init service that a very low-level, unrecoverable failure
+ // occurred. This causes a GURU MEDITATION ERROR visible to the end user.
+ trapdoor := make(chan struct{})
+
+ // Make context for supervisor. We cancel it when we reach the trapdoor.
+ ctxS, ctxC := context.WithCancel(context.Background())
+
+ // Start root initialization code as a supervisor one-shot runnable. This means waiting for the network, starting
+ // the cluster manager, and then starting all services related to the node's roles.
+ // TODO(q3k): move this to a separate 'init' service.
+ supervisor.New(ctxS, logger, func(ctx context.Context) error {
+ logger := supervisor.Logger(ctx)
+
+ // Start storage and network - we need this to get anything else done.
+ if err := root.Start(ctx); err != nil {
+ return fmt.Errorf("cannot start root FS: %w", err)
+ }
if err := supervisor.Run(ctx, "network", networkSvc.Run); err != nil {
- return err
+ return fmt.Errorf("when starting network: %w", err)
}
- // TODO(q3k): convert node code to supervisor
- nodeInstance, err := node.NewSmalltownNode(logger, networkSvc, storageManager)
+ // Wait for IP address from network.
+ ip, err := networkSvc.GetIP(ctx, true)
if err != nil {
- panic(err)
+ return fmt.Errorf("when waiting for IP address: %w", err)
}
- err = nodeInstance.Start(ctx)
+ // Start cluster manager. This kicks off cluster membership machinery, which will either start
+ // a new cluster, enroll into one or join one.
+ m := cluster.NewManager(root, networkSvc)
+ if err := supervisor.Run(ctx, "enrolment", m.Run); err != nil {
+ return fmt.Errorf("when starting enrolment: %w", err)
+ }
+
+ // Wait until the cluster manager settles.
+ success := m.WaitFinished()
+ if !success {
+ close(trapdoor)
+ return fmt.Errorf("enrolment failed, aborting")
+ }
+
+ // We are now in a cluster. We can thus access our 'node' object and start all services that
+ // we should be running.
+
+ node := m.Node()
+ if err := node.ConfigureLocalHostname(&root.Etc); err != nil {
+ close(trapdoor)
+ return fmt.Errorf("failed to set local hostname: %w", err)
+ }
+
+ logger.Info("Enrolment success, continuing startup.")
+ logger.Info(fmt.Sprintf("This node (%s) has roles:", node.String()))
+ if cm := node.ConsensusMember(); cm != nil {
+ // There's no need to start anything when we are a consensus member - the cluster
+ // manager does this for us if necessary (as creating/enrolling/joining a cluster is
+ // pretty tied into cluster lifecycle management).
+ logger.Info(" - etcd consensus member")
+ }
+ if kw := node.KubernetesWorker(); kw != nil {
+ logger.Info(" - kubernetes worker")
+ }
+
+ // If we're supposed to be a kubernetes worker, start kubernetes services and containerd.
+ // In the future, this might be split further into kubernetes control plane and data plane
+ // roles.
+ var containerdSvc *containerd.Service
+ var kubeSvc *kubernetes.Service
+ if kw := node.KubernetesWorker(); kw != nil {
+ logger.Info("Starting Kubernetes worker services...")
+
+ // Ensure Kubernetes PKI objects exist in etcd.
+ kpkiKV := m.ConsensusKV("cluster", "kpki")
+ kpki := pki.NewKubernetes(logger.Named("kpki"), kpkiKV)
+ if err := kpki.EnsureAll(ctx); err != nil {
+ return fmt.Errorf("failed to ensure kubernetes PKI present: %w", err)
+ }
+
+ containerdSvc = &containerd.Service{
+ EphemeralVolume: &root.Ephemeral.Containerd,
+ }
+ if err := supervisor.Run(ctx, "containerd", containerdSvc.Run); err != nil {
+ return fmt.Errorf("failed to start containerd service: %w", err)
+ }
+
+ kubernetesConfig.KPKI = kpki
+ kubernetesConfig.Root = root
+ kubernetesConfig.AdvertiseAddress = *ip
+ kubeSvc = kubernetes.New(kubernetesConfig)
+ if err := supervisor.Run(ctx, "kubernetes", kubeSvc.Run); err != nil {
+ return fmt.Errorf("failed to start kubernetes service: %w", err)
+ }
+
+ }
+
+ // Start the node debug service.
+ // TODO(q3k): this needs to be done in a smarter way once LogTree lands, and then a few things can be
+ // refactored to start this earlier, or this can be split up into multiple gRPC services on a single listener.
+ dbg := &debugService{
+ cluster: m,
+ containerd: containerdSvc,
+ kubernetes: kubeSvc,
+ }
+ dbgSrv := grpc.NewServer()
+ apb.RegisterNodeDebugServiceServer(dbgSrv, dbg)
+ dbgLis, err := net.Listen("tcp", fmt.Sprintf(":%d", common.DebugServicePort))
if err != nil {
- panic(err)
+ return fmt.Errorf("failed to listen on debug service: %w", err)
+ }
+ if err := supervisor.Run(ctx, "debug", supervisor.GRPCServer(dbgSrv, dbgLis, false)); err != nil {
+ return fmt.Errorf("failed to start debug service: %w", err)
}
supervisor.Signal(ctx, supervisor.SignalHealthy)
+ supervisor.Signal(ctx, supervisor.SignalDone)
+ return nil
+ })
- // We're PID1, so orphaned processes get reparented to us to clean up
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- case sig := <-signalChannel:
- switch sig {
- case unix.SIGCHLD:
- var status unix.WaitStatus
- var rusage unix.Rusage
- for {
- res, err := unix.Wait4(-1, &status, unix.WNOHANG, &rusage)
- if err != nil && err != unix.ECHILD {
- logger.Error("Failed to wait on orphaned child", zap.Error(err))
- break
- }
- if res <= 0 {
- break
- }
+ // We're PID1, so orphaned processes get reparented to us to clean up
+ for {
+ select {
+ case <-trapdoor:
+ // If the trapdoor got closed, we got stuck early enough in the boot process that we can't do anything about
+ // it. Display a generic error message until we handle error conditions better.
+ ctxC()
+ log.Printf(" ########################")
+ log.Printf(" # GURU MEDIATION ERROR #")
+ log.Printf(" ########################")
+ log.Printf("")
+ log.Printf("Smalltown encountered an uncorrectable error and must be restarted.")
+ log.Printf("(Error condition: init trapdoor closed)")
+ log.Printf("")
+ select {}
+
+ case sig := <-signalChannel:
+ switch sig {
+ case unix.SIGCHLD:
+ var status unix.WaitStatus
+ var rusage unix.Rusage
+ for {
+ res, err := unix.Wait4(-1, &status, unix.WNOHANG, &rusage)
+ if err != nil && err != unix.ECHILD {
+ logger.Error("Failed to wait on orphaned child", zap.Error(err))
+ break
}
- case unix.SIGURG:
- // Go 1.14 introduced asynchronous preemption, which uses SIGURG.
- // In order not to break backwards compatibility in the unlikely case
- // of an application actually using SIGURG on its own, they're not filtering them.
- // (https://github.com/golang/go/issues/37942)
- logger.Debug("Ignoring SIGURG")
- // TODO(lorenz): We can probably get more than just SIGCHLD as init, but I can't think
- // of any others right now, just log them in case we hit any of them.
- default:
- logger.Warn("Got unexpected signal", zap.String("signal", sig.String()))
+ if res <= 0 {
+ break
+ }
}
+ case unix.SIGURG:
+ // Go 1.14 introduced asynchronous preemption, which uses SIGURG.
+ // In order not to break backwards compatibility in the unlikely case
+ // of an application actually using SIGURG on its own, they're not filtering them.
+ // (https://github.com/golang/go/issues/37942)
+ logger.Debug("Ignoring SIGURG")
+ // TODO(lorenz): We can probably get more than just SIGCHLD as init, but I can't think
+ // of any others right now, just log them in case we hit any of them.
+ default:
+ logger.Warn("Got unexpected signal", zap.String("signal", sig.String()))
}
}
- })
- select {}
+ }
}
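The PID 1 loop at the end of main.go combines two concerns: the
trapdoor channel for unrecoverable early-boot failures, and reaping of
orphaned children that get reparented to init. A minimal standalone
sketch of that pattern (simplified names and logging, not the actual
Smalltown code) looks like this:

    package main

    import (
        "log"
        "os"
        "os/signal"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Closed by lower-level code when an unrecoverable failure occurs.
        trapdoor := make(chan struct{})

        signals := make(chan os.Signal, 128)
        signal.Notify(signals, unix.SIGCHLD, unix.SIGURG)

        // ... start the supervisor here; runnables close(trapdoor) on fatal errors ...

        for {
            select {
            case <-trapdoor:
                log.Printf("unrecoverable failure, halting")
                select {} // hang forever; the machine has to be restarted

            case sig := <-signals:
                switch sig {
                case unix.SIGCHLD:
                    // As PID 1, reap any orphaned children that have exited.
                    var status unix.WaitStatus
                    var rusage unix.Rusage
                    for {
                        pid, err := unix.Wait4(-1, &status, unix.WNOHANG, &rusage)
                        if err != nil || pid <= 0 {
                            break
                        }
                    }
                case unix.SIGURG:
                    // Used by the Go runtime for asynchronous preemption; ignore.
                }
            }
        }
    }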