m/node: allow specifying node labels during cluster bootstrap
We also drive-by refactor ProvideBootstrapData to take a single structure
instead of a long list of positional arguments.
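As a rough illustration of the new call shape (the variable names below are
placeholders, not taken from the actual enrolment code), a bootstrap caller
now does roughly:

    bd := &roleserve.BootstrapData{}
    bd.Node.PrivateKey = nodePriv        // node's ed25519.PrivateKey
    bd.Node.ClusterUnlockKey = cuk       // nil when storage encryption is off
    bd.Node.NodeUnlockKey = nuk
    bd.Node.JoinKey = joinPriv           // ed25519 join key for subsequent reboots
    bd.Node.TPMUsage = tpmUsage          // cpb.NodeTPMUsage reported by the platform
    bd.Node.Labels = initialLabels       // new: initial node labels, map[string]string
    bd.Cluster.InitialOwnerKey = ownerPubkey
    bd.Cluster.Configuration = initialClusterConfiguration
    s.ProvideBootstrapData(bd)

instead of passing everything positionally.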
Change-Id: I8d876fd726fa87420789513540b20f523994d801
Reviewed-on: https://review.monogon.dev/c/monogon/+/3103
Tested-by: Jenkins CI
Reviewed-by: Lorenz Brun <lorenz@monogon.tech>
diff --git a/metropolis/node/core/roleserve/roleserve.go b/metropolis/node/core/roleserve/roleserve.go
index 718c394..ad8207b 100644
--- a/metropolis/node/core/roleserve/roleserve.go
+++ b/metropolis/node/core/roleserve/roleserve.go
@@ -81,7 +81,7 @@
Config
KubernetesStatus memory.Value[*KubernetesStatus]
- bootstrapData memory.Value[*bootstrapData]
+ bootstrapData memory.Value[*BootstrapData]
localRoles memory.Value[*cpb.NodeRoles]
podNetwork memory.Value[*clusternet.Prefixes]
clusterDirectorySaved memory.Value[bool]
@@ -178,8 +178,38 @@
return s
}
-func (s *Service) ProvideBootstrapData(privkey ed25519.PrivateKey, iok, cuk, nuk, jkey []byte, icc *curator.Cluster, tpmUsage cpb.NodeTPMUsage) {
- pubkey := privkey.Public().(ed25519.PublicKey)
+// BootstrapData contains all the information that the cluster bootstrap logic
+// injects into the roleserver via ProvideBootstrapData.
+type BootstrapData struct {
+ // Data about the bootstrapping node.
+ Node struct {
+ PrivateKey ed25519.PrivateKey
+
+ // CUK/NUK for storage, if storage encryption is enabled.
+ ClusterUnlockKey []byte
+ NodeUnlockKey []byte
+
+ // Join key for subsequent reboots.
+ JoinKey ed25519.PrivateKey
+
+ // TPM usage as reported by the node.
+ TPMUsage cpb.NodeTPMUsage
+
+ // Initial labels for the node.
+ Labels map[string]string
+ }
+ // Cluster-specific data.
+ Cluster struct {
+ // Public key of the initial cluster owner, used to escrow real user
+ // credentials during the metroctl takeownership process.
+ InitialOwnerKey []byte
+ // Initial cluster configuration.
+ Configuration *curator.Cluster
+ }
+}
+
+func (s *Service) ProvideBootstrapData(data *BootstrapData) {
+ pubkey := data.Node.PrivateKey.Public().(ed25519.PublicKey)
nid := identity.NodeID(pubkey)
// This is the first time we have the node ID, tell the resolver that it's
@@ -187,15 +217,7 @@
s.Resolver.AddOverride(nid, resolver.NodeByHostPort("127.0.0.1", uint16(common.CuratorServicePort)))
s.Resolver.AddEndpoint(resolver.NodeByHostPort("127.0.0.1", uint16(common.CuratorServicePort)))
- s.bootstrapData.Set(&bootstrapData{
- nodePrivateKey: privkey,
- initialOwnerKey: iok,
- clusterUnlockKey: cuk,
- nodeUnlockKey: nuk,
- nodePrivateJoinKey: jkey,
- initialClusterConfiguration: icc,
- nodeTPMUsage: tpmUsage,
- })
+ s.bootstrapData.Set(data)
}
func (s *Service) ProvideRegisterData(credentials identity.NodeCredentials, directory *cpb.ClusterDirectory) {
diff --git a/metropolis/node/core/roleserve/values.go b/metropolis/node/core/roleserve/values.go
index 3b7ff25..91510ae 100644
--- a/metropolis/node/core/roleserve/values.go
+++ b/metropolis/node/core/roleserve/values.go
@@ -1,8 +1,6 @@
package roleserve
import (
- "crypto/ed25519"
-
"google.golang.org/grpc"
"source.monogon.dev/metropolis/node/core/consensus"
@@ -11,24 +9,8 @@
"source.monogon.dev/metropolis/node/core/rpc"
"source.monogon.dev/metropolis/node/core/rpc/resolver"
"source.monogon.dev/metropolis/node/kubernetes"
-
- cpb "source.monogon.dev/metropolis/proto/common"
)
-// bootstrapData is an internal EventValue structure which is populated by the
-// Cluster Enrolment logic via ProvideBootstrapData. It contains data needed by
-// the control plane logic to go into bootstrap mode and bring up a control
-// plane from scratch.
-type bootstrapData struct {
- nodePrivateKey ed25519.PrivateKey
- clusterUnlockKey []byte
- nodeUnlockKey []byte
- initialOwnerKey []byte
- nodePrivateJoinKey ed25519.PrivateKey
- initialClusterConfiguration *curator.Cluster
- nodeTPMUsage cpb.NodeTPMUsage
-}
-
// localControlPlane is an internal EventValue structure which carries
// information about whether the node has a locally running consensus and curator
// service. When it does, the structure pointer inside the EventValue will be
diff --git a/metropolis/node/core/roleserve/worker_controlplane.go b/metropolis/node/core/roleserve/worker_controlplane.go
index d5a60fc..777f887 100644
--- a/metropolis/node/core/roleserve/worker_controlplane.go
+++ b/metropolis/node/core/roleserve/worker_controlplane.go
@@ -23,7 +23,7 @@
// locally running Control Plane (Consensus and Curator service pair) if needed.
//
// The Control Plane will run under the following conditions:
-// - This node has been started in BOOTSTRAP mode and bootstrapData was provided
+// - This node has been started in BOOTSTRAP mode and BootstrapData was provided
// by the cluster enrolment logic. In this case, the Control Plane Worker will
// perform the required bootstrap steps, creating a local node with appropriate
// roles, and will start Consensus and the Curator.
@@ -38,8 +38,8 @@
type workerControlPlane struct {
storageRoot *localstorage.Root
- // bootstrapData will be read.
- bootstrapData *memory.Value[*bootstrapData]
+ // BootstrapData will be read.
+ bootstrapData *memory.Value[*BootstrapData]
// localRoles will be read.
localRoles *memory.Value[*cpb.NodeRoles]
// resolver will be read and used to populate curatorConnection when
@@ -61,7 +61,7 @@
consensusConfig *consensus.Config
// bootstrap is set if this node should bootstrap consensus. It contains all
// data required to perform this bootstrap step.
- bootstrap *bootstrapData
+ bootstrap *BootstrapData
existing *curatorConnection
}
@@ -94,13 +94,13 @@
// Channels are used as intermediaries between map stages and the final reduce,
// which is okay as long as the entire tree restarts simultaneously (which we
// ensure via RunGroup).
- bootstrapDataC := make(chan *bootstrapData)
+ bootstrapDataC := make(chan *BootstrapData)
curatorConnectionC := make(chan *curatorConnection)
rolesC := make(chan *cpb.NodeRoles)
supervisor.RunGroup(ctx, map[string]supervisor.Runnable{
// Plain conversion from Event Value to channel.
- "map-bootstrap-data": event.Pipe[*bootstrapData](s.bootstrapData, bootstrapDataC),
+ "map-bootstrap-data": event.Pipe[*BootstrapData](s.bootstrapData, bootstrapDataC),
// Plain conversion from Event Value to channel.
"map-curator-connection": event.Pipe[*curatorConnection](s.curatorConnection, curatorConnectionC),
// Plain conversion from Event Value to channel.
@@ -110,7 +110,7 @@
supervisor.Signal(ctx, supervisor.SignalHealthy)
var lr *cpb.NodeRoles
var cc *curatorConnection
- var bd *bootstrapData
+ var bd *BootstrapData
for {
select {
case <-ctx.Done():
@@ -145,7 +145,7 @@
consensusConfig: &consensus.Config{
Data: &s.storageRoot.Data.Etcd,
Ephemeral: &s.storageRoot.Ephemeral.Consensus,
- NodePrivateKey: bd.nodePrivateKey,
+ NodePrivateKey: bd.Node.PrivateKey,
},
bootstrap: bd,
})
@@ -269,10 +269,16 @@
// curator startup.
//
// TODO(q3k): collapse the curator bootstrap shenanigans into a single function.
- npub := b.nodePrivateKey.Public().(ed25519.PublicKey)
- jpub := b.nodePrivateJoinKey.Public().(ed25519.PublicKey)
+ npub := b.Node.PrivateKey.Public().(ed25519.PublicKey)
+ jpub := b.Node.JoinKey.Public().(ed25519.PublicKey)
- n := curator.NewNodeForBootstrap(b.clusterUnlockKey, npub, jpub, b.nodeTPMUsage)
+ n := curator.NewNodeForBootstrap(&curator.NewNodeData{
+ CUK: b.Node.ClusterUnlockKey,
+ Pubkey: npub,
+ JPub: jpub,
+ TPMUsage: b.Node.TPMUsage,
+ Labels: b.Node.Labels,
+ })
// The first node always runs consensus.
join, err := st.AddNode(ctx, npub)
@@ -284,12 +290,12 @@
n.EnableKubernetesController()
var nodeCert []byte
- caCert, nodeCert, err = curator.BootstrapNodeFinish(ctx, ckv, &n, b.initialOwnerKey, b.initialClusterConfiguration)
+ caCert, nodeCert, err = curator.BootstrapNodeFinish(ctx, ckv, &n, b.Cluster.InitialOwnerKey, b.Cluster.Configuration)
if err != nil {
return fmt.Errorf("while bootstrapping node: %w", err)
}
// ... and build new credentials from bootstrap step.
- creds, err = identity.NewNodeCredentials(b.nodePrivateKey, nodeCert, caCert)
+ creds, err = identity.NewNodeCredentials(b.Node.PrivateKey, nodeCert, caCert)
if err != nil {
return fmt.Errorf("when creating bootstrap node credentials: %w", err)
}
@@ -297,12 +303,12 @@
if err = creds.Save(&s.storageRoot.Data.Node.Credentials); err != nil {
return fmt.Errorf("while saving node credentials: %w", err)
}
- sc, err := s.storageRoot.ESP.Metropolis.SealedConfiguration.Unseal(b.nodeTPMUsage)
+ sc, err := s.storageRoot.ESP.Metropolis.SealedConfiguration.Unseal(b.Node.TPMUsage)
if err != nil {
return fmt.Errorf("reading sealed configuration failed: %w", err)
}
sc.ClusterCa = caCert
- if err = s.storageRoot.ESP.Metropolis.SealedConfiguration.SealSecureBoot(sc, b.nodeTPMUsage); err != nil {
+ if err = s.storageRoot.ESP.Metropolis.SealedConfiguration.SealSecureBoot(sc, b.Node.TPMUsage); err != nil {
return fmt.Errorf("writing sealed configuration failed: %w", err)
}
supervisor.Logger(ctx).Infof("Control plane bootstrap complete, starting curator...")