m/node: introduce node storage setting and cluster policy

This adds NodeStorageSecurity and a corresponding
ClusterConfiguration.StorageSecurityPolicy, and pipes them into the
Metropolis node bootstrap and registration flow.

All the various settings have so far only been tested manually. For now
the default behaviour (which is exercised by tests) is the same as
before: encryption and authentication are required.

In the future, we will have to expand our end-to-end testing to properly
exercise all the various settings and verify their enforcement and
effect. But that has to come in a follow-up CR as this one is already
large enough as is.

Change-Id: I76f3e37639ef02f4fc708af47ae5014408dc7c21
Reviewed-on: https://review.monogon.dev/c/monogon/+/1747
Reviewed-by: Lorenz Brun <lorenz@monogon.tech>
Tested-by: Jenkins CI
diff --git a/metropolis/node/core/cluster/cluster_bootstrap.go b/metropolis/node/core/cluster/cluster_bootstrap.go
index d86f995..1306cff 100644
--- a/metropolis/node/core/cluster/cluster_bootstrap.go
+++ b/metropolis/node/core/cluster/cluster_bootstrap.go
@@ -49,11 +49,16 @@
 
 	tpmUsage, err := cc.NodeTPMUsage(m.haveTPM)
 	if err != nil {
-		return fmt.Errorf("cannot join cluster: %w", err)
+		return fmt.Errorf("cannot bootstrap cluster: %w", err)
 	}
 
-	supervisor.Logger(ctx).Infof("TPM: cluster TPM mode: %s", cc.TPMMode)
-	supervisor.Logger(ctx).Infof("TPM: node TPM usage: %s", tpmUsage)
+	storageSecurity, err := cc.NodeStorageSecurity()
+	if err != nil {
+		return fmt.Errorf("cannot bootstrap cluster: %w", err)
+	}
+
+	supervisor.Logger(ctx).Infof("TPM: cluster policy: %s, node: %s", cc.TPMMode, tpmUsage)
+	supervisor.Logger(ctx).Infof("Storage Security: cluster policy: %s, node: %s", cc.StorageSecurityPolicy, storageSecurity)
 
 	ownerKey := bootstrap.OwnerPublicKey
 	configuration := ppb.SealedConfiguration{}
@@ -71,7 +76,7 @@
 			supervisor.Logger(ctx).Infof("Bootstrapping: still waiting for storage....")
 		}
 	}()
-	cuk, err := m.storageRoot.Data.MountNew(&configuration)
+	cuk, err := m.storageRoot.Data.MountNew(&configuration, storageSecurity)
 	close(storageDone)
 	if err != nil {
 		return fmt.Errorf("could not make and mount data partition: %w", err)
diff --git a/metropolis/node/core/cluster/cluster_join.go b/metropolis/node/core/cluster/cluster_join.go
index 4fd6473..fbec05d 100644
--- a/metropolis/node/core/cluster/cluster_join.go
+++ b/metropolis/node/core/cluster/cluster_join.go
@@ -70,23 +70,38 @@
 	}
 	cur := ipb.NewCuratorClient(eph)
 
-	// Join the cluster and use the newly obtained CUK to mount the data
-	// partition.
-	var jr *ipb.JoinNodeResponse
-	bo := backoff.NewExponentialBackOff()
-	bo.MaxElapsedTime = 0
-	backoff.Retry(func() error {
-		jr, err = cur.JoinNode(ctx, &ipb.JoinNodeRequest{
-			UsingSealedConfiguration: sealed,
-		})
-		if err != nil {
-			supervisor.Logger(ctx).Warningf("Join failed: %v", err)
-			// This is never used.
-			return fmt.Errorf("join call failed")
+	// Retrieve CUK from cluster and reconstruct encryption key if we're not in
+	// insecure mode.
+	var cuk []byte
+	if sc.StorageSecurity != cpb.NodeStorageSecurity_NODE_STORAGE_SECURITY_INSECURE {
+		if want, got := 32, len(sc.NodeUnlockKey); want != got {
+			return fmt.Errorf("sealed configuration has invalid node unlock key (wanted %d bytes, got %d)", want, got)
 		}
-		return nil
-	}, bo)
-	if err := m.storageRoot.Data.MountExisting(sc, jr.ClusterUnlockKey); err != nil {
+
+		// Join the cluster and use the newly obtained CUK to mount the data
+		// partition.
+		var jr *ipb.JoinNodeResponse
+		bo := backoff.NewExponentialBackOff()
+		bo.MaxElapsedTime = 0
+		backoff.Retry(func() error {
+			jr, err = cur.JoinNode(ctx, &ipb.JoinNodeRequest{
+				UsingSealedConfiguration: sealed,
+			})
+			if err != nil {
+				supervisor.Logger(ctx).Warningf("Join failed: %v", err)
+				// This is never used.
+				return fmt.Errorf("join call failed")
+			}
+			return nil
+		}, bo)
+		cuk = jr.ClusterUnlockKey
+
+		if want, got := 32, len(cuk); want != got {
+			return fmt.Errorf("cluster returned invalid cluster unlock key (wanted %d bytes, got %d)", want, got)
+		}
+	}
+
+	if err := m.storageRoot.Data.MountExisting(sc, cuk); err != nil {
 		return fmt.Errorf("while mounting Data: %w", err)
 	}
 
diff --git a/metropolis/node/core/cluster/cluster_register.go b/metropolis/node/core/cluster/cluster_register.go
index 0096fa7..167f2f7 100644
--- a/metropolis/node/core/cluster/cluster_register.go
+++ b/metropolis/node/core/cluster/cluster_register.go
@@ -70,28 +70,10 @@
 	}
 
 	// Validation passed, let's start working on registering us into the cluster.
-
-	// Tell the user what we're doing.
-	supervisor.Logger(ctx).Infof("Registering into existing cluster.")
-	supervisor.Logger(ctx).Infof("  Cluster CA public key: %s", hex.EncodeToString(ca.PublicKey.(ed25519.PublicKey)))
-	supervisor.Logger(ctx).Infof("  Register Ticket: %s", hex.EncodeToString(register.RegisterTicket))
-	supervisor.Logger(ctx).Infof("  Directory:")
-	logClusterDirectory(ctx, register.ClusterDirectory)
-
-	// Mount new storage with generated CUK, MountNew will save NUK into sc, to be
-	// saved into the ESP after successful registration.
-	var sc ppb.SealedConfiguration
-	supervisor.Logger(ctx).Infof("Registering: mounting new storage...")
-	cuk, err := m.storageRoot.Data.MountNew(&sc)
-	if err != nil {
-		return fmt.Errorf("could not make and mount data partition: %w", err)
-	}
-
 	pub, priv, err := ed25519.GenerateKey(rand.Reader)
 	if err != nil {
 		return fmt.Errorf("could not generate node keypair: %w", err)
 	}
-	supervisor.Logger(ctx).Infof("Registering: node public key: %s", hex.EncodeToString([]byte(pub)))
 
 	// Build resolver used by the register process, authenticating with ephemeral
 	// credentials. Once the join is complete, the rolesever will start its own
@@ -124,13 +106,22 @@
 	}
 	cur := ipb.NewCuratorClient(eph)
 
+	// TODO(q3k): allow node to pick storage security per given policy
+
+	// Tell the user what we're doing.
+	supervisor.Logger(ctx).Infof("Registering into existing cluster.")
+	supervisor.Logger(ctx).Infof("  Cluster CA public key: %s", hex.EncodeToString(ca.PublicKey.(ed25519.PublicKey)))
+	supervisor.Logger(ctx).Infof("  Node public key: %s", hex.EncodeToString(pub))
+	supervisor.Logger(ctx).Infof("  Register Ticket: %s", hex.EncodeToString(register.RegisterTicket))
+	supervisor.Logger(ctx).Infof("  Directory:")
+	logClusterDirectory(ctx, register.ClusterDirectory)
+
 	// Generate Join Credentials. The private key will be stored in
 	// SealedConfiguration only if RegisterNode succeeds.
 	jpub, jpriv, err := ed25519.GenerateKey(rand.Reader)
 	if err != nil {
 		return fmt.Errorf("could not generate join keypair: %w", err)
 	}
-	sc.JoinKey = jpriv
 	supervisor.Logger(ctx).Infof("Registering: join public key: %s", hex.EncodeToString([]byte(jpub)))
 
 	// Register this node.
@@ -148,7 +139,20 @@
 	if err != nil {
 		return fmt.Errorf("register call failed: %w", err)
 	}
+	storageSecurity := res.RecommendedNodeStorageSecurity
 
+	// Mount new storage with generated CUK, MountNew will save NUK into sc, to be
+	// saved into the ESP after successful registration.
+	var sc ppb.SealedConfiguration
+	supervisor.Logger(ctx).Infof("Registering: mounting new storage...")
+	cuk, err := m.storageRoot.Data.MountNew(&sc, storageSecurity)
+	if err != nil {
+		return fmt.Errorf("could not make and mount data partition: %w", err)
+	}
+	sc.JoinKey = jpriv
+
+	supervisor.Logger(ctx).Infof("Storage Security: cluster policy: %s", res.ClusterConfiguration.StorageSecurityPolicy)
+	supervisor.Logger(ctx).Infof("Storage Security: node: %s", storageSecurity)
 	supervisor.Logger(ctx).Infof("TPM: cluster TPM mode: %s", res.ClusterConfiguration.TpmMode)
 	supervisor.Logger(ctx).Infof("TPM: node TPM usage: %v", res.TpmUsage)
 
@@ -158,6 +162,7 @@
 	for {
 		resC, err := cur.CommitNode(ctx, &ipb.CommitNodeRequest{
 			ClusterUnlockKey: cuk,
+			StorageSecurity:  storageSecurity,
 		})
 		if err == nil {
 			supervisor.Logger(ctx).Infof("Registering: Commit successful, received certificate")