m/n/core/cluster: clean up

The cluster.Manager structure has been heavily refactored over its
lifetime, and it accumulated fields and helpers that are no longer used.
Let's clean that up.

We also change the oneway logic to use a channel instead of a
mutex-protected boolean.
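
For illustration, a minimal standalone sketch of the channel-based
one-shot guard idiom (hypothetical names, not code from this change;
assumes calls are not concurrent, which holds for the one-shot Run):

    package main

    import "fmt"

    type oneShot struct {
        done chan struct{}
    }

    func newOneShot() *oneShot {
        return &oneShot{done: make(chan struct{})}
    }

    // run returns an error on any call after the first. A closed channel
    // is always readable, so later calls hit the first select case.
    func (o *oneShot) run() error {
        select {
        case <-o.done:
            return fmt.Errorf("cannot restart")
        default:
        }
        close(o.done)
        // ... actual one-shot work would go here ...
        return nil
    }

    func main() {
        o := newOneShot()
        fmt.Println(o.run()) // <nil>
        fmt.Println(o.run()) // cannot restart
    }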

Change-Id: I3e8158ad5938be1636efc9d7fad7eb6d6e953ccf
Reviewed-on: https://review.monogon.dev/c/monogon/+/1354
Reviewed-by: Leopold Schabel <leo@monogon.tech>
Tested-by: Jenkins CI
diff --git a/metropolis/node/core/cluster/BUILD.bazel b/metropolis/node/core/cluster/BUILD.bazel
index 246fe1f..be09f48 100644
--- a/metropolis/node/core/cluster/BUILD.bazel
+++ b/metropolis/node/core/cluster/BUILD.bazel
@@ -12,7 +12,6 @@
     importpath = "source.monogon.dev/metropolis/node/core/cluster",
     visibility = ["//metropolis/node/core:__subpackages__"],
     deps = [
-        "//metropolis/node/core/consensus",
         "//metropolis/node/core/curator/proto/api",
         "//metropolis/node/core/identity",
         "//metropolis/node/core/localstorage",
diff --git a/metropolis/node/core/cluster/cluster.go b/metropolis/node/core/cluster/cluster.go
index 6254669..c484132 100644
--- a/metropolis/node/core/cluster/cluster.go
+++ b/metropolis/node/core/cluster/cluster.go
@@ -34,39 +34,24 @@
 	"net/http"
 	"os"
 	"strings"
-	"sync"
 
 	"github.com/cenkalti/backoff/v4"
 	"google.golang.org/protobuf/proto"
 
-	"source.monogon.dev/metropolis/node/core/consensus"
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/network"
 	"source.monogon.dev/metropolis/node/core/roleserve"
 	"source.monogon.dev/metropolis/pkg/supervisor"
 	apb "source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
-	ppb "source.monogon.dev/metropolis/proto/private"
 )
 
-type state struct {
-	mu sync.RWMutex
-
-	oneway bool
-
-	configuration *ppb.SealedConfiguration
-}
-
 type Manager struct {
 	storageRoot    *localstorage.Root
 	networkService *network.Service
 	roleServer     *roleserve.Service
 
-	state
-
-	// consensus is the spawned etcd/consensus service, if the Manager brought
-	// up a Node that should run one.
-	consensus *consensus.Service
+	oneway chan struct{}
 }
 
 // NewManager creates a new cluster Manager. The given localstorage Root must
@@ -77,32 +62,19 @@
 		storageRoot:    storageRoot,
 		networkService: networkService,
 		roleServer:     rs,
-
-		state: state{},
+		oneway:         make(chan struct{}),
 	}
 }
 
-func (m *Manager) lock() (*state, func()) {
-	m.mu.Lock()
-	return &m.state, m.mu.Unlock
-}
-
-func (m *Manager) rlock() (*state, func()) {
-	m.mu.RLock()
-	return &m.state, m.mu.RUnlock
-}
-
 // Run is the runnable of the Manager, to be started using the Supervisor. It
 // is one-shot, and should not be restarted.
 func (m *Manager) Run(ctx context.Context) error {
-	state, unlock := m.lock()
-	if state.oneway {
-		unlock()
-		// TODO(q3k): restart the entire system if this happens
+	select {
+	case <-m.oneway:
 		return fmt.Errorf("cannot restart cluster manager")
+	default:
 	}
-	state.oneway = true
-	unlock()
+	close(m.oneway)
 
 	configuration, err := m.storageRoot.ESP.Metropolis.SealedConfiguration.Unseal()
 	if err == nil {
@@ -140,6 +112,7 @@
 
 	if err == nil {
 		supervisor.Logger(ctx).Info("Cluster enrolment done.")
+		return nil
 	}
 	return err
 }
diff --git a/metropolis/node/core/cluster/cluster_bootstrap.go b/metropolis/node/core/cluster/cluster_bootstrap.go
index 707c26c..fd990a6 100644
--- a/metropolis/node/core/cluster/cluster_bootstrap.go
+++ b/metropolis/node/core/cluster/cluster_bootstrap.go
@@ -31,19 +31,16 @@
 func (m *Manager) bootstrap(ctx context.Context, bootstrap *apb.NodeParameters_ClusterBootstrap) error {
 	supervisor.Logger(ctx).Infof("Bootstrapping new cluster, owner public key: %s", hex.EncodeToString(bootstrap.OwnerPublicKey))
 
-	state, unlock := m.lock()
-	defer unlock()
-
 	ownerKey := bootstrap.OwnerPublicKey
-	state.configuration = &ppb.SealedConfiguration{}
+	configuration := ppb.SealedConfiguration{}
 
 	// Mount new storage with generated CUK, and save NUK into sealed config proto.
 	supervisor.Logger(ctx).Infof("Bootstrapping: mounting new storage...")
-	cuk, err := m.storageRoot.Data.MountNew(state.configuration)
+	cuk, err := m.storageRoot.Data.MountNew(&configuration)
 	if err != nil {
 		return fmt.Errorf("could not make and mount data partition: %w", err)
 	}
-	nuk := state.configuration.NodeUnlockKey
+	nuk := configuration.NodeUnlockKey
 
 	pub, priv, err := ed25519.GenerateKey(rand.Reader)
 	if err != nil {
diff --git a/metropolis/node/core/cluster/cluster_register.go b/metropolis/node/core/cluster/cluster_register.go
index 624c449..9adc1e3 100644
--- a/metropolis/node/core/cluster/cluster_register.go
+++ b/metropolis/node/core/cluster/cluster_register.go
@@ -68,11 +68,7 @@
 		register.ClusterDirectory.Nodes[i].PublicKey = nil
 	}
 
-	// Validation passed, let's take the state lock and start working on registering
-	// us into the cluster.
-
-	state, unlock := m.lock()
-	defer unlock()
+	// Validation passed, let's start working on registering us into the cluster.
 
 	// Tell the user what we're doing.
 	supervisor.Logger(ctx).Infof("Registering into existing cluster.")
@@ -84,9 +80,8 @@
 	// Mount new storage with generated CUK, MountNew will save NUK into sc, to be
 	// saved into the ESP after successful registration.
 	var sc ppb.SealedConfiguration
-	state.configuration = &sc
 	supervisor.Logger(ctx).Infof("Registering: mounting new storage...")
-	cuk, err := m.storageRoot.Data.MountNew(state.configuration)
+	cuk, err := m.storageRoot.Data.MountNew(&sc)
 	if err != nil {
 		return fmt.Errorf("could not make and mount data partition: %w", err)
 	}