// Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cluster

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"sync"

	"google.golang.org/protobuf/proto"

	"source.monogon.dev/metropolis/node/core/consensus"
	"source.monogon.dev/metropolis/node/core/consensus/client"
	"source.monogon.dev/metropolis/node/core/localstorage"
	"source.monogon.dev/metropolis/node/core/network"
	"source.monogon.dev/metropolis/pkg/event"
	"source.monogon.dev/metropolis/pkg/event/memory"
	"source.monogon.dev/metropolis/pkg/supervisor"
	apb "source.monogon.dev/metropolis/proto/api"
	ppb "source.monogon.dev/metropolis/proto/private"
)

// Status is returned to Cluster clients (i.e., node code) on Manager.Watch/.Get.
type Status struct {
	// State is the current state of the cluster, as seen by the node.
	State ClusterState
	// Node is the configuration of this node in the cluster.
	Node *Node

	consensusClient client.Namespaced
}

// ConsensusUser is the top-level user of an etcd client in Metropolis node
// code. These need to be defined ahead of time in a Go 'enum', and different
// ConsensusUsers should not be shared by different codepaths.
type ConsensusUser string

const (
	ConsensusUserKubernetesPKI ConsensusUser = "kubernetes-pki"
	ConsensusUserCurator       ConsensusUser = "curator"
)

// ConsensusClient returns an etcd/consensus client for a given ConsensusUser.
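//
// A minimal usage sketch (hypothetical caller; the actual call sites live
// elsewhere in the node code):
//
//	kv, err := status.ConsensusClient(ConsensusUserKubernetesPKI)
//	if err != nil {
//		return fmt.Errorf("could not get consensus client: %w", err)
//	}
//	// kv is namespaced to this user and can now be handed to the subsystem
//	// that needs etcd access.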
func (s *Status) ConsensusClient(user ConsensusUser) (client.Namespaced, error) {
	// Ensure that we already are connected to etcd and are in a state in which we
	// should be handing out cluster connectivity.
	if s.consensusClient == nil {
		return nil, fmt.Errorf("not connected")
	}
	switch s.State {
	case ClusterHome:
	case ClusterSplit:
		return nil, fmt.Errorf("refusing connection with cluster state %v", s.State)
	default:
	}

	// Ensure only defined 'applications' are used to prevent programmer error and
	// casting to ConsensusUser from an arbitrary string.
	switch user {
	case ConsensusUserKubernetesPKI:
	case ConsensusUserCurator:
	default:
		return nil, fmt.Errorf("unknown ConsensusUser %q", user)
	}
	client, err := s.consensusClient.Sub(string(user))
	if err != nil {
		return nil, fmt.Errorf("retrieving subclient failed: %w", err)
	}
	return client, nil
}

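// state is the Manager's internal mutable state, shared between its methods
// and protected by mu.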
type state struct {
	mu sync.RWMutex

	oneway       bool
	stateCluster ClusterState
	stateNode    ppb.Node_FSMState

	configuration *ppb.SealedConfiguration
}

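// Watcher allows its holder to be notified about changes to the cluster
// Status. It wraps an event.Watcher and decorates it with typed access to
// Status values.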
type Watcher struct {
	event.Watcher
}

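// Get blocks until a new Status is available (or the context is canceled) and
// returns it.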
func (w *Watcher) Get(ctx context.Context) (*Status, error) {
	val, err := w.Watcher.Get(ctx)
	if err != nil {
		return nil, err
	}
	status := val.(Status)
	return &status, err
}

// GetHome waits until the cluster, from the point of view of this node, is in
// the ClusterHome state. This can be used to wait for the cluster manager to
// 'settle', before clients start more node services.
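//
// A minimal sketch of the intended use (hypothetical caller):
//
//	w := m.Watch()
//	status, err := w.GetHome(ctx)
//	if err != nil {
//		return fmt.Errorf("waiting for cluster: %w", err)
//	}
//	// status.Node and status.ConsensusClient(...) can now be used to bring up
//	// further node services.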
func (w *Watcher) GetHome(ctx context.Context) (*Status, error) {
	for {
		status, err := w.Get(ctx)
		if err != nil {
			return nil, err
		}
		switch status.State {
		case ClusterHome:
			return status, nil
		case ClusterDisowning:
			return nil, fmt.Errorf("the cluster has disowned this node")
		}
	}
}

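// Watch returns a Watcher that can be used to observe changes to the cluster
// Status as seen by this Manager.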
func (m *Manager) Watch() Watcher {
	return Watcher{
		Watcher: m.status.Watch(),
	}
}

type Manager struct {
	storageRoot    *localstorage.Root
	networkService *network.Service
	status         memory.Value

	state

	// consensus is the spawned etcd/consensus service, if the Manager brought
	// up a Node that should run one.
	consensus *consensus.Service
}

// NewManager creates a new cluster Manager. The given localstorage Root must
// be placed, but not yet started (and will be started as the Manager makes
// progress). The given network Service must already be running.
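//
// A minimal sketch of the expected wiring (sketch only; the runnable name is
// hypothetical and the supervisor call is assumed to follow its usual form):
//
//	m := NewManager(storageRoot, networkService)
//	if err := supervisor.Run(ctx, "cluster-manager", m.Run); err != nil {
//		return fmt.Errorf("could not start cluster manager: %w", err)
//	}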
func NewManager(storageRoot *localstorage.Root, networkService *network.Service) *Manager {
	return &Manager{
		storageRoot:    storageRoot,
		networkService: networkService,

		state: state{
			stateCluster: ClusterUnknown,
			stateNode:    ppb.Node_FSM_STATE_INVALID,
		},
	}
}

func (m *Manager) lock() (*state, func()) {
	m.mu.Lock()
	return &m.state, m.mu.Unlock
}

func (m *Manager) rlock() (*state, func()) {
	m.mu.RLock()
	return &m.state, m.mu.RUnlock
}

// Run is the runnable of the Manager, to be started using the Supervisor. It
// is one-shot, and should not be restarted.
func (m *Manager) Run(ctx context.Context) error {
	state, unlock := m.lock()
	if state.oneway {
		unlock()
		// TODO(q3k): restart the entire system if this happens
		return fmt.Errorf("cannot restart cluster manager")
	}
	state.oneway = true
	unlock()

	configuration, err := m.storageRoot.ESP.SealedConfiguration.Unseal()
	if err == nil {
		supervisor.Logger(ctx).Info("Sealed configuration present, attempting to join cluster")
		return m.join(ctx, configuration)
	}

	if !errors.Is(err, localstorage.ErrNoSealed) {
		return fmt.Errorf("unexpected sealed config error: %w", err)
	}

	supervisor.Logger(ctx).Info("No sealed configuration, looking for node parameters")

	params, err := m.nodeParams(ctx)
	if err != nil {
		return fmt.Errorf("no parameters available: %w", err)
	}

	switch inner := params.Cluster.(type) {
	case *apb.NodeParameters_ClusterBootstrap_:
		return m.bootstrap(ctx, inner.ClusterBootstrap)
	case *apb.NodeParameters_ClusterRegister_:
		return m.register(ctx, inner.ClusterRegister)
	default:
		return fmt.Errorf("node parameters misconfigured: neither cluster_bootstrap nor cluster_register set")
	}
}

func (m *Manager) register(ctx context.Context, register *apb.NodeParameters_ClusterRegister) error {
	return fmt.Errorf("unimplemented")
}

func (m *Manager) nodeParamsFWCFG(ctx context.Context) (*apb.NodeParameters, error) {
	bytes, err := ioutil.ReadFile("/sys/firmware/qemu_fw_cfg/by_name/dev.monogon.metropolis/parameters.pb/raw")
	if err != nil {
		return nil, fmt.Errorf("could not read firmware enrolment file: %w", err)
	}

	config := apb.NodeParameters{}
	err = proto.Unmarshal(bytes, &config)
	if err != nil {
		return nil, fmt.Errorf("could not unmarshal: %v", err)
	}

	return &config, nil
}

func (m *Manager) nodeParams(ctx context.Context) (*apb.NodeParameters, error) {
	// Retrieve node parameters from qemu's fwcfg interface or ESP.
	// TODO(q3k): probably abstract this away and implement per platform/build/...
	paramsFWCFG, err := m.nodeParamsFWCFG(ctx)
	if err != nil {
		supervisor.Logger(ctx).Warningf("Could not retrieve node parameters from qemu fwcfg: %v", err)
		paramsFWCFG = nil
	} else {
		supervisor.Logger(ctx).Infof("Retrieved node parameters from qemu fwcfg")
	}
	paramsESP, err := m.storageRoot.ESP.NodeParameters.Unmarshal()
	if err != nil {
		supervisor.Logger(ctx).Warningf("Could not retrieve node parameters from ESP: %v", err)
		paramsESP = nil
	} else {
		supervisor.Logger(ctx).Infof("Retrieved node parameters from ESP")
	}
	if paramsFWCFG == nil && paramsESP == nil {
		return nil, fmt.Errorf("could not find node parameters in ESP or qemu fwcfg")
	}
	if paramsFWCFG != nil && paramsESP != nil {
		supervisor.Logger(ctx).Warningf("Node parameters found in both ESP and qemu fwcfg, using the latter")
		return paramsFWCFG, nil
	} else if paramsFWCFG != nil {
		return paramsFWCFG, nil
	} else {
		return paramsESP, nil
	}
}

func (m *Manager) join(ctx context.Context, cfg *ppb.SealedConfiguration) error {
	return fmt.Errorf("unimplemented")
}

// Node returns the Node that the Manager brought into a cluster, or nil if the
// Manager is not running. This is safe to call from any goroutine.
func (m *Manager) Node() *Node {
	return nil
}