// Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cluster

import (
| 20 | "context" |
Serge Bazanski | 42e61c6 | 2021-03-18 15:07:18 +0100 | [diff] [blame] | 21 | "errors" |
Serge Bazanski | 1ebd1e1 | 2020-07-13 19:17:16 +0200 | [diff] [blame] | 22 | "fmt" |
| 23 | "io/ioutil" |
Serge Bazanski | 1ebd1e1 | 2020-07-13 19:17:16 +0200 | [diff] [blame] | 24 | "sync" |
Serge Bazanski | 1ebd1e1 | 2020-07-13 19:17:16 +0200 | [diff] [blame] | 25 | |
Serge Bazanski | 0ed2f96 | 2021-03-15 16:39:30 +0100 | [diff] [blame] | 26 | "google.golang.org/protobuf/proto" |
Serge Bazanski | 1ebd1e1 | 2020-07-13 19:17:16 +0200 | [diff] [blame] | 27 | |
Serge Bazanski | 31370b0 | 2021-01-07 16:31:14 +0100 | [diff] [blame] | 28 | "source.monogon.dev/metropolis/node/core/consensus" |
Serge Bazanski | a105db5 | 2021-04-12 19:57:46 +0200 | [diff] [blame] | 29 | "source.monogon.dev/metropolis/node/core/consensus/client" |
Serge Bazanski | 31370b0 | 2021-01-07 16:31:14 +0100 | [diff] [blame] | 30 | "source.monogon.dev/metropolis/node/core/localstorage" |
Serge Bazanski | 31370b0 | 2021-01-07 16:31:14 +0100 | [diff] [blame] | 31 | "source.monogon.dev/metropolis/node/core/network" |
Serge Bazanski | a105db5 | 2021-04-12 19:57:46 +0200 | [diff] [blame] | 32 | "source.monogon.dev/metropolis/pkg/event" |
Serge Bazanski | 68ca5ee | 2021-04-27 16:09:16 +0200 | [diff] [blame^] | 33 | "source.monogon.dev/metropolis/pkg/event/memory" |
Serge Bazanski | 31370b0 | 2021-01-07 16:31:14 +0100 | [diff] [blame] | 34 | "source.monogon.dev/metropolis/pkg/supervisor" |
| 35 | apb "source.monogon.dev/metropolis/proto/api" |
Serge Bazanski | 42e61c6 | 2021-03-18 15:07:18 +0100 | [diff] [blame] | 36 | ppb "source.monogon.dev/metropolis/proto/private" |
Serge Bazanski | 1ebd1e1 | 2020-07-13 19:17:16 +0200 | [diff] [blame] | 37 | ) |

// Status is returned to Cluster clients (i.e., node code) on Manager.Watch/.Get.
type Status struct {
	// State is the current state of the cluster, as seen by the node.
	State ClusterState
	// Node is the configuration of this node in the cluster.
	Node *Node

	consensusClient client.Namespaced
}

// ConsensusUser is the top-level user of an etcd client in Metropolis node
// code. These need to be defined ahead of time in a Go 'enum', and different
// ConsensusUsers should not be shared by different codepaths.
type ConsensusUser string

const (
	ConsensusUserKubernetesPKI ConsensusUser = "kubernetes-pki"
)

// ConsensusClient returns an etcd/consensus client for a given ConsensusUser.
func (s *Status) ConsensusClient(user ConsensusUser) (client.Namespaced, error) {
	// Ensure that we already are connected to etcd and are in a state in which we
	// should be handing out cluster connectivity.
	if s.consensusClient == nil {
		return nil, fmt.Errorf("not connected")
	}
	switch s.State {
	case ClusterHome:
	case ClusterSplit:
		return nil, fmt.Errorf("refusing connection with cluster state %v", s.State)
	default:
	}

	// Ensure only defined 'applications' are used to prevent programmer error and
	// casting to ConsensusUser from an arbitrary string.
	switch user {
	case ConsensusUserKubernetesPKI:
	default:
		return nil, fmt.Errorf("unknown ConsensusUser %q", user)
	}
	client, err := s.consensusClient.Sub(string(user))
	if err != nil {
		return nil, fmt.Errorf("retrieving subclient failed: %w", err)
	}
	return client, nil
}
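
// The sketch below is purely illustrative and not part of the package API: it
// shows how node code, given a Status, is expected to obtain its namespaced
// etcd client. The function name exampleKubernetesPKIClient is an assumption
// made for this example only.
func exampleKubernetesPKIClient(status *Status) (client.Namespaced, error) {
	// Request the etcd namespace reserved for the Kubernetes PKI user.
	ckv, err := status.ConsensusClient(ConsensusUserKubernetesPKI)
	if err != nil {
		return nil, fmt.Errorf("could not get consensus client: %w", err)
	}
	return ckv, nil
}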

type state struct {
	mu sync.RWMutex

	oneway       bool
	stateCluster ClusterState
	stateNode    ppb.Node_FSMState

	configuration *ppb.SealedConfiguration
}

type Watcher struct {
	event.Watcher
}

func (w *Watcher) Get(ctx context.Context) (*Status, error) {
	val, err := w.Watcher.Get(ctx)
	if err != nil {
		return nil, err
	}
	status := val.(Status)
	return &status, err
}

// GetHome waits until the cluster, from the point of view of this node, is in
// the ClusterHome state. This can be used to wait for the cluster manager to
// 'settle', before clients start more node services.
func (w *Watcher) GetHome(ctx context.Context) (*Status, error) {
	for {
		status, err := w.Get(ctx)
		if err != nil {
			return nil, err
		}
		switch status.State {
		case ClusterHome:
			return status, nil
		case ClusterDisowning:
			return nil, fmt.Errorf("the cluster has disowned this node")
		}
	}
}

func (m *Manager) Watch() Watcher {
	return Watcher{
		Watcher: m.status.Watch(),
	}
}
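
// Illustrative sketch, not part of the package API: a node service waiting for
// the cluster to settle before doing its work. The function name
// exampleWaitForHome and its use as a supervisor runnable are assumptions made
// for this example; only Watch and GetHome are actual Manager/Watcher API.
func exampleWaitForHome(ctx context.Context, m *Manager) error {
	w := m.Watch()
	status, err := w.GetHome(ctx)
	if err != nil {
		return fmt.Errorf("while waiting for cluster home state: %w", err)
	}
	supervisor.Logger(ctx).Infof("Cluster is home, node configuration: %v", status.Node)
	return nil
}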

type Manager struct {
	storageRoot    *localstorage.Root
	networkService *network.Service
	status         memory.Value

	state

	// consensus is the spawned etcd/consensus service, if the Manager brought
	// up a Node that should run one.
	consensus *consensus.Service
}

// NewManager creates a new cluster Manager. The given localstorage Root must
// be placed, but not yet started (and will be started as the Manager makes
// progress). The given network Service must already be running.
func NewManager(storageRoot *localstorage.Root, networkService *network.Service) *Manager {
	return &Manager{
		storageRoot:    storageRoot,
		networkService: networkService,

		state: state{
			stateCluster: ClusterUnknown,
			stateNode:    ppb.Node_FSM_STATE_INVALID,
		},
	}
}
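
// Illustrative sketch, not part of the package API: wiring a Manager into the
// node's supervision tree. The runnable name "cluster-manager" and the
// assumption that supervisor.Run accepts (ctx, name, runnable) are made for
// this example only.
func exampleStartManager(ctx context.Context, root *localstorage.Root, nw *network.Service) error {
	m := NewManager(root, nw)
	// Run is one-shot and must not be restarted (see Manager.Run).
	return supervisor.Run(ctx, "cluster-manager", m.Run)
}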

func (m *Manager) lock() (*state, func()) {
	m.mu.Lock()
	return &m.state, m.mu.Unlock
}

func (m *Manager) rlock() (*state, func()) {
	m.mu.RLock()
	return &m.state, m.mu.RUnlock
}

// Run is the runnable of the Manager, to be started using the Supervisor. It
// is one-shot, and should not be restarted.
func (m *Manager) Run(ctx context.Context) error {
	state, unlock := m.lock()
	if state.oneway {
		unlock()
		// TODO(q3k): restart the entire system if this happens
		return fmt.Errorf("cannot restart cluster manager")
	}
	state.oneway = true
	unlock()

	configuration, err := m.storageRoot.ESP.SealedConfiguration.Unseal()
	if err == nil {
		supervisor.Logger(ctx).Info("Sealed configuration present, attempting to join cluster")
		return m.join(ctx, configuration)
	}

	if !errors.Is(err, localstorage.ErrNoSealed) {
		return fmt.Errorf("unexpected sealed config error: %w", err)
	}

	supervisor.Logger(ctx).Info("No sealed configuration, looking for node parameters")

	params, err := m.nodeParams(ctx)
	if err != nil {
		return fmt.Errorf("no parameters available: %w", err)
	}

	switch inner := params.Cluster.(type) {
	case *apb.NodeParameters_ClusterBootstrap_:
		return m.bootstrap(ctx, inner.ClusterBootstrap)
	case *apb.NodeParameters_ClusterRegister_:
		return m.register(ctx, inner.ClusterRegister)
	default:
		return fmt.Errorf("node parameters misconfigured: neither cluster_bootstrap nor cluster_register set")
	}
}

func (m *Manager) register(ctx context.Context, register *apb.NodeParameters_ClusterRegister) error {
	return fmt.Errorf("unimplemented")
}

func (m *Manager) nodeParamsFWCFG(ctx context.Context) (*apb.NodeParameters, error) {
	bytes, err := ioutil.ReadFile("/sys/firmware/qemu_fw_cfg/by_name/dev.monogon.metropolis/parameters.pb/raw")
	if err != nil {
		return nil, fmt.Errorf("could not read firmware enrolment file: %w", err)
	}

	config := apb.NodeParameters{}
	err = proto.Unmarshal(bytes, &config)
	if err != nil {
		return nil, fmt.Errorf("could not unmarshal: %v", err)
	}

	return &config, nil
}

func (m *Manager) nodeParams(ctx context.Context) (*apb.NodeParameters, error) {
	// Retrieve node parameters from qemu's fwcfg interface or ESP.
	// TODO(q3k): probably abstract this away and implement per platform/build/...
	paramsFWCFG, err := m.nodeParamsFWCFG(ctx)
	if err != nil {
		supervisor.Logger(ctx).Warningf("Could not retrieve node parameters from qemu fwcfg: %v", err)
		paramsFWCFG = nil
	} else {
		supervisor.Logger(ctx).Infof("Retrieved node parameters from qemu fwcfg")
	}
	paramsESP, err := m.storageRoot.ESP.NodeParameters.Unmarshal()
	if err != nil {
		supervisor.Logger(ctx).Warningf("Could not retrieve node parameters from ESP: %v", err)
		paramsESP = nil
	} else {
		supervisor.Logger(ctx).Infof("Retrieved node parameters from ESP")
	}
	if paramsFWCFG == nil && paramsESP == nil {
		return nil, fmt.Errorf("could not find node parameters in ESP or qemu fwcfg")
	}
	if paramsFWCFG != nil && paramsESP != nil {
		supervisor.Logger(ctx).Warningf("Node parameters found in both ESP and qemu fwcfg, using the latter")
		return paramsFWCFG, nil
	} else if paramsFWCFG != nil {
		return paramsFWCFG, nil
	} else {
		return paramsESP, nil
	}
}

func (m *Manager) join(ctx context.Context, cfg *ppb.SealedConfiguration) error {
	return fmt.Errorf("unimplemented")
}

// Node returns the Node that the Manager brought into a cluster, or nil if the
// Manager is not Running. This is safe to call from any goroutine.
func (m *Manager) Node() *Node {
	return nil
}