// Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package cluster implements low-level clustering logic, especially logic
// regarding bootstrapping, registering into, and joining a cluster. Its goal
// is to provide the rest of the node code with the following:
// - A mounted plaintext storage.
// - Node credentials/identity.
// - A locally running etcd server if the node is supposed to run one, and a
//   client connection to that etcd cluster if so.
// - The state of the cluster as seen by the node, to enable code to respond
//   to node lifecycle changes.
package cluster

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"strings"
	"sync"

	"github.com/cenkalti/backoff/v4"
	"google.golang.org/protobuf/proto"

	"source.monogon.dev/metropolis/node"
	"source.monogon.dev/metropolis/node/core/consensus"
	"source.monogon.dev/metropolis/node/core/identity"
	"source.monogon.dev/metropolis/node/core/localstorage"
	"source.monogon.dev/metropolis/node/core/network"
	"source.monogon.dev/metropolis/node/core/roleserve"
	"source.monogon.dev/metropolis/pkg/event/memory"
	"source.monogon.dev/metropolis/pkg/supervisor"
	apb "source.monogon.dev/metropolis/proto/api"
	cpb "source.monogon.dev/metropolis/proto/common"
	ppb "source.monogon.dev/metropolis/proto/private"
)

type state struct {
	mu sync.RWMutex

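	// oneway guards against the Manager being started more than once: it is
	// set on the first call to Run and never cleared.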
	oneway bool

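	// configuration is this node's sealed configuration, once known.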
	configuration *ppb.SealedConfiguration
}

type Manager struct {
	storageRoot    *localstorage.Root
	networkService *network.Service
	roleServer     *roleserve.Service
	status         memory.Value

	state

	// consensus is the spawned etcd/consensus service, if the Manager brought
	// up a Node that should run one.
	consensus *consensus.Service
}

// NewManager creates a new cluster Manager. The given localstorage Root must
// be placed, but not yet started (and will be started as the Manager makes
// progress). The given network Service must already be running.
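//
// A minimal usage sketch (illustrative only: the runnable name and the exact
// supervision tree layout are assumptions, not something this package
// prescribes):
//
//	m := cluster.NewManager(storageRoot, networkService, roleServer)
//	supervisor.Run(ctx, "cluster", m.Run)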
func NewManager(storageRoot *localstorage.Root, networkService *network.Service, rs *roleserve.Service) *Manager {
	return &Manager{
		storageRoot:    storageRoot,
		networkService: networkService,
		roleServer:     rs,

		state: state{},
	}
}

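// lock takes the Manager's state lock for writing and returns the guarded
// state together with the matching unlock function.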
func (m *Manager) lock() (*state, func()) {
	m.mu.Lock()
	return &m.state, m.mu.Unlock
}

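// rlock takes the Manager's state lock for reading and returns the guarded
// state together with the matching unlock function.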
func (m *Manager) rlock() (*state, func()) {
	m.mu.RLock()
	return &m.state, m.mu.RUnlock
}

// Run is the runnable of the Manager, to be started using the Supervisor. It
// is one-shot, and should not be restarted.
func (m *Manager) Run(ctx context.Context) error {
	state, unlock := m.lock()
	if state.oneway {
		unlock()
		// TODO(q3k): restart the entire system if this happens
		return fmt.Errorf("cannot restart cluster manager")
	}
	state.oneway = true
	unlock()

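	// Whether a Sealed Configuration can be unsealed decides the enrolment
	// path: if it can, this node is already part of a cluster and should join
	// it; otherwise, enrolment proceeds from Node Parameters.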
	configuration, err := m.storageRoot.ESP.Metropolis.SealedConfiguration.Unseal()
	if err == nil {
		supervisor.Logger(ctx).Info("Sealed configuration present, attempting to join cluster")

		// Read the Cluster Directory and unmarshal it. Since the node is
		// already registered with the cluster, the directory won't be
		// bootstrapped from Node Parameters.
		cd, err := m.storageRoot.ESP.Metropolis.ClusterDirectory.Unmarshal()
		if err != nil {
			return fmt.Errorf("while reading cluster directory: %w", err)
		}
		return m.join(ctx, configuration, cd)
	}

	if !errors.Is(err, localstorage.ErrNoSealed) {
		return fmt.Errorf("unexpected sealed config error: %w", err)
	}

	supervisor.Logger(ctx).Info("No sealed configuration, looking for node parameters")

	params, err := m.nodeParams(ctx)
	if err != nil {
		return fmt.Errorf("no parameters available: %w", err)
	}

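	// Enrol according to whichever cluster configuration is set in the Node
	// Parameters: either bootstrap a brand new cluster or register into an
	// existing one.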
	switch inner := params.Cluster.(type) {
	case *apb.NodeParameters_ClusterBootstrap_:
		err = m.bootstrap(ctx, inner.ClusterBootstrap)
	case *apb.NodeParameters_ClusterRegister_:
		err = m.register(ctx, inner.ClusterRegister)
	default:
		err = fmt.Errorf("node parameters misconfigured: neither cluster_bootstrap nor cluster_register set")
	}

	if err == nil {
		supervisor.Logger(ctx).Info("Cluster enrolment done.")
	}
	return err
}

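// nodeParamsFWCFG attempts to retrieve node parameters from QEMU's fw_cfg
// interface, as exposed by the kernel under /sys/firmware/qemu_fw_cfg.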
func (m *Manager) nodeParamsFWCFG(ctx context.Context) (*apb.NodeParameters, error) {
	bytes, err := os.ReadFile("/sys/firmware/qemu_fw_cfg/by_name/dev.monogon.metropolis/parameters.pb/raw")
	if err != nil {
		return nil, fmt.Errorf("could not read firmware enrolment file: %w", err)
	}

	config := apb.NodeParameters{}
	err = proto.Unmarshal(bytes, &config)
	if err != nil {
		return nil, fmt.Errorf("could not unmarshal: %v", err)
	}

	return &config, nil
}

// nodeParamsGCPMetadata attempts to retrieve the node parameters from the
// GCP metadata service. Returns nil if the metadata service is available,
// but no node parameters are specified.
func (m *Manager) nodeParamsGCPMetadata(ctx context.Context) (*apb.NodeParameters, error) {
	const metadataURL = "http://169.254.169.254/computeMetadata/v1/instance/attributes/metropolis-node-params"
	req, err := http.NewRequestWithContext(ctx, "GET", metadataURL, nil)
	if err != nil {
		return nil, fmt.Errorf("could not create request: %w", err)
	}
	req.Header.Set("Metadata-Flavor", "Google")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("HTTP request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		if resp.StatusCode == http.StatusNotFound {
			return nil, nil
		}
		return nil, fmt.Errorf("non-200 status code: %d", resp.StatusCode)
	}
	decoded, err := io.ReadAll(base64.NewDecoder(base64.StdEncoding, resp.Body))
	if err != nil {
		return nil, fmt.Errorf("cannot decode base64: %w", err)
	}
	config := apb.NodeParameters{}
	err = proto.Unmarshal(decoded, &config)
	if err != nil {
		return nil, fmt.Errorf("failed unmarshalling NodeParameters: %w", err)
	}
	return &config, nil
}

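// nodeParams retrieves this node's NodeParameters from whichever source is
// available on the current platform: the GCP metadata server, QEMU's fw_cfg
// interface, or the ESP.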
func (m *Manager) nodeParams(ctx context.Context) (*apb.NodeParameters, error) {
	boardName, err := getDMIBoardName()
	if err != nil {
		supervisor.Logger(ctx).Warningf("Could not get board name, cannot detect platform: %v", err)
	}
	supervisor.Logger(ctx).Infof("Board name: %q", boardName)

	// When running on GCP, attempt to retrieve the node parameters from the
	// metadata server first. Retry until we get a response, since we need to
	// wait for the network service to assign an IP address first.
	if isGCPInstance(boardName) {
		var params *apb.NodeParameters
		op := func() error {
			supervisor.Logger(ctx).Info("Running on GCP, attempting to retrieve node parameters from metadata server")
			params, err = m.nodeParamsGCPMetadata(ctx)
			return err
		}
		err := backoff.Retry(op, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
		if err != nil {
			supervisor.Logger(ctx).Errorf("Failed to retrieve node parameters: %v", err)
		}
		if params != nil {
			supervisor.Logger(ctx).Info("Retrieved parameters from GCP metadata server")
			return params, nil
		}
		supervisor.Logger(ctx).Infof("\"metropolis-node-params\" metadata not found")
	}

	// Retrieve node parameters from qemu's fwcfg interface or ESP.
	// TODO(q3k): probably abstract this away and implement per platform/build/...
	paramsFWCFG, err := m.nodeParamsFWCFG(ctx)
	if err != nil {
		supervisor.Logger(ctx).Warningf("Could not retrieve node parameters from qemu fwcfg: %v", err)
		paramsFWCFG = nil
	} else {
		supervisor.Logger(ctx).Infof("Retrieved node parameters from qemu fwcfg")
	}
	paramsESP, err := m.storageRoot.ESP.Metropolis.NodeParameters.Unmarshal()
	if err != nil {
		supervisor.Logger(ctx).Warningf("Could not retrieve node parameters from ESP: %v", err)
		paramsESP = nil
	} else {
		supervisor.Logger(ctx).Infof("Retrieved node parameters from ESP")
	}
	if paramsFWCFG == nil && paramsESP == nil {
		return nil, fmt.Errorf("could not find node parameters in ESP or qemu fwcfg")
	}
	if paramsFWCFG != nil && paramsESP != nil {
		supervisor.Logger(ctx).Warningf("Node parameters found in both ESP and qemu fwcfg, using the latter")
		return paramsFWCFG, nil
	} else if paramsFWCFG != nil {
		return paramsFWCFG, nil
	} else {
		return paramsESP, nil
	}
}

// logClusterDirectory verbosely logs the whole Cluster Directory passed to it.
func logClusterDirectory(ctx context.Context, cd *cpb.ClusterDirectory) {
	for _, node := range cd.Nodes {
		id := identity.NodeID(node.PublicKey)
		var addresses []string
		for _, add := range node.Addresses {
			addresses = append(addresses, add.Host)
		}
		supervisor.Logger(ctx).Infof(" Node ID: %s, Addresses: %s", id, strings.Join(addresses, ","))
	}
}

// curatorRemote returns a host:port pair pointing at one of the cluster's
// available Curator endpoints. It returns an empty string and an error if the
// Cluster Directory contains no nodes, or if its first node carries no
// addresses.
// TODO(issues/117): use dynamic cluster client instead
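//
// For example (with a purely hypothetical address), a directory whose first
// node advertises the host "203.0.113.7" resolves to
// "203.0.113.7:<CuratorServicePort>".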
func curatorRemote(cd *cpb.ClusterDirectory) (string, error) {
	if len(cd.Nodes) == 0 {
		return "", fmt.Errorf("the Cluster Directory is empty")
	}
	n := cd.Nodes[0]
	if len(n.Addresses) == 0 {
		return "", fmt.Errorf("the first node in the Cluster Directory doesn't have an associated Address")
	}
	r := n.Addresses[0].Host
	return net.JoinHostPort(r, node.CuratorServicePort.PortString()), nil
}