blob: 1de24f3f26a254facdac8b563676a6ff1a2f0ddc [file] [log] [blame]
Serge Bazanski42e61c62021-03-18 15:07:18 +01001// Copyright 2020 The Monogon Project Authors.
2//
3// SPDX-License-Identifier: Apache-2.0
4//
5// Licensed under the Apache License, Version 2.0 (the "License");
6// you may not use this file except in compliance with the License.
7// You may obtain a copy of the License at
8//
9// http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing, software
12// distributed under the License is distributed on an "AS IS" BASIS,
13// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14// See the License for the specific language governing permissions and
15// limitations under the License.
16
// Package cluster implements low-level clustering logic, especially logic
18// bootstrapping, registering into and joining a cluster. Its goal is to provide
19// the rest of the node code with the following:
20// - A mounted plaintext storage.
21// - Node credentials/identity.
22// - A locally running etcd server if the node is supposed to run one, and a
23// client connection to that etcd cluster if so.
24// - The state of the cluster as seen by the node, to enable code to respond to
25// node lifecycle changes.
Serge Bazanski42e61c62021-03-18 15:07:18 +010026package cluster
27
28import (
Serge Bazanskia959cbd2021-06-17 15:56:51 +020029 "context"
Leopold Schabela5545282021-12-04 23:29:44 +010030 "encoding/base64"
Serge Bazanskia959cbd2021-06-17 15:56:51 +020031 "errors"
Serge Bazanski42e61c62021-03-18 15:07:18 +010032 "fmt"
Leopold Schabela5545282021-12-04 23:29:44 +010033 "io"
Mateusz Zalega2930e992022-04-25 12:52:35 +020034 "net"
Leopold Schabela5545282021-12-04 23:29:44 +010035 "net/http"
Lorenz Brun764a2de2021-11-22 16:26:36 +010036 "os"
Mateusz Zalega2930e992022-04-25 12:52:35 +020037 "strings"
Serge Bazanskia959cbd2021-06-17 15:56:51 +020038 "sync"
Serge Bazanski42e61c62021-03-18 15:07:18 +010039
Leopold Schabela5545282021-12-04 23:29:44 +010040 "github.com/cenkalti/backoff/v4"
Serge Bazanskia959cbd2021-06-17 15:56:51 +020041 "google.golang.org/protobuf/proto"
42
Mateusz Zalega2930e992022-04-25 12:52:35 +020043 "source.monogon.dev/metropolis/node"
Serge Bazanskia959cbd2021-06-17 15:56:51 +020044 "source.monogon.dev/metropolis/node/core/consensus"
Mateusz Zalega2930e992022-04-25 12:52:35 +020045 "source.monogon.dev/metropolis/node/core/identity"
Serge Bazanskia959cbd2021-06-17 15:56:51 +020046 "source.monogon.dev/metropolis/node/core/localstorage"
47 "source.monogon.dev/metropolis/node/core/network"
Serge Bazanski6dff6d62022-01-28 18:15:14 +010048 "source.monogon.dev/metropolis/node/core/roleserve"
Serge Bazanskia959cbd2021-06-17 15:56:51 +020049 "source.monogon.dev/metropolis/pkg/event/memory"
50 "source.monogon.dev/metropolis/pkg/supervisor"
51 apb "source.monogon.dev/metropolis/proto/api"
Mateusz Zalega2930e992022-04-25 12:52:35 +020052 cpb "source.monogon.dev/metropolis/proto/common"
Serge Bazanskia959cbd2021-06-17 15:56:51 +020053 ppb "source.monogon.dev/metropolis/proto/private"
Serge Bazanski42e61c62021-03-18 15:07:18 +010054)
55
// state is the mutable portion of the Manager, guarded by mu.
type state struct {
	// mu protects all fields below.
	mu sync.RWMutex

	// oneway is set once Run has been started; the Manager is one-shot and
	// must never be restarted (Run errors out if this is already set).
	oneway bool

	// configuration is the node's sealed configuration. Not populated in the
	// visible code — presumably set during enrolment; verify in bootstrap/
	// register/join.
	configuration *ppb.SealedConfiguration
}
63
// Manager drives the node's cluster membership lifecycle: based on on-disk
// state and node parameters it either bootstraps a new cluster, registers
// into an existing one, or joins a cluster the node already belongs to (see
// Run). It is a one-shot supervisor runnable.
type Manager struct {
	// storageRoot is the node's local storage hierarchy (ESP, data, ...).
	storageRoot *localstorage.Root
	// networkService is the already-running network service.
	networkService *network.Service
	// roleServer is the role server the Manager feeds cluster state into.
	roleServer *roleserve.Service
	// status is an event value; what it publishes is not visible in this
	// file — presumably cluster/manager status updates. TODO confirm.
	status memory.Value

	// state holds the mutex-guarded mutable state of the Manager.
	state

	// consensus is the spawned etcd/consensus service, if the Manager brought
	// up a Node that should run one.
	consensus *consensus.Service
}
76
77// NewManager creates a new cluster Manager. The given localstorage Root must
78// be places, but not yet started (and will be started as the Manager makes
79// progress). The given network Service must already be running.
Serge Bazanski6dff6d62022-01-28 18:15:14 +010080func NewManager(storageRoot *localstorage.Root, networkService *network.Service, rs *roleserve.Service) *Manager {
Serge Bazanskia959cbd2021-06-17 15:56:51 +020081 return &Manager{
82 storageRoot: storageRoot,
83 networkService: networkService,
Serge Bazanski6dff6d62022-01-28 18:15:14 +010084 roleServer: rs,
Serge Bazanskia959cbd2021-06-17 15:56:51 +020085
86 state: state{},
87 }
88}
89
// lock takes the write lock over the Manager's state and returns the state
// together with the function that releases the lock.
func (m *Manager) lock() (*state, func()) {
	m.mu.Lock()
	return &m.state, m.mu.Unlock
}
94
// rlock takes the read lock over the Manager's state and returns the state
// together with the function that releases the lock.
func (m *Manager) rlock() (*state, func()) {
	m.mu.RLock()
	return &m.state, m.mu.RUnlock
}
99
// Run is the runnable of the Manager, to be started using the Supervisor. It
// is one-shot, and should not be restarted.
//
// Depending on on-disk state and provided node parameters it takes one of
// three paths: join (sealed configuration already present), bootstrap (node
// parameters request a new cluster), or register (node parameters point at an
// existing cluster).
func (m *Manager) Run(ctx context.Context) error {
	// Enforce one-shot semantics: refuse to run a second time.
	state, unlock := m.lock()
	if state.oneway {
		unlock()
		// TODO(q3k): restart the entire system if this happens
		return fmt.Errorf("cannot restart cluster manager")
	}
	state.oneway = true
	unlock()

	// A sealed configuration on the ESP means this node already enrolled into
	// a cluster at some point — rejoin it.
	configuration, err := m.storageRoot.ESP.Metropolis.SealedConfiguration.Unseal()
	if err == nil {
		supervisor.Logger(ctx).Info("Sealed configuration present. attempting to join cluster")

		// Read Cluster Directory and unmarshal it. Since the node is already
		// registered with the cluster, the directory won't be bootstrapped from
		// Node Parameters.
		cd, err := m.storageRoot.ESP.Metropolis.ClusterDirectory.Unmarshal()
		if err != nil {
			return fmt.Errorf("while reading cluster directory: %w", err)
		}
		return m.join(ctx, configuration, cd)
	}

	// Only "no sealed configuration" is expected here; anything else (e.g.
	// corruption) is fatal.
	if !errors.Is(err, localstorage.ErrNoSealed) {
		return fmt.Errorf("unexpected sealed config error: %w", err)
	}

	supervisor.Logger(ctx).Info("No sealed configuration, looking for node parameters")

	params, err := m.nodeParams(ctx)
	if err != nil {
		return fmt.Errorf("no parameters available: %w", err)
	}

	// Dispatch on the oneof inside NodeParameters: bootstrap a brand new
	// cluster or register into an existing one.
	switch inner := params.Cluster.(type) {
	case *apb.NodeParameters_ClusterBootstrap_:
		err = m.bootstrap(ctx, inner.ClusterBootstrap)
	case *apb.NodeParameters_ClusterRegister_:
		err = m.register(ctx, inner.ClusterRegister)
	default:
		err = fmt.Errorf("node parameters misconfigured: neither cluster_bootstrap nor cluster_register set")
	}

	if err == nil {
		supervisor.Logger(ctx).Info("Cluster enrolment done.")
	}
	return err
}
151
Serge Bazanskia959cbd2021-06-17 15:56:51 +0200152func (m *Manager) nodeParamsFWCFG(ctx context.Context) (*apb.NodeParameters, error) {
Lorenz Brun764a2de2021-11-22 16:26:36 +0100153 bytes, err := os.ReadFile("/sys/firmware/qemu_fw_cfg/by_name/dev.monogon.metropolis/parameters.pb/raw")
Serge Bazanskia959cbd2021-06-17 15:56:51 +0200154 if err != nil {
155 return nil, fmt.Errorf("could not read firmware enrolment file: %w", err)
156 }
157
158 config := apb.NodeParameters{}
159 err = proto.Unmarshal(bytes, &config)
160 if err != nil {
161 return nil, fmt.Errorf("could not unmarshal: %v", err)
162 }
163
164 return &config, nil
165}
166
Leopold Schabela5545282021-12-04 23:29:44 +0100167// nodeParamsGCPMetadata attempts to retrieve the node parameters from the
168// GCP metadata service. Returns nil if the metadata service is available,
169// but no node parameters are specified.
170func (m *Manager) nodeParamsGCPMetadata(ctx context.Context) (*apb.NodeParameters, error) {
171 const metadataURL = "http://169.254.169.254/computeMetadata/v1/instance/attributes/metropolis-node-params"
172 req, err := http.NewRequestWithContext(ctx, "GET", metadataURL, nil)
173 if err != nil {
174 return nil, fmt.Errorf("could not create request: %w", err)
175 }
176 req.Header.Set("Metadata-Flavor", "Google")
177 resp, err := http.DefaultClient.Do(req)
178 if err != nil {
179 return nil, fmt.Errorf("HTTP request failed: %w", err)
180 }
181 defer resp.Body.Close()
182 if resp.StatusCode != http.StatusOK {
183 if resp.StatusCode == http.StatusNotFound {
184 return nil, nil
185 }
186 return nil, fmt.Errorf("non-200 status code: %d", resp.StatusCode)
187 }
188 decoded, err := io.ReadAll(base64.NewDecoder(base64.StdEncoding, resp.Body))
189 if err != nil {
190 return nil, fmt.Errorf("cannot decode base64: %w", err)
191 }
192 config := apb.NodeParameters{}
193 err = proto.Unmarshal(decoded, &config)
194 if err != nil {
195 return nil, fmt.Errorf("failed unmarshalling NodeParameters: %w", err)
196 }
197 return &config, nil
198}
199
Serge Bazanskia959cbd2021-06-17 15:56:51 +0200200func (m *Manager) nodeParams(ctx context.Context) (*apb.NodeParameters, error) {
Leopold Schabela5545282021-12-04 23:29:44 +0100201 boardName, err := getDMIBoardName()
202 if err != nil {
203 supervisor.Logger(ctx).Warningf("Could not get board name, cannot detect platform: %v", err)
204 }
205 supervisor.Logger(ctx).Infof("Board name: %q", boardName)
206
207 // When running on GCP, attempt to retrieve the node parameters from the
208 // metadata server first. Retry until we get a response, since we need to
209 // wait for the network service to assign an IP address first.
210 if isGCPInstance(boardName) {
211 var params *apb.NodeParameters
212 op := func() error {
213 supervisor.Logger(ctx).Info("Running on GCP, attempting to retrieve node parameters from metadata server")
214 params, err = m.nodeParamsGCPMetadata(ctx)
215 return err
216 }
217 err := backoff.Retry(op, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
218 if err != nil {
219 supervisor.Logger(ctx).Errorf("Failed to retrieve node parameters: %v", err)
220 }
221 if params != nil {
222 supervisor.Logger(ctx).Info("Retrieved parameters from GCP metadata server")
223 return params, nil
224 }
225 supervisor.Logger(ctx).Infof("\"metropolis-node-params\" metadata not found")
226 }
227
Serge Bazanskia959cbd2021-06-17 15:56:51 +0200228 // Retrieve node parameters from qemu's fwcfg interface or ESP.
229 // TODO(q3k): probably abstract this away and implement per platform/build/...
230 paramsFWCFG, err := m.nodeParamsFWCFG(ctx)
231 if err != nil {
232 supervisor.Logger(ctx).Warningf("Could not retrieve node parameters from qemu fwcfg: %v", err)
233 paramsFWCFG = nil
234 } else {
235 supervisor.Logger(ctx).Infof("Retrieved node parameters from qemu fwcfg")
236 }
Lorenz Brun6c35e972021-12-14 03:08:23 +0100237 paramsESP, err := m.storageRoot.ESP.Metropolis.NodeParameters.Unmarshal()
Serge Bazanskia959cbd2021-06-17 15:56:51 +0200238 if err != nil {
239 supervisor.Logger(ctx).Warningf("Could not retrieve node parameters from ESP: %v", err)
240 paramsESP = nil
241 } else {
242 supervisor.Logger(ctx).Infof("Retrieved node parameters from ESP")
243 }
244 if paramsFWCFG == nil && paramsESP == nil {
245 return nil, fmt.Errorf("could not find node parameters in ESP or qemu fwcfg")
246 }
247 if paramsFWCFG != nil && paramsESP != nil {
248 supervisor.Logger(ctx).Warningf("Node parameters found both in both ESP and qemu fwcfg, using the latter")
249 return paramsFWCFG, nil
250 } else if paramsFWCFG != nil {
251 return paramsFWCFG, nil
252 } else {
253 return paramsESP, nil
254 }
255}
256
Mateusz Zalega2930e992022-04-25 12:52:35 +0200257// logClusterDirectory verbosely logs the whole Cluster Directory passed to it.
258func logClusterDirectory(ctx context.Context, cd *cpb.ClusterDirectory) {
259 for _, node := range cd.Nodes {
260 id := identity.NodeID(node.PublicKey)
261 var addresses []string
262 for _, add := range node.Addresses {
263 addresses = append(addresses, add.Host)
264 }
265 supervisor.Logger(ctx).Infof(" Node ID: %s, Addresses: %s", id, strings.Join(addresses, ","))
266 }
267}
268
269// curatorRemote returns a host:port pair pointing at one of the cluster's
270// available Curator endpoints. It will return an empty string, and an error,
271// if the cluster directory is empty.
272// TODO(issues/117): use dynamic cluster client instead
273func curatorRemote(cd *cpb.ClusterDirectory) (string, error) {
274 if len(cd.Nodes) == 0 {
275 return "", fmt.Errorf("the Cluster Directory is empty.")
276 }
277 n := cd.Nodes[0]
278 if len(n.Addresses) == 0 {
279 return "", fmt.Errorf("the first node in the Cluster Directory doesn't have an associated Address.")
280 }
281 r := n.Addresses[0].Host
282 return net.JoinHostPort(r, node.CuratorServicePort.PortString()), nil
Serge Bazanskia959cbd2021-06-17 15:56:51 +0200283}