Serge Bazanski | 5df62ba | 2023-03-22 17:56:46 +0100 | [diff] [blame] | 1 | // Package roleserve implements the roleserver/“Role Server”. |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 2 | // |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 3 | // The Role Server runs on every node and is responsible for running all of the |
// node's role-dependent services, like the control plane (Consensus/etcd and
| 5 | // Curator) and Kubernetes. It watches the node roles as assigned by the |
| 6 | // cluster's curator, updates the status of the node within the curator, and |
| 7 | // spawns on-demand services. |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 8 | // |
Serge Bazanski | 37110c3 | 2023-03-01 13:57:27 +0000 | [diff] [blame] | 9 | // .-----------. .--------. Watches .------------. |
| 10 | // | Cluster |--------->| Role |<----------| Node Roles | |
| 11 | // | Enrolment | Provides | Server | Updates '------------' |
| 12 | // '-----------' Data | |----. .-------------. |
| 13 | // '--------' '----->| Node Status | |
| 14 | // Spawns | | Spawns '-------------' |
| 15 | // .-----' '-----. |
| 16 | // V V |
| 17 | // .-----------. .------------. |
| 18 | // | Consensus | | Kubernetes | |
| 19 | // | & Curator | | | |
| 20 | // '-----------' '------------' |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 21 | // |
| 22 | // The internal state of the Role Server (eg. status of services, input from |
| 23 | // Cluster Enrolment, current node roles as retrieved from the cluster) is |
| 24 | // stored as in-memory Event Value variables, with some of them being exposed |
| 25 | // externally for other services to consume (ie. ones that wish to depend on |
| 26 | // some information managed by the Role Server but which do not need to be |
| 27 | // spawned on demand by the Role Server). These Event Values and code which acts |
| 28 | // upon them form a reactive/dataflow-driven model which drives the Role Server |
| 29 | // logic forward. |
| 30 | // |
| 31 | // The Role Server also has to handle the complex bootstrap problem involved in |
| 32 | // simultaneously accessing the control plane (for node roles and other cluster |
| 33 | // data) while maintaining (possibly the only one in the cluster) control plane |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 34 | // instance. This problem is resolved by using the RPC resolver package which |
| 35 | // allows dynamic reconfiguration of endpoints as the cluster is running. |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 36 | package roleserve |
| 37 | |
| 38 | import ( |
| 39 | "context" |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 40 | "crypto/ed25519" |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 41 | |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 42 | common "source.monogon.dev/metropolis/node" |
Serge Bazanski | 7920852 | 2023-03-28 20:14:58 +0200 | [diff] [blame] | 43 | "source.monogon.dev/metropolis/node/core/clusternet" |
Serge Bazanski | 5df62ba | 2023-03-22 17:56:46 +0100 | [diff] [blame] | 44 | "source.monogon.dev/metropolis/node/core/curator" |
Lorenz Brun | 1de8b18 | 2021-12-21 17:15:18 +0100 | [diff] [blame] | 45 | "source.monogon.dev/metropolis/node/core/identity" |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 46 | "source.monogon.dev/metropolis/node/core/localstorage" |
| 47 | "source.monogon.dev/metropolis/node/core/network" |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 48 | "source.monogon.dev/metropolis/node/core/rpc/resolver" |
Lorenz Brun | 35fcf03 | 2023-06-29 04:15:58 +0200 | [diff] [blame] | 49 | "source.monogon.dev/metropolis/node/core/update" |
Serge Bazanski | 37110c3 | 2023-03-01 13:57:27 +0000 | [diff] [blame] | 50 | "source.monogon.dev/metropolis/pkg/event/memory" |
Serge Bazanski | e012b72 | 2023-03-29 17:49:04 +0200 | [diff] [blame] | 51 | "source.monogon.dev/metropolis/pkg/logtree" |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 52 | "source.monogon.dev/metropolis/pkg/supervisor" |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 53 | cpb "source.monogon.dev/metropolis/proto/common" |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 54 | ) |
| 55 | |
// Config is the configuration of the role server.
type Config struct {
	// StorageRoot is a handle to access all of the Node's storage. This is needed
	// as the roleserver spawns complex workloads like Kubernetes which need access
	// to a broad range of storage.
	StorageRoot *localstorage.Root

	// Network is a handle to the network service, used by workloads.
	Network *network.Service

	// Resolver is the main, long-lived, authenticated cluster resolver that is used
	// for all subsequent gRPC calls by the subordinates of the roleserver. It is
	// created early in the roleserver lifecycle, and is seeded with node
	// information from the ProvideXXX methods.
	Resolver *resolver.Resolver

	// Update is a handle to the update service, used by workloads.
	Update *update.Service

	// LogTree is the node's log tree. It is handed to the node management worker
	// in New — presumably to expose node logs over the management API; confirm
	// against workerNodeMgmt.
	LogTree *logtree.LogTree
}
| 77 | |
// Service is the roleserver/“Role Server” service. See the package-level
// documentation for more details.
type Service struct {
	Config

	// The following memory.Value fields are the Event Values making up the
	// internal (and partially exported) state of the Role Server. They are wired
	// into the workers below by New.

	// KubernetesStatus is an externally-consumable Event Value carrying the
	// status of the Kubernetes workload; it is wired into the kubernetes worker.
	KubernetesStatus memory.Value[*KubernetesStatus]
	// bootstrapData carries data from cluster bootstrap. It is set by
	// ProvideBootstrapData and wired into the control plane worker.
	bootstrapData memory.Value[*bootstrapData]
	// localRoles carries this node's roles; it is wired into the control plane,
	// kubernetes, rolefetch and metrics workers (presumably written by
	// rolefetch, per its name — confirm against workerRoleFetch).
	localRoles memory.Value[*cpb.NodeRoles]
	// podNetwork carries the pod network prefixes, shared between the
	// kubernetes and clusternet workers.
	podNetwork memory.Value[*clusternet.Prefixes]
	// clusterDirectorySaved tracks whether the cluster directory has been
	// saved. It is set directly by ProvideJoinData and is otherwise shared
	// between the statuspush and hostsfile workers.
	clusterDirectorySaved memory.Value[bool]
	// localControlPlane carries the state of the locally-running control plane,
	// if any; shared between the control plane, statuspush, kubernetes and
	// metrics workers.
	localControlPlane memory.Value[*localControlPlane]
	// CuratorConnection is an externally-consumable Event Value carrying the
	// connection to the cluster's curator. It is set by ProvideRegisterData and
	// ProvideJoinData, and wired into nearly all workers.
	CuratorConnection memory.Value[*curatorConnection]

	// Subordinate workers implementing the Role Server logic; constructed in
	// New and started by Run.
	controlPlane *workerControlPlane
	statusPush   *workerStatusPush
	heartbeat    *workerHeartbeat
	kubernetes   *workerKubernetes
	rolefetch    *workerRoleFetch
	nodeMgmt     *workerNodeMgmt
	clusternet   *workerClusternet
	hostsfile    *workerHostsfile
	metrics      *workerMetrics
}
| 101 | |
| 102 | // New creates a Role Server services from a Config. |
| 103 | func New(c Config) *Service { |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 104 | s := &Service{ |
Serge Bazanski | 58ddc09 | 2022-06-30 18:23:33 +0200 | [diff] [blame] | 105 | Config: c, |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 106 | } |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 107 | s.controlPlane = &workerControlPlane{ |
| 108 | storageRoot: s.StorageRoot, |
| 109 | |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 110 | bootstrapData: &s.bootstrapData, |
| 111 | localRoles: &s.localRoles, |
| 112 | resolver: s.Resolver, |
| 113 | |
| 114 | localControlPlane: &s.localControlPlane, |
| 115 | curatorConnection: &s.CuratorConnection, |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 116 | } |
| 117 | |
| 118 | s.statusPush = &workerStatusPush{ |
| 119 | network: s.Network, |
| 120 | |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 121 | curatorConnection: &s.CuratorConnection, |
| 122 | localControlPlane: &s.localControlPlane, |
Serge Bazanski | 1fb2b10 | 2023-04-06 10:13:46 +0200 | [diff] [blame] | 123 | clusterDirectorySaved: &s.clusterDirectorySaved, |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 124 | } |
| 125 | |
Mateusz Zalega | 32b1929 | 2022-05-17 13:26:55 +0200 | [diff] [blame] | 126 | s.heartbeat = &workerHeartbeat{ |
| 127 | network: s.Network, |
| 128 | |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 129 | curatorConnection: &s.CuratorConnection, |
Mateusz Zalega | 32b1929 | 2022-05-17 13:26:55 +0200 | [diff] [blame] | 130 | } |
| 131 | |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 132 | s.kubernetes = &workerKubernetes{ |
| 133 | network: s.Network, |
| 134 | storageRoot: s.StorageRoot, |
| 135 | |
| 136 | localRoles: &s.localRoles, |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 137 | localControlPlane: &s.localControlPlane, |
| 138 | curatorConnection: &s.CuratorConnection, |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 139 | |
| 140 | kubernetesStatus: &s.KubernetesStatus, |
Serge Bazanski | 7920852 | 2023-03-28 20:14:58 +0200 | [diff] [blame] | 141 | podNetwork: &s.podNetwork, |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 142 | } |
| 143 | |
| 144 | s.rolefetch = &workerRoleFetch{ |
Serge Bazanski | 186109c | 2023-06-21 16:57:36 +0200 | [diff] [blame] | 145 | storageRoot: s.StorageRoot, |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 146 | curatorConnection: &s.CuratorConnection, |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 147 | |
| 148 | localRoles: &s.localRoles, |
| 149 | } |
| 150 | |
Serge Bazanski | b40c008 | 2023-03-29 14:28:04 +0200 | [diff] [blame] | 151 | s.nodeMgmt = &workerNodeMgmt{ |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 152 | curatorConnection: &s.CuratorConnection, |
Serge Bazanski | e012b72 | 2023-03-29 17:49:04 +0200 | [diff] [blame] | 153 | logTree: s.LogTree, |
Lorenz Brun | 35fcf03 | 2023-06-29 04:15:58 +0200 | [diff] [blame] | 154 | updateService: s.Update, |
Serge Bazanski | b40c008 | 2023-03-29 14:28:04 +0200 | [diff] [blame] | 155 | } |
Serge Bazanski | 1fb2b10 | 2023-04-06 10:13:46 +0200 | [diff] [blame] | 156 | |
Serge Bazanski | 7920852 | 2023-03-28 20:14:58 +0200 | [diff] [blame] | 157 | s.clusternet = &workerClusternet{ |
| 158 | storageRoot: s.StorageRoot, |
| 159 | |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 160 | curatorConnection: &s.CuratorConnection, |
Serge Bazanski | 7920852 | 2023-03-28 20:14:58 +0200 | [diff] [blame] | 161 | podNetwork: &s.podNetwork, |
Serge Bazanski | b565cc6 | 2023-03-30 18:43:51 +0200 | [diff] [blame] | 162 | network: s.Network, |
Serge Bazanski | 7920852 | 2023-03-28 20:14:58 +0200 | [diff] [blame] | 163 | } |
Serge Bazanski | b40c008 | 2023-03-29 14:28:04 +0200 | [diff] [blame] | 164 | |
Serge Bazanski | 1fb2b10 | 2023-04-06 10:13:46 +0200 | [diff] [blame] | 165 | s.hostsfile = &workerHostsfile{ |
| 166 | storageRoot: s.StorageRoot, |
| 167 | network: s.Network, |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 168 | curatorConnection: &s.CuratorConnection, |
Serge Bazanski | 1fb2b10 | 2023-04-06 10:13:46 +0200 | [diff] [blame] | 169 | clusterDirectorySaved: &s.clusterDirectorySaved, |
| 170 | } |
| 171 | |
Serge Bazanski | 54e212a | 2023-06-14 13:45:11 +0200 | [diff] [blame] | 172 | s.metrics = &workerMetrics{ |
| 173 | curatorConnection: &s.CuratorConnection, |
Tim Windelschmidt | b551b65 | 2023-07-17 16:01:42 +0200 | [diff] [blame] | 174 | localRoles: &s.localRoles, |
Tim Windelschmidt | fd49f22 | 2023-07-20 14:27:50 +0200 | [diff] [blame] | 175 | localControlplane: &s.localControlPlane, |
Serge Bazanski | 54e212a | 2023-06-14 13:45:11 +0200 | [diff] [blame] | 176 | } |
| 177 | |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 178 | return s |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 179 | } |
| 180 | |
Serge Bazanski | e4a4ce1 | 2023-03-22 18:29:54 +0100 | [diff] [blame] | 181 | func (s *Service) ProvideBootstrapData(privkey ed25519.PrivateKey, iok, cuk, nuk, jkey []byte, icc *curator.Cluster, tpmUsage cpb.NodeTPMUsage) { |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 182 | pubkey := privkey.Public().(ed25519.PublicKey) |
| 183 | nid := identity.NodeID(pubkey) |
| 184 | |
| 185 | // This is the first time we have the node ID, tell the resolver that it's |
| 186 | // available on the loopback interface. |
Serge Bazanski | 58ddc09 | 2022-06-30 18:23:33 +0200 | [diff] [blame] | 187 | s.Resolver.AddOverride(nid, resolver.NodeByHostPort("127.0.0.1", uint16(common.CuratorServicePort))) |
Serge Bazanski | 90a70a0 | 2023-05-30 15:15:27 +0200 | [diff] [blame] | 188 | s.Resolver.AddEndpoint(resolver.NodeByHostPort("127.0.0.1", uint16(common.CuratorServicePort))) |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 189 | |
Serge Bazanski | 37110c3 | 2023-03-01 13:57:27 +0000 | [diff] [blame] | 190 | s.bootstrapData.Set(&bootstrapData{ |
Serge Bazanski | 5df62ba | 2023-03-22 17:56:46 +0100 | [diff] [blame] | 191 | nodePrivateKey: privkey, |
| 192 | initialOwnerKey: iok, |
| 193 | clusterUnlockKey: cuk, |
| 194 | nodeUnlockKey: nuk, |
| 195 | nodePrivateJoinKey: jkey, |
| 196 | initialClusterConfiguration: icc, |
Serge Bazanski | e4a4ce1 | 2023-03-22 18:29:54 +0100 | [diff] [blame] | 197 | nodeTPMUsage: tpmUsage, |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 198 | }) |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 199 | } |
| 200 | |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 201 | func (s *Service) ProvideRegisterData(credentials identity.NodeCredentials, directory *cpb.ClusterDirectory) { |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 202 | // This is the first time we have the node ID, tell the resolver that it's |
| 203 | // available on the loopback interface. |
Serge Bazanski | 58ddc09 | 2022-06-30 18:23:33 +0200 | [diff] [blame] | 204 | s.Resolver.AddOverride(credentials.ID(), resolver.NodeByHostPort("127.0.0.1", uint16(common.CuratorServicePort))) |
Serge Bazanski | 90a70a0 | 2023-05-30 15:15:27 +0200 | [diff] [blame] | 205 | // Also tell the resolver about all the existing nodes in the cluster we just |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 206 | // registered into. The directory passed here was used to issue the initial |
| 207 | // Register call, which means at least one of the nodes was running the control |
| 208 | // plane and thus can be used to seed the rest of the resolver. |
Serge Bazanski | 90a70a0 | 2023-05-30 15:15:27 +0200 | [diff] [blame] | 209 | for _, n := range directory.Nodes { |
Serge Bazanski | 90a70a0 | 2023-05-30 15:15:27 +0200 | [diff] [blame] | 210 | for _, addr := range n.Addresses { |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 211 | s.Resolver.AddEndpoint(resolver.NodeAtAddressWithDefaultPort(addr.Host)) |
Serge Bazanski | 90a70a0 | 2023-05-30 15:15:27 +0200 | [diff] [blame] | 212 | } |
| 213 | } |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 214 | |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 215 | s.CuratorConnection.Set(newCuratorConnection(&credentials, s.Resolver)) |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 216 | } |
| 217 | |
Mateusz Zalega | 2930e99 | 2022-04-25 12:52:35 +0200 | [diff] [blame] | 218 | func (s *Service) ProvideJoinData(credentials identity.NodeCredentials, directory *cpb.ClusterDirectory) { |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 219 | // This is the first time we have the node ID, tell the resolver that it's |
| 220 | // available on the loopback interface. |
Serge Bazanski | 58ddc09 | 2022-06-30 18:23:33 +0200 | [diff] [blame] | 221 | s.Resolver.AddOverride(credentials.ID(), resolver.NodeByHostPort("127.0.0.1", uint16(common.CuratorServicePort))) |
Serge Bazanski | 90a70a0 | 2023-05-30 15:15:27 +0200 | [diff] [blame] | 222 | // Also tell the resolver about all the existing nodes in the cluster we just |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 223 | // joined into. The directory passed here was used to issue the initial |
| 224 | // Join call, which means at least one of the nodes was running the control |
| 225 | // plane and thus can be used to seed the rest of the resolver. |
Serge Bazanski | 90a70a0 | 2023-05-30 15:15:27 +0200 | [diff] [blame] | 226 | for _, n := range directory.Nodes { |
Serge Bazanski | 90a70a0 | 2023-05-30 15:15:27 +0200 | [diff] [blame] | 227 | for _, addr := range n.Addresses { |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 228 | s.Resolver.AddEndpoint(resolver.NodeAtAddressWithDefaultPort(addr.Host)) |
Serge Bazanski | 90a70a0 | 2023-05-30 15:15:27 +0200 | [diff] [blame] | 229 | } |
| 230 | } |
Serge Bazanski | b43d0f0 | 2022-06-23 17:32:10 +0200 | [diff] [blame] | 231 | |
Serge Bazanski | fe3d8fd | 2023-05-30 20:50:09 +0200 | [diff] [blame] | 232 | s.CuratorConnection.Set(newCuratorConnection(&credentials, s.Resolver)) |
Serge Bazanski | 1fb2b10 | 2023-04-06 10:13:46 +0200 | [diff] [blame] | 233 | s.clusterDirectorySaved.Set(true) |
Mateusz Zalega | 2930e99 | 2022-04-25 12:52:35 +0200 | [diff] [blame] | 234 | } |
| 235 | |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 236 | // Run the Role Server service, which uses intermediary workload launchers to |
| 237 | // start/stop subordinate services as the Node's roles change. |
| 238 | func (s *Service) Run(ctx context.Context) error { |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 239 | supervisor.Run(ctx, "controlplane", s.controlPlane.run) |
| 240 | supervisor.Run(ctx, "kubernetes", s.kubernetes.run) |
| 241 | supervisor.Run(ctx, "statuspush", s.statusPush.run) |
Mateusz Zalega | 32b1929 | 2022-05-17 13:26:55 +0200 | [diff] [blame] | 242 | supervisor.Run(ctx, "heartbeat", s.heartbeat.run) |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 243 | supervisor.Run(ctx, "rolefetch", s.rolefetch.run) |
Serge Bazanski | b40c008 | 2023-03-29 14:28:04 +0200 | [diff] [blame] | 244 | supervisor.Run(ctx, "nodemgmt", s.nodeMgmt.run) |
Serge Bazanski | 7920852 | 2023-03-28 20:14:58 +0200 | [diff] [blame] | 245 | supervisor.Run(ctx, "clusternet", s.clusternet.run) |
Serge Bazanski | 1fb2b10 | 2023-04-06 10:13:46 +0200 | [diff] [blame] | 246 | supervisor.Run(ctx, "hostsfile", s.hostsfile.run) |
Serge Bazanski | 54e212a | 2023-06-14 13:45:11 +0200 | [diff] [blame] | 247 | supervisor.Run(ctx, "metrics", s.metrics.run) |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 248 | supervisor.Signal(ctx, supervisor.SignalHealthy) |
| 249 | |
Serge Bazanski | 6dff6d6 | 2022-01-28 18:15:14 +0100 | [diff] [blame] | 250 | <-ctx.Done() |
| 251 | return ctx.Err() |
Serge Bazanski | 0d93777 | 2021-06-17 15:54:40 +0200 | [diff] [blame] | 252 | } |