// Package clusternet implements a Cluster Networking mesh service running on all
// Metropolis nodes.
//
// The mesh is based on wireguard and a centralized configuration store in the
// cluster Curator (in etcd).
//
// While the implementation is nearly generic, it currently makes an assumption
// that it is used only for Kubernetes pod networking. That has a few
// implications:
//
// First, we only have a single real route on the host into the wireguard
// networking mesh / interface, and that is configured ahead of time in the
// Service as ClusterNet. All destination addresses that should be carried by the
// mesh must thus be part of this single route. Otherwise, traffic will be able
// to flow into the node from other nodes, but will exit through another
// interface. This is used in practice to allow other host nodes (whose external
// addresses are outside the cluster network) to access the cluster network.
//
// Second, we only have a single source/owner of prefixes per node: the
// Kubernetes service. This is reflected as the LocalKubernetesPodNetwork event
// Value in Service.
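//
// For example (addresses purely illustrative): if ClusterNet is 10.8.0.0/16
// and the local Kubernetes IPAM assigns this node 10.8.5.0/24, that per-node
// prefix is announced into the mesh and falls within the single ClusterNet
// route. A prefix outside 10.8.0.0/16 could still receive traffic from other
// nodes over the mesh, but replies from this node would exit through a
// different interface.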
package clusternet

import (
	"context"
	"fmt"
	"net"

	"github.com/cenkalti/backoff/v4"

	apb "source.monogon.dev/metropolis/node/core/curator/proto/api"
	"source.monogon.dev/metropolis/node/core/localstorage"
	"source.monogon.dev/metropolis/pkg/event"
	"source.monogon.dev/metropolis/pkg/supervisor"
	cpb "source.monogon.dev/metropolis/proto/common"
)

// Service implements the Cluster Networking Mesh. See package-level docs for
// more details.
type Service struct {
	// Curator is the gRPC client that the service will use to reach the cluster's
	// Curator, for pushing locally announced prefixes and pulling information about
	// other nodes.
	Curator apb.CuratorClient
	// ClusterNet is the prefix that will be programmed to exit through the wireguard
	// mesh.
	ClusterNet net.IPNet
	// DataDirectory is where the WireGuard key of this node will be stored.
	DataDirectory *localstorage.DataKubernetesClusterNetworkingDirectory
	// LocalKubernetesPodNetwork is an event.Value watched for prefixes that should
	// be announced into the mesh. This is to be Set by the Kubernetes service once
	// it knows about the local node's IPAM address assignment.
	LocalKubernetesPodNetwork event.Value[*Prefixes]

	// wg is the interface to all the low-level interactions with WireGuard (and
	// kernel routing). If not set, this defaults to a production implementation.
	// This can be overridden in tests with a test implementation instead.
	wg wireguard
}
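
// A minimal sketch of how a parent runnable might wire this service up. The
// names curator, root, podNetwork and clusterNet are hypothetical stand-ins
// for a connected Curator client, the node's local storage root, the
// event.Value that the Kubernetes service Sets, and the cluster's pod network
// prefix:
//
//	svc := &Service{
//		Curator:                   curator,
//		ClusterNet:                *clusterNet,
//		DataDirectory:             &root.Data.Kubernetes.ClusterNetworking, // path under root is illustrative
//		LocalKubernetesPodNetwork: podNetwork,
//	}
//	if err := supervisor.Run(ctx, "clusternet", svc.Run); err != nil {
//		return fmt.Errorf("failed to start clusternet: %w", err)
//	}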

// Run the Service. This must be used in a supervisor Runnable.
func (s *Service) Run(ctx context.Context) error {
	if s.wg == nil {
		s.wg = &localWireguard{}
	}
	if err := s.wg.ensureOnDiskKey(s.DataDirectory); err != nil {
		return fmt.Errorf("could not ensure wireguard key: %w", err)
	}
	if err := s.wg.setup(&s.ClusterNet); err != nil {
		return fmt.Errorf("could not setup wireguard: %w", err)
	}

	supervisor.Logger(ctx).Infof("Wireguard setup complete, starting updaters...")

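	// The pusher keeps the Curator updated with this node's WireGuard key and
	// announced prefixes, while the puller programs all other nodes as WireGuard
	// peers on the local interface.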
| 75 | if err := supervisor.Run(ctx, "pusher", s.push); err != nil { |
| 76 | return err |
| 77 | } |
| 78 | if err := supervisor.Run(ctx, "puller", s.pull); err != nil { |
| 79 | return err |
| 80 | } |
| 81 | supervisor.Signal(ctx, supervisor.SignalHealthy) |
| 82 | <-ctx.Done() |
| 83 | return ctx.Err() |
| 84 | } |

// push is the sub-runnable responsible for letting the Curator know which
// prefixes are originated by this node.
func (s *Service) push(ctx context.Context) error {
	supervisor.Signal(ctx, supervisor.SignalHealthy)

	w := s.LocalKubernetesPodNetwork.Watch()
	defer w.Close()

	for {
		// We only submit our wireguard key and prefixes when we're actually ready to
		// announce something.
		k8sPrefixes, err := w.Get(ctx)
		if err != nil {
			return fmt.Errorf("couldn't get k8s prefixes: %w", err)
		}

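		// Retry the update with exponential backoff. With the default backoff
		// settings this gives up after roughly 15 minutes of consecutive failures,
		// at which point push returns an error and is restarted by the supervisor.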
		err = backoff.Retry(func() error {
			_, err := s.Curator.UpdateNodeClusterNetworking(ctx, &apb.UpdateNodeClusterNetworkingRequest{
				Clusternet: &cpb.NodeClusterNetworking{
					WireguardPubkey: s.wg.key().PublicKey().String(),
					Prefixes:        k8sPrefixes.proto(),
				},
			})
			if err != nil {
				supervisor.Logger(ctx).Warningf("Could not submit cluster networking update: %v", err)
			}
			return err
		}, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
		if err != nil {
			return fmt.Errorf("couldn't update curator: %w", err)
		}
	}
}

// pull is the sub-runnable responsible for fetching information about the
// cluster networking setup/status of other nodes, and programming it as
// WireGuard peers.
func (s *Service) pull(ctx context.Context) error {
	supervisor.Signal(ctx, supervisor.SignalHealthy)

	srv, err := s.Curator.Watch(ctx, &apb.WatchRequest{
		Kind: &apb.WatchRequest_NodesInCluster_{
			NodesInCluster: &apb.WatchRequest_NodesInCluster{},
		},
	})
	if err != nil {
		return fmt.Errorf("curator watch failed: %w", err)
	}
	defer srv.CloseSend()

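	// nodes accumulates the state of all cluster nodes across consecutive Watch
	// events; each update returns which nodes changed and which were removed
	// since the previous event.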
	nodes := newNodemap()
	for {
		ev, err := srv.Recv()
		if err != nil {
			return fmt.Errorf("curator watch recv failed: %w", err)
		}

		updated, removed := nodes.update(ctx, ev)

		for _, n := range removed {
			supervisor.Logger(ctx).Infof("Node %s removed, unconfiguring", n.id)
			if err := s.wg.unconfigurePeer(n); err != nil {
				// Do nothing and hope whatever caused this will go away at some point.
				supervisor.Logger(ctx).Errorf("Node %s couldn't be unconfigured: %v", n.id, err)
			}
		}
		var newNodes []*node
		for _, n := range updated {
			newNodes = append(newNodes, n)
			supervisor.Logger(ctx).Infof("Node %s updated: pk %s, address %s, prefixes %v", n.id, n.pubkey, n.address, n.prefixes)
		}
		succeeded := 0
		if err := s.wg.configurePeers(newNodes); err != nil {
			// If configuring all nodes at once failed, go node-by-node to make sure we've
			// done as much as possible.
| 161 | supervisor.Logger(ctx).Warningf("Bulk node update call failed, trying node-by-node..: %v", err) |
			for _, n := range newNodes {
				if err := s.wg.configurePeers([]*node{n}); err != nil {
					supervisor.Logger(ctx).Errorf("Node %s failed: %v", n.id, err)
				} else {
					succeeded++
				}
			}
		} else {
			succeeded = len(newNodes)
		}
		supervisor.Logger(ctx).Infof("Successfully updated %d out of %d nodes", succeeded, len(newNodes))
	}
}