// Package clusternet implements a Cluster Networking mesh service running on all
// Metropolis nodes.
//
// The mesh is based on WireGuard and a centralized configuration store in the
// cluster Curator (in etcd).
//
// While the implementation is nearly generic, it currently assumes that it is
// used only for Kubernetes pod networking. That has a few implications:
//
// First, we only have a single real route on the host into the WireGuard mesh
// interface, and that route is configured ahead of time in the Service as
// ClusterNet. All destination addresses that should be carried by the mesh must
// thus be part of this single route. Otherwise, traffic will be able to flow
// into the node from other nodes, but will exit through another interface. In
// practice this is used to allow other host nodes (whose external addresses are
// outside the cluster network) to access the cluster network.
//
// Second, we have two hardcoded, purpose-specific sources of prefixes (see the
// example below):
//  1. Pod networking node prefixes from the kubelet.
//  2. The host's external IP address (as a /32) from the network service.
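//
// For illustration, a node whose kubelet was assigned 10.8.4.0/24 for pods and
// whose external address is 203.0.113.7 would end up announcing roughly the
// following set (a sketch only; the addresses are made up and Prefixes is used
// as in push below):
//
//	prefixes := Prefixes{
//		netip.MustParsePrefix("10.8.4.0/24"),    // pod network, from the kubelet
//		netip.MustParsePrefix("203.0.113.7/32"), // node address, from the network service
//	}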
package clusternet

import (
	"context"
	"fmt"
	"net"
	"net/netip"

	"github.com/cenkalti/backoff/v4"

	"source.monogon.dev/metropolis/node/core/localstorage"
	"source.monogon.dev/metropolis/node/core/network"
	"source.monogon.dev/metropolis/pkg/event"
	"source.monogon.dev/metropolis/pkg/supervisor"

	apb "source.monogon.dev/metropolis/node/core/curator/proto/api"
	cpb "source.monogon.dev/metropolis/proto/common"
)

// Service implements the Cluster Networking Mesh. See package-level docs for
// more details.
type Service struct {
	// Curator is the gRPC client that the service will use to reach the cluster's
	// Curator, for pushing locally announced prefixes and pulling information
	// about other nodes.
	Curator apb.CuratorClient
	// ClusterNet is the prefix that will be programmed to exit through the
	// WireGuard mesh.
	ClusterNet net.IPNet
	// DataDirectory is where the WireGuard key of this node will be stored.
	DataDirectory *localstorage.DataKubernetesClusterNetworkingDirectory
	// LocalKubernetesPodNetwork is an event.Value watched for prefixes that
	// should be announced into the mesh. It is to be Set by the Kubernetes
	// service once it knows about the local node's IPAM address assignment.
	LocalKubernetesPodNetwork event.Value[*Prefixes]
	// Network is the network service's status, watched to obtain the local
	// node's external IP address, which is announced into the mesh as a /32.
	Network event.Value[*network.Status]

	// wg is the interface to all the low-level interactions with WireGuard (and
	// kernel routing). If not set, it defaults to a production implementation.
	// Tests can override it with a test implementation instead.
	wg wireguard
}

// Run the Service. This must be used in a supervisor Runnable.
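//
// A minimal wiring sketch, assuming the caller already holds a Curator client
// and the relevant event.Values (all variable names below are illustrative):
//
//	svc := &Service{
//		Curator:                   cur,          // apb.CuratorClient
//		ClusterNet:                clusterNet,   // net.IPNet covering the whole mesh
//		DataDirectory:             dataDir,      // *localstorage.DataKubernetesClusterNetworkingDirectory
//		LocalKubernetesPodNetwork: podNetValue,  // event.Value[*Prefixes]
//		Network:                   netStatValue, // event.Value[*network.Status]
//	}
//	if err := supervisor.Run(ctx, "clusternet", svc.Run); err != nil {
//		return err
//	}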
func (s *Service) Run(ctx context.Context) error {
	if s.wg == nil {
		s.wg = &localWireguard{}
	}
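	// Ensure this node has a persistent WireGuard private key on disk (in
	// DataDirectory) before bringing up the mesh interface.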
	if err := s.wg.ensureOnDiskKey(s.DataDirectory); err != nil {
		return fmt.Errorf("could not ensure wireguard key: %w", err)
	}
	if err := s.wg.setup(&s.ClusterNet); err != nil {
		return fmt.Errorf("could not setup wireguard: %w", err)
	}

	supervisor.Logger(ctx).Infof("Wireguard setup complete, starting updaters...")

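	// Fan in the two local prefix sources: the Kubernetes pod network and the
	// node's network status are piped into channels consumed by the push
	// runnable, all supervised as a single group.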
	kubeC := make(chan *Prefixes)
	netC := make(chan *network.Status)
	if err := supervisor.RunGroup(ctx, map[string]supervisor.Runnable{
		"source-kubernetes": event.Pipe(s.LocalKubernetesPodNetwork, kubeC),
		"source-network":    event.Pipe(s.Network, netC),
		"push": func(ctx context.Context) error {
			return s.push(ctx, kubeC, netC)
		},
	}); err != nil {
		return err
	}

	if err := supervisor.Run(ctx, "pull", s.pull); err != nil {
		return err
	}
	supervisor.Signal(ctx, supervisor.SignalHealthy)
	<-ctx.Done()
	return ctx.Err()
}

// push is the sub-runnable responsible for letting the Curator know which
// prefixes are originated by this node.
func (s *Service) push(ctx context.Context, kubeC chan *Prefixes, netC chan *network.Status) error {
	supervisor.Signal(ctx, supervisor.SignalHealthy)

	var kubePrefixes *Prefixes
	var localAddr net.IP
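	// Keep the most recently seen value from each source and resubmit the full
	// set of prefixes to the Curator whenever either of them changes.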
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case kubePrefixes = <-kubeC:
		case n := <-netC:
			localAddr = n.ExternalAddress
		}

		// Prepare prefixes to submit to the cluster.
		var prefixes Prefixes

		// Do we have a local node address? Add it to the prefixes.
		if len(localAddr) > 0 {
			addr, ok := netip.AddrFromSlice(localAddr)
			if ok {
				// Unmap so that a 4-in-6 encoded net.IP still yields a plain
				// IPv4 /32.
				prefixes = append(prefixes, netip.PrefixFrom(addr.Unmap(), 32))
			}
		}
		// Do we have any kubelet prefixes? Add them, too.
		if kubePrefixes != nil {
			prefixes.Update(kubePrefixes)
		}

		supervisor.Logger(ctx).Infof("Submitting prefixes: %s", prefixes)

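		// Retry the update with exponential backoff so that transient Curator
		// unavailability does not immediately fail the runnable; if the backoff
		// gives up, the error is returned below and the supervisor restarts push.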
		err := backoff.Retry(func() error {
			_, err := s.Curator.UpdateNodeClusterNetworking(ctx, &apb.UpdateNodeClusterNetworkingRequest{
				Clusternet: &cpb.NodeClusterNetworking{
					WireguardPubkey: s.wg.key().PublicKey().String(),
					Prefixes:        prefixes.proto(),
				},
			})
			if err != nil {
				supervisor.Logger(ctx).Warningf("Could not submit cluster networking update: %v", err)
			}
			return err
		}, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
		if err != nil {
			return fmt.Errorf("couldn't update curator: %w", err)
		}
	}
}

// pull is the sub-runnable responsible for fetching information about the
// cluster networking setup/status of other nodes, and programming it as
// WireGuard peers.
func (s *Service) pull(ctx context.Context) error {
	supervisor.Signal(ctx, supervisor.SignalHealthy)

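	// Subscribe to updates about all nodes in the cluster through the Curator's
	// watch API.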
	srv, err := s.Curator.Watch(ctx, &apb.WatchRequest{
		Kind: &apb.WatchRequest_NodesInCluster_{
			NodesInCluster: &apb.WatchRequest_NodesInCluster{},
		},
	})
	if err != nil {
		return fmt.Errorf("curator watch failed: %w", err)
	}
	defer srv.CloseSend()

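	// nodes tracks the last known cluster networking state of every peer, so
	// that each watch event can be reduced to a set of updated and removed
	// nodes.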
	nodes := newNodemap()
	for {
		ev, err := srv.Recv()
		if err != nil {
			return fmt.Errorf("curator watch recv failed: %w", err)
		}

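		// Fold the event into the node map, then reconfigure WireGuard
		// accordingly: removed nodes are unconfigured as peers, updated nodes
		// are (re)configured.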
		updated, removed := nodes.update(ctx, ev)

		for _, n := range removed {
			supervisor.Logger(ctx).Infof("Node %s removed, unconfiguring", n.id)
			if err := s.wg.unconfigurePeer(n); err != nil {
				// Do nothing and hope whatever caused this will go away at some point.
				supervisor.Logger(ctx).Errorf("Node %s couldn't be unconfigured: %v", n.id, err)
			}
		}
		var newNodes []*node
		for _, n := range updated {
			newNodes = append(newNodes, n)
			supervisor.Logger(ctx).Infof("Node %s updated: pk %s, address %s, prefixes %v", n.id, n.pubkey, n.address, n.prefixes)
		}
		succeeded := 0
		if err := s.wg.configurePeers(newNodes); err != nil {
			// If configuring all nodes at once failed, go node-by-node to make sure we've
			// done as much as possible.
			supervisor.Logger(ctx).Warningf("Bulk node update call failed, trying node-by-node..: %v", err)
			for _, n := range newNodes {
				if err := s.wg.configurePeers([]*node{n}); err != nil {
					supervisor.Logger(ctx).Errorf("Node %s failed: %v", n.id, err)
				} else {
					succeeded += 1
				}
			}
		} else {
			succeeded = len(newNodes)
		}
		supervisor.Logger(ctx).Infof("Successfully updated %d out of %d nodes", succeeded, len(newNodes))
	}
}