m/n/kubernetes: start splitting, run apiproxy

This begins the process of splitting the Kubernetes service into a
Controller service and a Worker service.

First, we rename the existing service to a Controller, create a Worker
service, and make the Worker service run our new tinylb-based apiserver
load balancer.
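
For illustration, the Worker-side wiring could look roughly like this (a
minimal sketch: runAPIProxy, the runnable name and the use of the
supervisor package's Run helper are assumptions, and the tinylb server
that actually consumes the backend set is elided):

    // Hypothetical Worker-side wiring; names are illustrative.
    func runAPIProxy(ctx context.Context, cur ipb.CuratorClient) error {
        var backends memory.Value[tinylb.BackendSet]
        // Keep the backend set in sync with the cluster as seen by the curator.
        if err := supervisor.Run(ctx, "updater", func(ctx context.Context) error {
            return updateLoadbalancerAPIServers(ctx, &backends, cur)
        }); err != nil {
            return err
        }
        // A tinylb server would then read from backends and proxy node-local
        // apiserver traffic to the controller nodes; that part is elided here.
        <-ctx.Done()
        return ctx.Err()
    }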

We also make the roleserver aware of this split by having it spawn the
Controller and Worker services according to the node's roles.
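
Schematically, the role-driven spawning would look something like this
(illustrative only; the role fields and service handles are assumptions,
not the actual roleserver code):

    // Hypothetical role-based spawning in the roleserver.
    if roles.KubernetesController != nil {
        if err := supervisor.Run(ctx, "controller", controllerSvc.Run); err != nil {
            return err
        }
    }
    if roles.KubernetesWorker != nil {
        if err := supervisor.Run(ctx, "worker", workerSvc.Run); err != nil {
            return err
        }
    }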

We will move services to the Worker in follow-up changes.

Change-Id: I76e98baa0603ad5df30b5892dd69154b895b35fa
Reviewed-on: https://review.monogon.dev/c/monogon/+/1374
Reviewed-by: Lorenz Brun <lorenz@monogon.tech>
Tested-by: Jenkins CI
diff --git a/metropolis/node/kubernetes/apiproxy.go b/metropolis/node/kubernetes/apiproxy.go
new file mode 100644
index 0000000..9f9c851
--- /dev/null
+++ b/metropolis/node/kubernetes/apiproxy.go
@@ -0,0 +1,52 @@
+package kubernetes
+
+import (
+	"context"
+	"fmt"
+	"net"
+
+	"source.monogon.dev/go/net/tinylb"
+	"source.monogon.dev/metropolis/node"
+	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
+	"source.monogon.dev/metropolis/pkg/event/memory"
+)
+
+// updateLoadbalancerAPIServers keeps the given tinylb BackendSet memory value
+// populated with the currently known nodes running a Kubernetes apiserver, as
+// retrieved from the given curator client.
+func updateLoadbalancerAPIServers(ctx context.Context, val *memory.Value[tinylb.BackendSet], cur ipb.CuratorClient) error {
+	w, err := cur.Watch(ctx, &ipb.WatchRequest{
+		Kind: &ipb.WatchRequest_NodesInCluster_{
+			NodesInCluster: &ipb.WatchRequest_NodesInCluster{},
+		},
+	})
+	if err != nil {
+		return fmt.Errorf("watch failed: %w", err)
+	}
+	defer w.CloseSend()
+
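+	// Seed the value with an empty backend set before any watch events arrive.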
+	set := &tinylb.BackendSet{}
+	val.Set(set.Clone())
+	for {
+		ev, err := w.Recv()
+		if err != nil {
+			return fmt.Errorf("receive failed: %w", err)
+		}
+
+		for _, n := range ev.Nodes {
+			// Nodes without a known external address cannot be used as backends.
+			if n.Status == nil || n.Status.ExternalAddress == "" {
+				set.Delete(n.Id)
+				continue
+			}
+			// Only nodes with the KubernetesController role run an apiserver.
+			if n.Roles == nil || n.Roles.KubernetesController == nil {
+				set.Delete(n.Id)
+				continue
+			}
+			set.Insert(n.Id, &tinylb.SimpleTCPBackend{
+				Remote: net.JoinHostPort(n.Status.ExternalAddress, node.KubernetesAPIPort.PortString()),
+			})
+		}
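+		// Nodes removed from the cluster arrive as tombstones; drop them from
+		// the backend set.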
+		for _, t := range ev.NodeTombstones {
+			set.Delete(t.NodeId)
+		}
+		val.Set(set.Clone())
+	}
+}