m/test: implement SOCKS proxy in cluster tests

This uses the new socksproxy package to run a proxy server in the
nanoswitch, and uses it within tests to access the test cluster's nodes.
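For illustration, a minimal sketch of how a test can now reach a node's gRPC
services through the cluster's dialer (this mirrors the e2e test change in the
diff below; error handling abbreviated):

    // Dial the first node's curator through the cluster's SOCKS-backed dialer
    // instead of relying on a host port forward.
    creds := rpc.NewAuthenticatedCredentials(cluster.Owner, nil)
    remote := net.JoinHostPort(cluster.NodeIDs[0], common.CuratorServicePort.PortString())
    cl, err := grpc.Dial(remote,
        grpc.WithContextDialer(cluster.DialNode),
        grpc.WithTransportCredentials(creds))
    if err != nil {
        // handle error
    }
    defer cl.Close()
    mgmt := apb.NewManagementClient(cl)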

The cluster test code (and nanoswitch) still forwards traffic to the
first node, but this will be gradually removed as SOCKS support is
implemented in metroctl and the debug tool. Forwards from host ports to
different nodes can then be implemented as part of the dbg tool (instead
of the cluster launch code) to keep the interface simple during debugging
and development.
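
As a rough sketch of what such a forward could look like in the dbg tool
(hypothetical; the proxy address, node name and ports are placeholders, and
the eventual implementation may differ), a local listener can pipe each
connection through a standard SOCKS5 client dialer such as the one in
golang.org/x/net/proxy:

    // Hypothetical host-side forward through the test launch SOCKS proxy.
    package main

    import (
        "io"
        "log"
        "net"

        "golang.org/x/net/proxy"
    )

    func main() {
        // Build a SOCKS5 client dialer pointing at the proxy exposed by the
        // test launch code (address is a placeholder).
        dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
        if err != nil {
            log.Fatalf("building SOCKS dialer: %v", err)
        }

        // Accept local connections and pipe each one to a node port that is
        // only reachable through the proxy (node name/port are placeholders).
        l, err := net.Listen("tcp", "127.0.0.1:6443")
        if err != nil {
            log.Fatalf("listen: %v", err)
        }
        for {
            local, err := l.Accept()
            if err != nil {
                log.Fatalf("accept: %v", err)
            }
            go func() {
                remote, err := dialer.Dial("tcp", "some-node:6443")
                if err != nil {
                    local.Close()
                    return
                }
                go func() { io.Copy(remote, local); remote.Close() }()
                io.Copy(local, remote)
                local.Close()
            }()
        }
    }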

We also take this opportunity to make the non-cluster launch code not
Metropolis-specific (by removing the assumption that all ports on all
nodes are Metropolis ports). In the long term, we will probably remove
non-cluster launches entirely (or reduce this code to being just a
'launch qemu' wrapper).

Change-Id: I9b321bde95ba74fbfaa695eaaad8f9974aba5372
Reviewed-on: https://review.monogon.dev/c/monogon/+/648
Reviewed-by: Lorenz Brun <lorenz@monogon.tech>
diff --git a/metropolis/test/e2e/main_test.go b/metropolis/test/e2e/main_test.go
index f7dfff8..51dbe4b 100644
--- a/metropolis/test/e2e/main_test.go
+++ b/metropolis/test/e2e/main_test.go
@@ -30,6 +30,7 @@
 	"testing"
 	"time"
 
+	"google.golang.org/grpc"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -37,6 +38,7 @@
 
 	common "source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/identity"
+	"source.monogon.dev/metropolis/node/core/rpc"
 	apb "source.monogon.dev/metropolis/proto/api"
 	"source.monogon.dev/metropolis/test/launch/cluster"
 )
@@ -96,13 +98,23 @@
 
 	log.Printf("E2E: Cluster running, starting tests...")
 
+	// Dial first node's curator.
+	creds := rpc.NewAuthenticatedCredentials(cluster.Owner, nil)
+	remote := net.JoinHostPort(cluster.NodeIDs[0], common.CuratorServicePort.PortString())
+	cl, err := grpc.Dial(remote, grpc.WithContextDialer(cluster.DialNode), grpc.WithTransportCredentials(creds))
+	if err != nil {
+		t.Fatalf("failed to dial first node's curator: %v", err)
+	}
+	defer cl.Close()
+	mgmt := apb.NewManagementClient(cl)
+
 	// This exists to keep the parent around while all the children race.
 	// It currently tests both a set of OS-level conditions and Kubernetes
 	// Deployments and StatefulSets
 	t.Run("RunGroup", func(t *testing.T) {
 		t.Run("Cluster", func(t *testing.T) {
 			testEventual(t, "Retrieving cluster directory sucessful", ctx, 60*time.Second, func(ctx context.Context) error {
-				res, err := cluster.Management.GetClusterInfo(ctx, &apb.GetClusterInfoRequest{})
+				res, err := mgmt.GetClusterInfo(ctx, &apb.GetClusterInfoRequest{})
 				if err != nil {
 					return fmt.Errorf("GetClusterInfo: %w", err)
 				}
@@ -133,7 +145,8 @@
 		})
 		t.Run("Kubernetes", func(t *testing.T) {
 			t.Parallel()
-			clientSet, err := GetKubeClientSet(cluster, cluster.Ports[common.KubernetesAPIWrappedPort])
+			// TODO(q3k): use SOCKS proxy.
+			clientSet, err := GetKubeClientSet(cluster, cluster.Ports[uint16(common.KubernetesAPIWrappedPort)])
 			if err != nil {
 				t.Fatal(err)
 			}