treewide: add more ptr.To usages
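
k8s.io/utils/ptr provides ptr.To[T any](v T) *T, which returns a
pointer to its argument. This replaces the pattern of declaring a
throwaway local variable just to take its address when filling in
optional, pointer-typed fields. A minimal before/after sketch; the
options struct below is illustrative, not a type from this repository:

    package main

    import (
        "fmt"

        "k8s.io/utils/ptr"
    )

    // options stands in for the request/spec types touched by this
    // change, each with optional pointer-typed fields.
    type options struct {
        Enabled  *bool
        Replicas *int32
    }

    func main() {
        // Before: named locals exist only so their addresses can be taken.
        enabled := true
        oneVal := int32(1)
        before := options{Enabled: &enabled, Replicas: &oneVal}

        // After: ptr.To builds the pointer inline.
        after := options{Enabled: ptr.To(true), Replicas: ptr.To(int32(1))}

        fmt.Println(*before.Enabled, *after.Enabled, *before.Replicas, *after.Replicas)
    }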

Change-Id: Ibf511bc012a17e39d6b7b4f3a7d9abc1304d755f
Reviewed-on: https://review.monogon.dev/c/monogon/+/3677
Tested-by: Jenkins CI
Reviewed-by: Tim Windelschmidt <tim@monogon.tech>
diff --git a/metropolis/test/e2e/suites/ha_cold/BUILD.bazel b/metropolis/test/e2e/suites/ha_cold/BUILD.bazel
index 2ace798..d3370d0 100644
--- a/metropolis/test/e2e/suites/ha_cold/BUILD.bazel
+++ b/metropolis/test/e2e/suites/ha_cold/BUILD.bazel
@@ -21,5 +21,6 @@
         "//metropolis/test/launch",
         "//metropolis/test/util",
         "//osbase/test/launch",
+        "@io_k8s_utils//ptr",
     ],
 )
diff --git a/metropolis/test/e2e/suites/ha_cold/run_test.go b/metropolis/test/e2e/suites/ha_cold/run_test.go
index 43ff689..ae6a043 100644
--- a/metropolis/test/e2e/suites/ha_cold/run_test.go
+++ b/metropolis/test/e2e/suites/ha_cold/run_test.go
@@ -6,6 +6,8 @@
 	"testing"
 	"time"
 
+	"k8s.io/utils/ptr"
+
 	mlaunch "source.monogon.dev/metropolis/test/launch"
 	"source.monogon.dev/metropolis/test/util"
 	"source.monogon.dev/osbase/test/launch"
@@ -96,22 +98,20 @@
 	cur := ipb.NewCuratorClient(curC)
 
 	util.MustTestEventual(t, "Remove KubernetesController role", ctx, 10*time.Second, func(ctx context.Context) error {
-		fa := false
 		_, err := mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
 			Node: &apb.UpdateNodeRolesRequest_Id{
 				Id: cluster.NodeIDs[0],
 			},
-			KubernetesController: &fa,
+			KubernetesController: ptr.To(false),
 		})
 		return err
 	})
 	util.MustTestEventual(t, "Remove ConsensusMember role", ctx, time.Minute, func(ctx context.Context) error {
-		fa := false
 		_, err := mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
 			Node: &apb.UpdateNodeRolesRequest_Id{
 				Id: cluster.NodeIDs[0],
 			},
-			ConsensusMember: &fa,
+			ConsensusMember: ptr.To(false),
 		})
 		return err
 	})
diff --git a/metropolis/test/e2e/suites/kubernetes/kubernetes_helpers.go b/metropolis/test/e2e/suites/kubernetes/kubernetes_helpers.go
index 9c67117..c8e04bf 100644
--- a/metropolis/test/e2e/suites/kubernetes/kubernetes_helpers.go
+++ b/metropolis/test/e2e/suites/kubernetes/kubernetes_helpers.go
@@ -73,14 +73,13 @@
 // makeHTTPServerDeploymentSpec generates the deployment spec for the test HTTP
 // server.
 func makeHTTPServerDeploymentSpec(name string) *appsv1.Deployment {
-	oneVal := int32(1)
 	return &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{Name: name},
 		Spec: appsv1.DeploymentSpec{
 			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
 				"name": name,
 			}},
-			Replicas: &oneVal,
+			Replicas: ptr.To(int32(1)),
 			Template: corev1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
@@ -129,11 +128,10 @@
 
 // makeSelftestSpec generates a Job spec for the E2E self-test image.
 func makeSelftestSpec(name string) *batchv1.Job {
-	one := int32(1)
 	return &batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{Name: name},
 		Spec: batchv1.JobSpec{
-			BackoffLimit: &one,
+			BackoffLimit: ptr.To(int32(1)),
 			Template: corev1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
diff --git a/metropolis/test/e2e/suites/kubernetes/run_test.go b/metropolis/test/e2e/suites/kubernetes/run_test.go
index 18239e0..c7f5612 100644
--- a/metropolis/test/e2e/suites/kubernetes/run_test.go
+++ b/metropolis/test/e2e/suites/kubernetes/run_test.go
@@ -132,12 +132,11 @@
 	}
 	// Nominate both nodes to be Kubernetes workers.
 	for _, nid := range cluster.NodeIDs {
-		yes := true
 		_, err := mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
 			Node: &apb.UpdateNodeRolesRequest_Id{
 				Id: nid,
 			},
-			KubernetesWorker: &yes,
+			KubernetesWorker: ptr.To(true),
 		})
 		if err != nil {
 			t.Fatalf("Could not make %s a KubernetesWorker: %v", nid, err)
@@ -163,12 +162,11 @@
 
 	// Remove KubernetesWorker from first node again. It will stay in k8s (arguably,
 	// this is a bug) but its role label should be removed.
-	no := false
 	_, err = mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
 		Node: &apb.UpdateNodeRolesRequest_Id{
 			Id: cluster.NodeIDs[0],
 		},
-		KubernetesWorker: &no,
+		KubernetesWorker: ptr.To(false),
 	})
 	if err != nil {
 		t.Fatalf("Could not remove KubernetesWorker from %s: %v", cluster.NodeIDs[0], err)