treewide: add more ptr.To usages
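
Replace single-use helper variables that exist only so their address can be
taken (oneVal, one, yes, no) with ptr.To, presumably from k8s.io/utils/ptr;
the corresponding import hunks are not shown below.

For illustration only, a minimal self-contained sketch of the pattern,
assuming k8s.io/utils is available as a module dependency. The names in the
sketch are hypothetical and not part of this change:

    package main

    import (
        "fmt"

        "k8s.io/utils/ptr"
    )

    func main() {
        // Before: a named variable exists only so its address can be taken.
        oneVal := int32(1)
        replicasOld := &oneVal

        // After: ptr.To returns a pointer to a copy of its argument, so the
        // helper variable disappears and the type is spelled at the call site.
        replicasNew := ptr.To(int32(1))

        fmt.Println(*replicasOld, *replicasNew) // 1 1
    }
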
Change-Id: Ibf511bc012a17e39d6b7b4f3a7d9abc1304d755f
Reviewed-on: https://review.monogon.dev/c/monogon/+/3677
Tested-by: Jenkins CI
Reviewed-by: Tim Windelschmidt <tim@monogon.tech>
diff --git a/metropolis/test/e2e/suites/kubernetes/kubernetes_helpers.go b/metropolis/test/e2e/suites/kubernetes/kubernetes_helpers.go
index 9c67117..c8e04bf 100644
--- a/metropolis/test/e2e/suites/kubernetes/kubernetes_helpers.go
+++ b/metropolis/test/e2e/suites/kubernetes/kubernetes_helpers.go
@@ -73,14 +73,13 @@
 // makeHTTPServerDeploymentSpec generates the deployment spec for the test HTTP
 // server.
 func makeHTTPServerDeploymentSpec(name string) *appsv1.Deployment {
-	oneVal := int32(1)
 	return &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{Name: name},
 		Spec: appsv1.DeploymentSpec{
 			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
 				"name": name,
 			}},
-			Replicas: &oneVal,
+			Replicas: ptr.To(int32(1)),
 			Template: corev1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
@@ -129,11 +128,10 @@
 
 // makeSelftestSpec generates a Job spec for the E2E self-test image.
 func makeSelftestSpec(name string) *batchv1.Job {
-	one := int32(1)
 	return &batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{Name: name},
 		Spec: batchv1.JobSpec{
-			BackoffLimit: &one,
+			BackoffLimit: ptr.To(int32(1)),
 			Template: corev1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
diff --git a/metropolis/test/e2e/suites/kubernetes/run_test.go b/metropolis/test/e2e/suites/kubernetes/run_test.go
index 18239e0..c7f5612 100644
--- a/metropolis/test/e2e/suites/kubernetes/run_test.go
+++ b/metropolis/test/e2e/suites/kubernetes/run_test.go
@@ -132,12 +132,11 @@
 	}
 	// Nominate both nodes to be Kubernetes workers.
 	for _, nid := range cluster.NodeIDs {
-		yes := true
 		_, err := mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
 			Node: &apb.UpdateNodeRolesRequest_Id{
 				Id: nid,
 			},
-			KubernetesWorker: &yes,
+			KubernetesWorker: ptr.To(true),
 		})
 		if err != nil {
 			t.Fatalf("Could not make %s a KubernetesWorker: %v", nid, err)
@@ -163,12 +162,11 @@
 
 	// Remove KubernetesWorker from first node again. It will stay in k8s (arguably,
 	// this is a bug) but its role label should be removed.
-	no := false
 	_, err = mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
 		Node: &apb.UpdateNodeRolesRequest_Id{
 			Id: cluster.NodeIDs[0],
 		},
-		KubernetesWorker: &no,
+		KubernetesWorker: ptr.To(false),
 	})
 	if err != nil {
 		t.Fatalf("Could not remove KubernetesWorker from %s: %v", cluster.NodeIDs[0], err)