package kubernetes

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	_ "net/http/pprof"
	"net/url"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/bazelbuild/rules_go/go/runfiles"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	podv1 "k8s.io/kubernetes/pkg/api/v1/pod"

	common "source.monogon.dev/metropolis/node"
	apb "source.monogon.dev/metropolis/proto/api"
	cpb "source.monogon.dev/metropolis/proto/common"
	mlaunch "source.monogon.dev/metropolis/test/launch"
	"source.monogon.dev/metropolis/test/localregistry"
	"source.monogon.dev/metropolis/test/util"
)

var (
	// These are filled in by Bazel at link time with the canonical runfile
	// path of their corresponding file. The init function below resolves
	// each of them to a real filesystem path using the rules_go runfiles
	// package.
	xTestImagesManifestPath string
)
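
// A minimal sketch of how such a path could be stamped in the BUILD file.
// The target label and x_defs key below are hypothetical, assuming the
// usual rules_go x_defs mechanism:
//
//	go_test(
//	    name = "kubernetes_test",
//	    ...
//	    x_defs = {
//	        "xTestImagesManifestPath": "$(rlocationpath //metropolis/test:images_manifest)",
//	    },
//	)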

func init() {
	var err error
	for _, path := range []*string{
		&xTestImagesManifestPath,
	} {
		*path, err = runfiles.Rlocation(*path)
		if err != nil {
			panic(err)
		}
	}
}

const (
	// Timeout for the global test context.
	//
	// Bazel would eventually time out the test after 900s ("large") if, for
	// some reason, the context cancellation fails to abort it.
	globalTestTimeout = 600 * time.Second

	// Timeouts for individual end-to-end tests of different sizes.
	smallTestTimeout = 60 * time.Second
	largeTestTimeout = 120 * time.Second
)

// TestE2EKubernetesLabels verifies that Kubernetes node labels are being
// updated when the cluster state changes.
func TestE2EKubernetesLabels(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), globalTestTimeout)
	defer cancel()

	clusterOptions := mlaunch.ClusterOptions{
		NumNodes: 2,
		InitialClusterConfiguration: &cpb.ClusterConfiguration{
			TpmMode:               cpb.ClusterConfiguration_TPM_MODE_DISABLED,
			StorageSecurityPolicy: cpb.ClusterConfiguration_STORAGE_SECURITY_POLICY_NEEDS_INSECURE,
			KubernetesConfig: &cpb.ClusterConfiguration_KubernetesConfig{
				NodeLabelsToSynchronize: []*cpb.ClusterConfiguration_KubernetesConfig_NodeLabelsToSynchronize{
					{Regexp: `^test\.monogon\.dev/`},
				},
			},
		},
	}
	cluster, err := mlaunch.LaunchCluster(ctx, clusterOptions)
	if err != nil {
		t.Fatalf("LaunchCluster failed: %v", err)
	}
	defer func() {
		err := cluster.Close()
		if err != nil {
			t.Fatalf("cluster Close failed: %v", err)
		}
	}()

	con, err := cluster.CuratorClient()
	if err != nil {
		t.Fatalf("Could not get curator client: %v", err)
	}
	mgmt := apb.NewManagementClient(con)
	clientSet, err := cluster.GetKubeClientSet()
	if err != nil {
		t.Fatal(err)
	}

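	// getLabelsForNode fetches the labels of the given Kubernetes node,
	// filtered down to the two prefixes this test asserts on: the role
	// labels managed by Metropolis and the test.monogon.dev/ labels
	// synchronized per the cluster configuration above. It returns nil if
	// the node does not exist (yet).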
	getLabelsForNode := func(nid string) common.Labels {
		node, err := clientSet.CoreV1().Nodes().Get(ctx, nid, metav1.GetOptions{})
		if kerrors.IsNotFound(err) {
			return nil
		}
		if err != nil {
			t.Fatalf("Could not get node %s: %v", nid, err)
			return nil
		}
		return common.Labels(node.Labels).Filter(func(k, v string) bool {
			if strings.HasPrefix(k, "node-role.kubernetes.io/") {
				return true
			}
			if strings.HasPrefix(k, "test.monogon.dev/") {
				return true
			}
			return false
		})
	}

	// Nodes should have no labels at first.
	for _, nid := range cluster.NodeIDs {
		if labels := getLabelsForNode(nid); !labels.Equals(nil) {
			t.Errorf("Node %s should have no labels, has %s", nid, labels)
		}
	}
	// Nominate both nodes to be Kubernetes workers.
	for _, nid := range cluster.NodeIDs {
		yes := true
		_, err := mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
			Node: &apb.UpdateNodeRolesRequest_Id{
				Id: nid,
			},
			KubernetesWorker: &yes,
		})
		if err != nil {
			t.Fatalf("Could not make %s a KubernetesWorker: %v", nid, err)
		}
	}

	util.MustTestEventual(t, "Labels added", ctx, time.Second*5, func(ctx context.Context) error {
		// Nodes should have role labels now.
		for _, nid := range cluster.NodeIDs {
			want := common.Labels{
				"node-role.kubernetes.io/KubernetesWorker": "",
			}
			if nid == cluster.NodeIDs[0] {
				want["node-role.kubernetes.io/KubernetesController"] = ""
				want["node-role.kubernetes.io/ConsensusMember"] = ""
			}
			if labels := getLabelsForNode(nid); !want.Equals(labels) {
				return fmt.Errorf("node %s should have labels %s, has %s", nid, want, labels)
			}
		}
		return nil
	})

	// Remove KubernetesWorker from the first node again. It will remain
	// registered in Kubernetes (arguably, this is a bug), but its role label
	// should be removed.
	no := false
	_, err = mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
		Node: &apb.UpdateNodeRolesRequest_Id{
			Id: cluster.NodeIDs[0],
		},
		KubernetesWorker: &no,
	})
	if err != nil {
		t.Fatalf("Could not remove KubernetesWorker from %s: %v", cluster.NodeIDs[0], err)
	}

	util.MustTestEventual(t, "Labels removed", ctx, time.Second*5, func(ctx context.Context) error {
		for _, nid := range cluster.NodeIDs {
			want := make(common.Labels)
			if nid == cluster.NodeIDs[0] {
				want["node-role.kubernetes.io/KubernetesController"] = ""
				want["node-role.kubernetes.io/ConsensusMember"] = ""
			} else {
				want["node-role.kubernetes.io/KubernetesWorker"] = ""
			}
			if labels := getLabelsForNode(nid); !want.Equals(labels) {
				return fmt.Errorf("node %s should have labels %s, has %s", nid, want, labels)
			}
		}
		return nil
	})

	// Add a Metropolis node label, ensure it gets reflected on the
	// Kubernetes node.
	_, err = mgmt.UpdateNodeLabels(ctx, &apb.UpdateNodeLabelsRequest{
		Node: &apb.UpdateNodeLabelsRequest_Id{
			Id: cluster.NodeIDs[1],
		},
		Upsert: []*apb.UpdateNodeLabelsRequest_Pair{
			{Key: "test.monogon.dev/foo", Value: "bar"},
		},
	})
	if err != nil {
		t.Fatalf("Could not add label to node: %v", err)
	}

	util.MustTestEventual(t, "Metropolis labels added", ctx, time.Second*5, func(ctx context.Context) error {
		want := common.Labels{
			"node-role.kubernetes.io/KubernetesWorker": "",
			"test.monogon.dev/foo":                     "bar",
		}
		if labels := getLabelsForNode(cluster.NodeIDs[1]); !want.Equals(labels) {
			return fmt.Errorf("node %s should have labels %s, has %s", cluster.NodeIDs[1], want, labels)
		}
		return nil
	})
}

// TestE2EKubernetes exercises the Kubernetes functionality of Metropolis.
//
// The tests are performed against an in-memory cluster.
func TestE2EKubernetes(t *testing.T) {
	// Set a global timeout to make sure this terminates.
	ctx, cancel := context.WithTimeout(context.Background(), globalTestTimeout)
	defer cancel()

	df, err := os.ReadFile(xTestImagesManifestPath)
	if err != nil {
		t.Fatalf("Reading registry manifest failed: %v", err)
	}
	lr, err := localregistry.FromBazelManifest(df)
	if err != nil {
		t.Fatalf("Creating test image registry failed: %v", err)
	}
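	// The local registry built from the Bazel manifest serves the test
	// container images referenced by the workloads below to the cluster,
	// presumably so no external network access is needed to pull them.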

	// Launch cluster.
	clusterOptions := mlaunch.ClusterOptions{
		NumNodes:      2,
		LocalRegistry: lr,
		InitialClusterConfiguration: &cpb.ClusterConfiguration{
			TpmMode:               cpb.ClusterConfiguration_TPM_MODE_DISABLED,
			StorageSecurityPolicy: cpb.ClusterConfiguration_STORAGE_SECURITY_POLICY_NEEDS_INSECURE,
		},
	}
	cluster, err := mlaunch.LaunchCluster(ctx, clusterOptions)
	if err != nil {
		t.Fatalf("LaunchCluster failed: %v", err)
	}
	defer func() {
		err := cluster.Close()
		if err != nil {
			t.Fatalf("cluster Close failed: %v", err)
		}
	}()

	clientSet, err := cluster.GetKubeClientSet()
	if err != nil {
		t.Fatal(err)
	}
	util.TestEventual(t, "Add KubernetesWorker roles", ctx, smallTestTimeout, func(ctx context.Context) error {
		// Make everything but the first node into KubernetesWorkers.
		for i := 1; i < clusterOptions.NumNodes; i++ {
			err := cluster.MakeKubernetesWorker(ctx, cluster.NodeIDs[i])
			if err != nil {
				return util.Permanent(fmt.Errorf("MakeKubernetesWorker: %w", err))
			}
		}
		return nil
	})
	util.TestEventual(t, "Node is registered and ready", ctx, largeTestTimeout, func(ctx context.Context) error {
		nodes, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
		if len(nodes.Items) < 1 {
			return errors.New("node not yet registered")
		}
		node := nodes.Items[0]
		for _, cond := range node.Status.Conditions {
			if cond.Type != corev1.NodeReady {
				continue
			}
			if cond.Status != corev1.ConditionTrue {
				return fmt.Errorf("node not ready: %v", cond.Message)
			}
		}
		return nil
	})
	util.TestEventual(t, "Simple deployment", ctx, largeTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().Deployments("default").Create(ctx, makeTestDeploymentSpec("test-deploy-1"), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple deployment is running", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-deploy-1"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			return fmt.Errorf("pod is not ready: %v", events.Items[0].Message)
		}
	})
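	// The event-lookup fallback above recurs in several checks below. A
	// hypothetical helper (sketch only, not wired in; the concrete client
	// type is an assumption) could collapse the repetition:
	//
	//	func podUnreadyReason(ctx context.Context, cs *kubernetes.Clientset, pod *corev1.Pod) error {
	//	    events, err := cs.CoreV1().Events("default").List(ctx, metav1.ListOptions{
	//	        FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name),
	//	    })
	//	    if err != nil || len(events.Items) == 0 {
	//	        return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
	//	    }
	//	    return fmt.Errorf("pod is not ready: %v", events.Items[0].Message)
	//	}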
	util.TestEventual(t, "Simple deployment with gvisor", ctx, largeTestTimeout, func(ctx context.Context) error {
		deployment := makeTestDeploymentSpec("test-deploy-2")
		gvisorStr := "gvisor"
		deployment.Spec.Template.Spec.RuntimeClassName = &gvisorStr
		_, err := clientSet.AppsV1().Deployments("default").Create(ctx, deployment, metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple deployment is running on gvisor", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-deploy-2"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			var errorMsg strings.Builder
			for _, msg := range events.Items {
				errorMsg.WriteString(" | ")
				errorMsg.WriteString(msg.Message)
			}
			return fmt.Errorf("pod is not ready: %s", errorMsg.String())
		}
	})
	util.TestEventual(t, "Simple StatefulSet with PVC", ctx, largeTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().StatefulSets("default").Create(ctx, makeTestStatefulSet("test-statefulset-1", corev1.PersistentVolumeFilesystem), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple StatefulSet with PVC is running", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-statefulset-1"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			return fmt.Errorf("pod is not ready: %v", events.Items[0].Message)
		}
	})
	util.TestEventual(t, "Simple StatefulSet with Block PVC", ctx, largeTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().StatefulSets("default").Create(ctx, makeTestStatefulSet("test-statefulset-2", corev1.PersistentVolumeBlock), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple StatefulSet with Block PVC is running", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-statefulset-2"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			return fmt.Errorf("pod is not ready: %v", events.Items[0].Message)
		}
	})
	util.TestEventual(t, "In-cluster self-test job", ctx, smallTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.BatchV1().Jobs("default").Create(ctx, makeSelftestSpec("selftest"), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "In-cluster self-test job passed", ctx, smallTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.BatchV1().Jobs("default").Get(ctx, "selftest", metav1.GetOptions{})
		if err != nil {
			return err
		}
		if res.Status.Failed > 0 {
			pods, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{
				LabelSelector: "job-name=selftest",
			})
			if err != nil {
				return util.Permanent(fmt.Errorf("job failed but failed to find pod: %w", err))
			}
			if len(pods.Items) < 1 {
				return fmt.Errorf("job failed but pod does not exist")
			}
			lines, err := getPodLogLines(ctx, clientSet, pods.Items[0].Name, 1)
			if err != nil {
				return fmt.Errorf("job failed but could not get logs: %w", err)
			}
			if len(lines) > 0 {
				return util.Permanent(fmt.Errorf("job failed, last log line: %s", lines[0]))
			}
			return util.Permanent(fmt.Errorf("job failed, empty log"))
		}
		if res.Status.Succeeded > 0 {
			return nil
		}
		return fmt.Errorf("job still running")
	})
	util.TestEventual(t, "Start NodePort test setup", ctx, smallTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().Deployments("default").Create(ctx, makeHTTPServerDeploymentSpec("nodeport-server"), metav1.CreateOptions{})
		if err != nil && !kerrors.IsAlreadyExists(err) {
			return err
		}
		_, err = clientSet.CoreV1().Services("default").Create(ctx, makeHTTPServerNodePortService("nodeport-server"), metav1.CreateOptions{})
		if err != nil && !kerrors.IsAlreadyExists(err) {
			return err
		}
		return nil
	})
	util.TestEventual(t, "NodePort accessible from all nodes", ctx, smallTestTimeout, func(ctx context.Context) error {
		nodes, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
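		// The nodes' internal addresses live on the test harness' private
		// network, so requests are dialed through the cluster's SOCKS proxy.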
		// Use a new client for each attempt.
		hc := http.Client{
			Timeout: 2 * time.Second,
			Transport: &http.Transport{
				Dial: cluster.SOCKSDialer.Dial,
			},
		}
		for _, n := range nodes.Items {
			var addr string
			for _, a := range n.Status.Addresses {
				if a.Type == corev1.NodeInternalIP {
					addr = a.Address
				}
			}
			u := url.URL{Scheme: "http", Host: addr, Path: "/"}
			res, err := hc.Get(u.String())
			if err != nil {
				return fmt.Errorf("failed getting from node %q: %w", n.Name, err)
			}
			if res.StatusCode != http.StatusOK {
				return fmt.Errorf("getting from node %q: HTTP %d", n.Name, res.StatusCode)
			}
			t.Logf("Got response from %q", n.Name)
		}
		return nil
	})
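	// The metrics endpoint requires cluster mTLS: the client authenticates
	// with the cluster owner credentials, verifies the node against the
	// cluster CA, and dials nodes by node ID via the cluster's node dialer.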
	util.TestEventual(t, "containerd metrics retrieved", ctx, smallTestTimeout, func(ctx context.Context) error {
		pool := x509.NewCertPool()
		pool.AddCert(cluster.CACertificate)
		cl := http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					Certificates: []tls.Certificate{cluster.Owner},
					RootCAs:      pool,
				},
				DialContext: func(ctx context.Context, _, addr string) (net.Conn, error) {
					return cluster.DialNode(ctx, addr)
				},
			},
		}
		u := url.URL{
			Scheme: "https",
			Host:   net.JoinHostPort(cluster.NodeIDs[1], common.MetricsPort.PortString()),
			Path:   "/metrics/containerd",
		}
		res, err := cl.Get(u.String())
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.StatusCode != 200 {
			return fmt.Errorf("status code %d", res.StatusCode)
		}

		body, err := io.ReadAll(res.Body)
		if err != nil {
			return err
		}
		needle := "containerd_build_info_total"
		if !strings.Contains(string(body), needle) {
			return util.Permanent(fmt.Errorf("could not find %q in returned response", needle))
		}
		return nil
	})
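	// The VM smoke test needs hardware virtualization inside the test
	// nodes, so it only runs when the environment advertises nested KVM
	// support (HAVE_NESTED_KVM is assumed to be set by the CI harness).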
	if os.Getenv("HAVE_NESTED_KVM") != "" {
		util.TestEventual(t, "Pod for KVM/QEMU smoke test", ctx, smallTestTimeout, func(ctx context.Context) error {
			runcRuntimeClass := "runc"
			_, err := clientSet.CoreV1().Pods("default").Create(ctx, &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "vm-smoketest",
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:            "vm-smoketest",
						ImagePullPolicy: corev1.PullNever,
						Image:           "test.monogon.internal/metropolis/vm/smoketest:smoketest_container",
						Resources: corev1.ResourceRequirements{
							Limits: corev1.ResourceList{
								"devices.monogon.dev/kvm": *resource.NewQuantity(1, ""),
							},
						},
					}},
					RuntimeClassName: &runcRuntimeClass,
					RestartPolicy:    corev1.RestartPolicyNever,
				},
			}, metav1.CreateOptions{})
			return err
		})
		util.TestEventual(t, "KVM/QEMU smoke test completion", ctx, smallTestTimeout, func(ctx context.Context) error {
			pod, err := clientSet.CoreV1().Pods("default").Get(ctx, "vm-smoketest", metav1.GetOptions{})
			if err != nil {
				return fmt.Errorf("failed to get pod: %w", err)
			}
			if pod.Status.Phase == corev1.PodSucceeded {
				return nil
			}
			events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
			if err != nil || len(events.Items) == 0 {
				return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
			} else {
				return fmt.Errorf("pod is not ready: %v", events.Items[len(events.Items)-1].Message)
			}
		})
	}
}