package kubernetes

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	_ "net/http/pprof"
	"net/url"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/bazelbuild/rules_go/go/runfiles"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	podv1 "k8s.io/kubernetes/pkg/api/v1/pod"

	common "source.monogon.dev/metropolis/node"
	apb "source.monogon.dev/metropolis/proto/api"
	cpb "source.monogon.dev/metropolis/proto/common"
	mlaunch "source.monogon.dev/metropolis/test/launch"
	"source.monogon.dev/metropolis/test/localregistry"
	"source.monogon.dev/metropolis/test/util"
)

var (
	// These are filled in by Bazel at link time with the canonical runfiles
	// path of their corresponding file. Inside the init function below, they
	// are resolved to real filesystem paths with the rules_go runfiles
	// package.
	xTestImagesManifestPath string
)

func init() {
	var err error
	for _, path := range []*string{
		&xTestImagesManifestPath,
	} {
		*path, err = runfiles.Rlocation(*path)
		if err != nil {
			panic(err)
		}
	}
}
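
// For reference, the link-time fill above is typically wired up with an
// x_defs attribute on the Bazel test target. A minimal sketch of what this
// could look like in the BUILD file (target and label names here are
// assumptions, not copied from the actual BUILD file):
//
//	go_test(
//	    name = "kubernetes_test",
//	    ...
//	    x_defs = {
//	        "xTestImagesManifestPath": "$(rlocationpath :test_images_manifest)",
//	    },
//	)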

const (
	// Timeout for the global test context.
	//
	// Bazel would eventually time out the test after 900s ("large") if, for
	// some reason, the context cancellation fails to abort it.
	globalTestTimeout = 600 * time.Second

	// Timeouts for individual end-to-end tests of different sizes.
	smallTestTimeout = 60 * time.Second
	largeTestTimeout = 120 * time.Second
)

// TestE2EKubernetesLabels verifies that Kubernetes node labels are being
// updated when the cluster state changes.
func TestE2EKubernetesLabels(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), globalTestTimeout)
	defer cancel()

	clusterOptions := mlaunch.ClusterOptions{
		NumNodes: 2,
		InitialClusterConfiguration: &cpb.ClusterConfiguration{
			ClusterDomain:         "cluster.test",
			TpmMode:               cpb.ClusterConfiguration_TPM_MODE_DISABLED,
			StorageSecurityPolicy: cpb.ClusterConfiguration_STORAGE_SECURITY_POLICY_NEEDS_INSECURE,
			Kubernetes: &cpb.ClusterConfiguration_Kubernetes{
				NodeLabelsToSynchronize: []*cpb.ClusterConfiguration_Kubernetes_NodeLabelsToSynchronize{
					{Regexp: `^test\.monogon\.dev/`},
				},
			},
		},
	}
	cluster, err := mlaunch.LaunchCluster(ctx, clusterOptions)
	if err != nil {
		t.Fatalf("LaunchCluster failed: %v", err)
	}
	defer func() {
		err := cluster.Close()
		if err != nil {
			t.Fatalf("cluster Close failed: %v", err)
		}
	}()

	con, err := cluster.CuratorClient()
	if err != nil {
		t.Fatalf("Could not get curator client: %v", err)
	}
	mgmt := apb.NewManagementClient(con)
	clientSet, err := cluster.GetKubeClientSet()
	if err != nil {
		t.Fatal(err)
	}

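	// getLabelsForNode returns the labels of the given node as seen by the
	// Kubernetes API, filtered down to node role labels and test labels. It
	// returns nil if the node does not exist (yet) and fails the test on any
	// other error.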
	getLabelsForNode := func(nid string) common.Labels {
		node, err := clientSet.CoreV1().Nodes().Get(ctx, nid, metav1.GetOptions{})
		if kerrors.IsNotFound(err) {
			return nil
		}
		if err != nil {
			t.Fatalf("Could not get node %s: %v", nid, err)
			return nil
		}
		return common.Labels(node.Labels).Filter(func(k, v string) bool {
			if strings.HasPrefix(k, "node-role.kubernetes.io/") {
				return true
			}
			if strings.HasPrefix(k, "test.monogon.dev/") {
				return true
			}
			return false
		})
	}

	// Nodes should have no labels at first.
	for _, nid := range cluster.NodeIDs {
		if labels := getLabelsForNode(nid); !labels.Equals(nil) {
			t.Errorf("Node %s should have no labels, has %s", nid, labels)
		}
	}
	// Nominate both nodes to be Kubernetes workers.
	for _, nid := range cluster.NodeIDs {
		yes := true
		_, err := mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
			Node: &apb.UpdateNodeRolesRequest_Id{
				Id: nid,
			},
			KubernetesWorker: &yes,
		})
		if err != nil {
			t.Fatalf("Could not make %s a KubernetesWorker: %v", nid, err)
		}
	}

	util.MustTestEventual(t, "Labels added", ctx, smallTestTimeout, func(ctx context.Context) error {
		// Nodes should have role labels now.
		for _, nid := range cluster.NodeIDs {
			want := common.Labels{
				"node-role.kubernetes.io/KubernetesWorker": "",
			}
			if nid == cluster.NodeIDs[0] {
				want["node-role.kubernetes.io/KubernetesController"] = ""
				want["node-role.kubernetes.io/ConsensusMember"] = ""
			}
			if labels := getLabelsForNode(nid); !want.Equals(labels) {
				return fmt.Errorf("node %s should have labels %s, has %s", nid, want, labels)
			}
		}
		return nil
	})

	// Remove KubernetesWorker from the first node again. It will stay in k8s
	// (arguably, this is a bug) but its role label should be removed.
	no := false
	_, err = mgmt.UpdateNodeRoles(ctx, &apb.UpdateNodeRolesRequest{
		Node: &apb.UpdateNodeRolesRequest_Id{
			Id: cluster.NodeIDs[0],
		},
		KubernetesWorker: &no,
	})
	if err != nil {
		t.Fatalf("Could not remove KubernetesWorker from %s: %v", cluster.NodeIDs[0], err)
	}

	util.MustTestEventual(t, "Labels removed", ctx, smallTestTimeout, func(ctx context.Context) error {
		for _, nid := range cluster.NodeIDs {
			want := make(common.Labels)
			if nid == cluster.NodeIDs[0] {
				want["node-role.kubernetes.io/KubernetesController"] = ""
				want["node-role.kubernetes.io/ConsensusMember"] = ""
			} else {
				want["node-role.kubernetes.io/KubernetesWorker"] = ""
			}
			if labels := getLabelsForNode(nid); !want.Equals(labels) {
				return fmt.Errorf("node %s should have labels %s, has %s", nid, want, labels)
			}
		}
		return nil
	})

	// Add a Metropolis node label, ensure it gets reflected on the Kubernetes
	// node.
	_, err = mgmt.UpdateNodeLabels(ctx, &apb.UpdateNodeLabelsRequest{
		Node: &apb.UpdateNodeLabelsRequest_Id{
			Id: cluster.NodeIDs[1],
		},
		Upsert: []*apb.UpdateNodeLabelsRequest_Pair{
			{Key: "test.monogon.dev/foo", Value: "bar"},
		},
	})
	if err != nil {
		t.Fatalf("Could not add label to node: %v", err)
	}

	util.MustTestEventual(t, "Metropolis labels added", ctx, smallTestTimeout, func(ctx context.Context) error {
		want := common.Labels{
			"node-role.kubernetes.io/KubernetesWorker": "",
			"test.monogon.dev/foo":                     "bar",
		}
		if labels := getLabelsForNode(cluster.NodeIDs[1]); !want.Equals(labels) {
			return fmt.Errorf("node %s should have labels %s, has %s", cluster.NodeIDs[1], want, labels)
		}
		return nil
	})

	// Reconfigure node label rules.
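	// The request shape suggests read-modify-write semantics: BaseConfig
	// carries the expected current value of the masked field and NewConfig
	// the desired one, which would let the server reject the update if the
	// configuration changed concurrently. (This is an assumption from the
	// API shape, not verified against the curator implementation.)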
	_, err = mgmt.ConfigureCluster(ctx, &apb.ConfigureClusterRequest{
		BaseConfig: &cpb.ClusterConfiguration{
			Kubernetes: &cpb.ClusterConfiguration_Kubernetes{
				NodeLabelsToSynchronize: []*cpb.ClusterConfiguration_Kubernetes_NodeLabelsToSynchronize{
					{Regexp: `^test\.monogon\.dev/`},
				},
			},
		},
		NewConfig: &cpb.ClusterConfiguration{
			Kubernetes: &cpb.ClusterConfiguration_Kubernetes{},
		},
		UpdateMask: &fieldmaskpb.FieldMask{
			Paths: []string{"kubernetes.node_labels_to_synchronize"},
		},
	})
	if err != nil {
		t.Fatalf("Could not update cluster configuration: %v", err)
	}

	ci, err := mgmt.GetClusterInfo(ctx, &apb.GetClusterInfoRequest{})
	if err != nil {
		t.Fatalf("Could not get cluster info: %v", err)
	}
	// See if the config changed.
	if rules := ci.ClusterConfiguration.Kubernetes.NodeLabelsToSynchronize; len(rules) != 0 {
		t.Fatalf("Wanted 0 label rules in config after reconfiguration, have %d: %v", len(rules), rules)
	}
	// TODO: ensure new rules get applied, but that will require watching the
	// cluster config for changes in the labelmaker.
}

// TestE2EKubernetes exercises the Kubernetes functionality of Metropolis.
//
// The tests are performed against an in-memory cluster.
func TestE2EKubernetes(t *testing.T) {
	// Set a global timeout to make sure this terminates.
	ctx, cancel := context.WithTimeout(context.Background(), globalTestTimeout)
	defer cancel()

	df, err := os.ReadFile(xTestImagesManifestPath)
	if err != nil {
		t.Fatalf("Reading registry manifest failed: %v", err)
	}
	lr, err := localregistry.FromBazelManifest(df)
	if err != nil {
		t.Fatalf("Creating test image registry failed: %v", err)
	}

	// Launch cluster.
	clusterOptions := mlaunch.ClusterOptions{
		NumNodes:      2,
		LocalRegistry: lr,
		InitialClusterConfiguration: &cpb.ClusterConfiguration{
			ClusterDomain:         "cluster.test",
			TpmMode:               cpb.ClusterConfiguration_TPM_MODE_DISABLED,
			StorageSecurityPolicy: cpb.ClusterConfiguration_STORAGE_SECURITY_POLICY_NEEDS_INSECURE,
		},
	}
	cluster, err := mlaunch.LaunchCluster(ctx, clusterOptions)
	if err != nil {
		t.Fatalf("LaunchCluster failed: %v", err)
	}
	defer func() {
		err := cluster.Close()
		if err != nil {
			t.Fatalf("cluster Close failed: %v", err)
		}
	}()

	clientSet, err := cluster.GetKubeClientSet()
	if err != nil {
		t.Fatal(err)
	}
	util.TestEventual(t, "Add KubernetesWorker roles", ctx, smallTestTimeout, func(ctx context.Context) error {
		// Make everything but the first node into KubernetesWorkers.
		for i := 1; i < clusterOptions.NumNodes; i++ {
			err := cluster.MakeKubernetesWorker(ctx, cluster.NodeIDs[i])
			if err != nil {
				return util.Permanent(fmt.Errorf("MakeKubernetesWorker: %w", err))
			}
		}
		return nil
	})
	util.TestEventual(t, "Node is registered and ready", ctx, largeTestTimeout, func(ctx context.Context) error {
		nodes, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
		if len(nodes.Items) < 1 {
			return errors.New("node not yet registered")
		}
		node := nodes.Items[0]
		for _, cond := range node.Status.Conditions {
			if cond.Type != corev1.NodeReady {
				continue
			}
			if cond.Status != corev1.ConditionTrue {
				return fmt.Errorf("node not ready: %v", cond.Message)
			}
		}
		return nil
	})
	util.TestEventual(t, "Simple deployment", ctx, largeTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().Deployments("default").Create(ctx, makeTestDeploymentSpec("test-deploy-1"), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple deployment is running", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-deploy-1"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			return fmt.Errorf("pod is not ready: %v", events.Items[0].Message)
		}
	})
	util.TestEventual(t, "Simple deployment with gvisor", ctx, largeTestTimeout, func(ctx context.Context) error {
		deployment := makeTestDeploymentSpec("test-deploy-2")
		gvisorStr := "gvisor"
		deployment.Spec.Template.Spec.RuntimeClassName = &gvisorStr
		_, err := clientSet.AppsV1().Deployments("default").Create(ctx, deployment, metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple deployment is running on gvisor", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-deploy-2"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			var errorMsg strings.Builder
			for _, msg := range events.Items {
				errorMsg.WriteString(" | ")
				errorMsg.WriteString(msg.Message)
			}
			return fmt.Errorf("pod is not ready: %s", errorMsg.String())
		}
	})
	util.TestEventual(t, "StatefulSet with PersistentVolume tests", ctx, smallTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().StatefulSets("default").Create(ctx, makeTestStatefulSet("test-statefulset-1"), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "StatefulSet with PersistentVolume tests successful", ctx, smallTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-statefulset-1"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		lines, err := getPodLogLines(ctx, clientSet, pod.Name, 50)
		if err != nil {
			return fmt.Errorf("could not get logs: %w", err)
		}
		if len(lines) > 0 {
			switch lines[len(lines)-1] {
			case "[TESTS-PASSED]":
				return nil
			case "[TESTS-FAILED]":
				return util.Permanent(fmt.Errorf("tests failed, log:\n %s", strings.Join(lines, "\n ")))
			}
		}
		return fmt.Errorf("pod is not ready: %v, log:\n %s", pod.Status.Phase, strings.Join(lines, "\n "))
	})
	util.TestEventual(t, "In-cluster self-test job", ctx, smallTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.BatchV1().Jobs("default").Create(ctx, makeSelftestSpec("selftest"), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "In-cluster self-test job passed", ctx, smallTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.BatchV1().Jobs("default").Get(ctx, "selftest", metav1.GetOptions{})
		if err != nil {
			return err
		}
		if res.Status.Failed > 0 {
			pods, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{
				LabelSelector: "job-name=selftest",
			})
			if err != nil {
				return util.Permanent(fmt.Errorf("job failed but failed to find pod: %w", err))
			}
			if len(pods.Items) < 1 {
				return fmt.Errorf("job failed but pod does not exist")
			}
			lines, err := getPodLogLines(ctx, clientSet, pods.Items[0].Name, 1)
			if err != nil {
				return fmt.Errorf("job failed but could not get logs: %w", err)
			}
			if len(lines) > 0 {
				return util.Permanent(fmt.Errorf("job failed, last log line: %s", lines[0]))
			}
			return util.Permanent(fmt.Errorf("job failed, empty log"))
		}
		if res.Status.Succeeded > 0 {
			return nil
		}
		return fmt.Errorf("job still running")
	})
	util.TestEventual(t, "Start NodePort test setup", ctx, smallTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().Deployments("default").Create(ctx, makeHTTPServerDeploymentSpec("nodeport-server"), metav1.CreateOptions{})
		if err != nil && !kerrors.IsAlreadyExists(err) {
			return err
		}
		_, err = clientSet.CoreV1().Services("default").Create(ctx, makeHTTPServerNodePortService("nodeport-server"), metav1.CreateOptions{})
		if err != nil && !kerrors.IsAlreadyExists(err) {
			return err
		}
		return nil
	})
	util.TestEventual(t, "NodePort accessible from all nodes", ctx, smallTestTimeout, func(ctx context.Context) error {
		nodes, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
		// Use a new client for each attempt.
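		// Connections are dialed through the launch harness' SOCKS proxy,
		// which provides connectivity from the test process into the cluster
		// network.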
		hc := http.Client{
			Timeout: 2 * time.Second,
			Transport: &http.Transport{
				Dial: cluster.SOCKSDialer.Dial,
			},
		}
		for _, n := range nodes.Items {
			var addr string
			for _, a := range n.Status.Addresses {
				if a.Type == corev1.NodeInternalIP {
					addr = a.Address
				}
			}
			u := url.URL{Scheme: "http", Host: addr, Path: "/"}
			res, err := hc.Get(u.String())
			if err != nil {
				return fmt.Errorf("failed getting from node %q: %w", n.Name, err)
			}
			if res.StatusCode != http.StatusOK {
				return fmt.Errorf("getting from node %q: HTTP %d", n.Name, res.StatusCode)
			}
			t.Logf("Got response from %q", n.Name)
		}
		return nil
	})
	util.TestEventual(t, "containerd metrics retrieved", ctx, smallTestTimeout, func(ctx context.Context) error {
		pool := x509.NewCertPool()
		pool.AddCert(cluster.CACertificate)
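		// Build a client that authenticates with the cluster owner
		// certificate, trusts the cluster CA, and reaches nodes by node ID
		// through the cluster's node dialer.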
		cl := http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					Certificates: []tls.Certificate{cluster.Owner},
					RootCAs:      pool,
				},
				DialContext: func(ctx context.Context, _, addr string) (net.Conn, error) {
					return cluster.DialNode(ctx, addr)
				},
			},
		}
		u := url.URL{
			Scheme: "https",
			Host:   net.JoinHostPort(cluster.NodeIDs[1], common.MetricsPort.PortString()),
			Path:   "/metrics/containerd",
		}
		res, err := cl.Get(u.String())
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.StatusCode != 200 {
			return fmt.Errorf("status code %d", res.StatusCode)
		}

		body, err := io.ReadAll(res.Body)
		if err != nil {
			return err
		}
		needle := "containerd_build_info_total"
		if !strings.Contains(string(body), needle) {
			return util.Permanent(fmt.Errorf("could not find %q in returned response", needle))
		}
		return nil
	})
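	// The KVM/QEMU smoke test needs working KVM inside the (themselves
	// virtualized) test nodes, so it only runs when the environment
	// advertises nested KVM support.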
	if os.Getenv("HAVE_NESTED_KVM") != "" {
		util.TestEventual(t, "Pod for KVM/QEMU smoke test", ctx, smallTestTimeout, func(ctx context.Context) error {
			runcRuntimeClass := "runc"
			_, err := clientSet.CoreV1().Pods("default").Create(ctx, &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "vm-smoketest",
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:            "vm-smoketest",
						ImagePullPolicy: corev1.PullNever,
						Image:           "test.monogon.internal/metropolis/vm/smoketest:smoketest_container",
						Resources: corev1.ResourceRequirements{
							Limits: corev1.ResourceList{
								"devices.monogon.dev/kvm": *resource.NewQuantity(1, ""),
							},
						},
					}},
					RuntimeClassName: &runcRuntimeClass,
					RestartPolicy:    corev1.RestartPolicyNever,
				},
			}, metav1.CreateOptions{})
			return err
		})
		util.TestEventual(t, "KVM/QEMU smoke test completion", ctx, smallTestTimeout, func(ctx context.Context) error {
			pod, err := clientSet.CoreV1().Pods("default").Get(ctx, "vm-smoketest", metav1.GetOptions{})
			if err != nil {
				return fmt.Errorf("failed to get pod: %w", err)
			}
			if pod.Status.Phase == corev1.PodSucceeded {
				return nil
			}
			events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
			if err != nil || len(events.Items) == 0 {
				return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
			} else {
				return fmt.Errorf("pod is not ready: %v", events.Items[len(events.Items)-1].Message)
			}
		})
	}
}