// Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package e2e

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	_ "net/http"
	_ "net/http/pprof"
	"net/url"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/bazelbuild/rules_go/go/runfiles"
	"google.golang.org/grpc"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	podv1 "k8s.io/kubernetes/pkg/api/v1/pod"

	apb "source.monogon.dev/metropolis/proto/api"

	common "source.monogon.dev/metropolis/node"
	"source.monogon.dev/metropolis/node/core/rpc"
	"source.monogon.dev/metropolis/pkg/localregistry"
	"source.monogon.dev/metropolis/test/launch"
	"source.monogon.dev/metropolis/test/launch/cluster"
	"source.monogon.dev/metropolis/test/util"
)

const (
	// Timeout for the global test context.
	//
	// Bazel would eventually time out the test after 900s ("large") if, for
	// some reason, the context cancellation fails to abort it.
	globalTestTimeout = 600 * time.Second

	// Timeouts for individual end-to-end tests of different sizes.
	smallTestTimeout = 60 * time.Second
	largeTestTimeout = 120 * time.Second
)

// TestE2ECore exercises the core functionality of Metropolis: maintaining a
// control plane, changing node roles, ...
//
// The tests are performed against an in-memory cluster.
func TestE2ECore(t *testing.T) {
	// Set a global timeout to make sure this terminates
	ctx, cancel := context.WithTimeout(context.Background(), globalTestTimeout)
	defer cancel()

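	// Load the Bazel-generated test image manifest from runfiles and serve the
	// referenced images through an in-memory registry, so (presumably) the
	// cluster can pull test images without external network access.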
	rPath, err := runfiles.Rlocation("_main/metropolis/test/e2e/testimages_manifest.prototxt")
	if err != nil {
		t.Fatalf("Resolving registry manifest failed: %v", err)
	}
	df, err := os.ReadFile(rPath)
	if err != nil {
		t.Fatalf("Reading registry manifest failed: %v", err)
	}
	lr, err := localregistry.FromBazelManifest(df)
	if err != nil {
		t.Fatalf("Creating test image registry failed: %v", err)
	}
	// Launch cluster.
	clusterOptions := cluster.ClusterOptions{
		NumNodes:      2,
		LocalRegistry: lr,
	}
	cluster, err := cluster.LaunchCluster(ctx, clusterOptions)
	if err != nil {
		t.Fatalf("LaunchCluster failed: %v", err)
	}
	defer func() {
		err := cluster.Close()
		if err != nil {
			t.Fatalf("cluster Close failed: %v", err)
		}
	}()

	launch.Log("E2E: Cluster running, starting tests...")

	// Dial first node's curator.
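	// We authenticate as the cluster owner; rpc.WantInsecure skips
	// verification of the server's identity, which should be acceptable here
	// since the connection runs over the test harness' virtual network and is
	// tunneled into the cluster via cluster.DialNode.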
	creds := rpc.NewAuthenticatedCredentials(cluster.Owner, rpc.WantInsecure())
	remote := net.JoinHostPort(cluster.NodeIDs[0], common.CuratorServicePort.PortString())
	cl, err := grpc.Dial(remote, grpc.WithContextDialer(cluster.DialNode), grpc.WithTransportCredentials(creds))
	if err != nil {
		t.Fatalf("failed to dial first node's curator: %v", err)
	}
	defer cl.Close()
	mgmt := apb.NewManagementClient(cl)

	util.TestEventual(t, "Retrieving cluster directory successful", ctx, 60*time.Second, func(ctx context.Context) error {
		res, err := mgmt.GetClusterInfo(ctx, &apb.GetClusterInfoRequest{})
		if err != nil {
			return fmt.Errorf("GetClusterInfo: %w", err)
		}

		// Ensure that the expected node count is present.
		nodes := res.ClusterDirectory.Nodes
		if want, got := clusterOptions.NumNodes, len(nodes); want != got {
			return fmt.Errorf("wanted %d nodes in cluster directory, got %d", want, got)
		}

		// Ensure the nodes have the expected addresses.
		addresses := make(map[string]bool)
		for _, n := range nodes {
			if len(n.Addresses) != 1 {
				return fmt.Errorf("node %s must have exactly one address, got %d", n.Id, len(n.Addresses))
			}
			address := n.Addresses[0].Host
			addresses[address] = true
		}

		for _, address := range []string{"10.1.0.2", "10.1.0.3"} {
			if !addresses[address] {
				return fmt.Errorf("address %q not found in directory", address)
			}
		}
		return nil
	})
	util.TestEventual(t, "Heartbeat test successful", ctx, 20*time.Second, cluster.AllNodesHealthy)
	util.TestEventual(t, "Node rejoin successful", ctx, 60*time.Second, func(ctx context.Context) error {
		// Ensure nodes rejoin the cluster after a reboot by rebooting node 1.
		if err := cluster.RebootNode(ctx, 1); err != nil {
			return fmt.Errorf("while rebooting a node: %w", err)
		}
		return nil
	})
	util.TestEventual(t, "Heartbeat test successful", ctx, 20*time.Second, cluster.AllNodesHealthy)
	util.TestEventual(t, "Prometheus node metrics retrieved", ctx, smallTestTimeout, func(ctx context.Context) error {
		pool := x509.NewCertPool()
		pool.AddCert(cluster.CACertificate)
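		// The metrics endpoint appears to require mutual TLS: present the
		// cluster owner's certificate as the client certificate, and trust
		// the cluster CA when verifying the node's serving certificate.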
		cl := http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					Certificates: []tls.Certificate{cluster.Owner},
					RootCAs:      pool,
				},
				DialContext: func(ctx context.Context, _, addr string) (net.Conn, error) {
					return cluster.DialNode(ctx, addr)
				},
			},
		}
		u := url.URL{
			Scheme: "https",
			Host:   net.JoinHostPort(cluster.NodeIDs[0], common.MetricsPort.PortString()),
			Path:   "/metrics/node",
		}
		res, err := cl.Get(u.String())
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.StatusCode != 200 {
			return fmt.Errorf("status code %d", res.StatusCode)
		}

		body, err := io.ReadAll(res.Body)
		if err != nil {
			return err
		}
		needle := "node_uname_info"
		if !strings.Contains(string(body), needle) {
			return util.Permanent(fmt.Errorf("could not find %q in returned response", needle))
		}
		return nil
	})
}

// TestE2ECoreHA exercises the basics of a high-availability control plane by
// starting up a 3-node cluster, turning all nodes into ConsensusMembers, then
// performing a rolling restart.
func TestE2ECoreHA(t *testing.T) {
	// Set a global timeout to make sure this terminates
	ctx, cancel := context.WithTimeout(context.Background(), globalTestTimeout)
	defer cancel()

	rPath, err := runfiles.Rlocation("_main/metropolis/test/e2e/testimages_manifest.prototxt")
	if err != nil {
		t.Fatalf("Resolving registry manifest failed: %v", err)
	}
	df, err := os.ReadFile(rPath)
	if err != nil {
		t.Fatalf("Reading registry manifest failed: %v", err)
	}
	lr, err := localregistry.FromBazelManifest(df)
	if err != nil {
		t.Fatalf("Creating test image registry failed: %v", err)
	}
	// Launch cluster.
	clusterOptions := cluster.ClusterOptions{
		NumNodes:        3,
		LocalRegistry:   lr,
		NodeLogsToFiles: true,
	}
	cluster, err := cluster.LaunchCluster(ctx, clusterOptions)
	if err != nil {
		t.Fatalf("LaunchCluster failed: %v", err)
	}
	defer func() {
		err := cluster.Close()
		if err != nil {
			t.Fatalf("cluster Close failed: %v", err)
		}
	}()

	launch.Log("E2E: Cluster running, starting tests...")

	util.MustTestEventual(t, "Add ConsensusMember roles", ctx, smallTestTimeout, func(ctx context.Context) error {
		// Make everything but the first node into ConsensusMembers.
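		// The first node bootstrapped the cluster and thus presumably already
		// runs consensus; only the remaining nodes need the role added.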
		for i := 1; i < clusterOptions.NumNodes; i++ {
			err := cluster.MakeConsensusMember(ctx, cluster.NodeIDs[i])
			if err != nil {
				return fmt.Errorf("MakeConsensusMember(%d/%s): %w", i, cluster.NodeIDs[i], err)
			}
		}
		return nil
	})
	util.TestEventual(t, "Heartbeat test successful", ctx, 20*time.Second, cluster.AllNodesHealthy)

	// Perform a rolling restart of all nodes. When a node rejoins it must be
	// able to contact the cluster, so this also exercises that the cluster
	// keeps serving while each node reboots.
	for i := 0; i < clusterOptions.NumNodes; i++ {
		util.MustTestEventual(t, fmt.Sprintf("Node %d rejoin successful", i), ctx, 60*time.Second, func(ctx context.Context) error {
			// Ensure the node rejoins the cluster after a reboot.
			if err := cluster.RebootNode(ctx, i); err != nil {
				return fmt.Errorf("while rebooting a node: %w", err)
			}
			return nil
		})
	}
}

// TestE2EKubernetes exercises the Kubernetes functionality of Metropolis.
//
// The tests are performed against an in-memory cluster.
func TestE2EKubernetes(t *testing.T) {
	// Set a global timeout to make sure this terminates
	ctx, cancel := context.WithTimeout(context.Background(), globalTestTimeout)
	defer cancel()

	rPath, err := runfiles.Rlocation("_main/metropolis/test/e2e/testimages_manifest.prototxt")
	if err != nil {
		t.Fatalf("Resolving registry manifest failed: %v", err)
	}
	df, err := os.ReadFile(rPath)
	if err != nil {
		t.Fatalf("Reading registry manifest failed: %v", err)
	}
	lr, err := localregistry.FromBazelManifest(df)
	if err != nil {
		t.Fatalf("Creating test image registry failed: %v", err)
	}

	// Launch cluster.
	clusterOptions := cluster.ClusterOptions{
		NumNodes:      2,
		LocalRegistry: lr,
	}
	cluster, err := cluster.LaunchCluster(ctx, clusterOptions)
	if err != nil {
		t.Fatalf("LaunchCluster failed: %v", err)
	}
	defer func() {
		err := cluster.Close()
		if err != nil {
			t.Fatalf("cluster Close failed: %v", err)
		}
	}()

	clientSet, err := cluster.GetKubeClientSet()
	if err != nil {
		t.Fatal(err)
	}
	util.TestEventual(t, "Add KubernetesWorker roles", ctx, smallTestTimeout, func(ctx context.Context) error {
		// Make everything but the first node into KubernetesWorkers.
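		// The first node keeps serving the control plane; the workloads
		// exercised below should get scheduled onto the worker node(s).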
		for i := 1; i < clusterOptions.NumNodes; i++ {
			err := cluster.MakeKubernetesWorker(ctx, cluster.NodeIDs[i])
			if err != nil {
				return util.Permanent(fmt.Errorf("MakeKubernetesWorker: %w", err))
			}
		}
		return nil
	})
	util.TestEventual(t, "Node is registered and ready", ctx, largeTestTimeout, func(ctx context.Context) error {
		nodes, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
		if len(nodes.Items) < 1 {
			return errors.New("node not yet registered")
		}
		node := nodes.Items[0]
		for _, cond := range node.Status.Conditions {
			if cond.Type != corev1.NodeReady {
				continue
			}
			if cond.Status != corev1.ConditionTrue {
				return fmt.Errorf("node not ready: %v", cond.Message)
			}
		}
		return nil
	})
	util.TestEventual(t, "Simple deployment", ctx, largeTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().Deployments("default").Create(ctx, makeTestDeploymentSpec("test-deploy-1"), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple deployment is running", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-deploy-1"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			return fmt.Errorf("pod is not ready: %v", events.Items[0].Message)
		}
	})
	util.TestEventual(t, "Simple deployment with gvisor", ctx, largeTestTimeout, func(ctx context.Context) error {
		deployment := makeTestDeploymentSpec("test-deploy-2")
		gvisorStr := "gvisor"
		deployment.Spec.Template.Spec.RuntimeClassName = &gvisorStr
		_, err := clientSet.AppsV1().Deployments("default").Create(ctx, deployment, metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple deployment is running on gvisor", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-deploy-2"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			var errorMsg strings.Builder
			for _, msg := range events.Items {
				errorMsg.WriteString(" | ")
				errorMsg.WriteString(msg.Message)
			}
			return fmt.Errorf("pod is not ready: %v", errorMsg.String())
		}
	})
	util.TestEventual(t, "Simple StatefulSet with PVC", ctx, largeTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().StatefulSets("default").Create(ctx, makeTestStatefulSet("test-statefulset-1", corev1.PersistentVolumeFilesystem), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple StatefulSet with PVC is running", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-statefulset-1"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			return fmt.Errorf("pod is not ready: %v", events.Items[0].Message)
		}
	})
	util.TestEventual(t, "Simple StatefulSet with Block PVC", ctx, largeTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().StatefulSets("default").Create(ctx, makeTestStatefulSet("test-statefulset-2", corev1.PersistentVolumeBlock), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "Simple StatefulSet with Block PVC is running", ctx, largeTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{LabelSelector: "name=test-statefulset-2"})
		if err != nil {
			return err
		}
		if len(res.Items) == 0 {
			return errors.New("pod didn't get created")
		}
		pod := res.Items[0]
		if podv1.IsPodAvailable(&pod, 1, metav1.NewTime(time.Now())) {
			return nil
		}
		events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
		if err != nil || len(events.Items) == 0 {
			return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
		} else {
			return fmt.Errorf("pod is not ready: %v", events.Items[0].Message)
		}
	})
	util.TestEventual(t, "In-cluster self-test job", ctx, smallTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.BatchV1().Jobs("default").Create(ctx, makeSelftestSpec("selftest"), metav1.CreateOptions{})
		return err
	})
	util.TestEventual(t, "In-cluster self-test job passed", ctx, smallTestTimeout, func(ctx context.Context) error {
		res, err := clientSet.BatchV1().Jobs("default").Get(ctx, "selftest", metav1.GetOptions{})
		if err != nil {
			return err
		}
		if res.Status.Failed > 0 {
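			// The job failed: try to surface the failing pod's last log line
			// so the test output is actionable.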
			pods, err := clientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{
				LabelSelector: "job-name=selftest",
			})
			if err != nil {
				return util.Permanent(fmt.Errorf("job failed, and listing its pods also failed: %w", err))
			}
			if len(pods.Items) < 1 {
				return fmt.Errorf("job failed but pod does not exist")
			}
			lines, err := getPodLogLines(ctx, clientSet, pods.Items[0].Name, 1)
			if err != nil {
				return fmt.Errorf("job failed but could not get logs: %w", err)
			}
			if len(lines) > 0 {
				return util.Permanent(fmt.Errorf("job failed, last log line: %s", lines[0]))
			}
			return util.Permanent(fmt.Errorf("job failed, empty log"))
		}
		if res.Status.Succeeded > 0 {
			return nil
		}
		return fmt.Errorf("job still running")
	})
	util.TestEventual(t, "Start NodePort test setup", ctx, smallTestTimeout, func(ctx context.Context) error {
		_, err := clientSet.AppsV1().Deployments("default").Create(ctx, makeHTTPServerDeploymentSpec("nodeport-server"), metav1.CreateOptions{})
		if err != nil && !kerrors.IsAlreadyExists(err) {
			return err
		}
		_, err = clientSet.CoreV1().Services("default").Create(ctx, makeHTTPServerNodePortService("nodeport-server"), metav1.CreateOptions{})
		if err != nil && !kerrors.IsAlreadyExists(err) {
			return err
		}
		return nil
	})
	util.TestEventual(t, "NodePort accessible from all nodes", ctx, smallTestTimeout, func(ctx context.Context) error {
		nodes, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
		// Use a new client for each attempt
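		// so that connection reuse does not mask per-node failures; requests
		// reach the cluster network through the test harness' SOCKS proxy.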
		hc := http.Client{
			Timeout: 2 * time.Second,
			Transport: &http.Transport{
				Dial: cluster.SOCKSDialer.Dial,
			},
		}
		for _, n := range nodes.Items {
			var addr string
			for _, a := range n.Status.Addresses {
				if a.Type == corev1.NodeInternalIP {
					addr = a.Address
				}
			}
			u := url.URL{Scheme: "http", Host: addr, Path: "/"}
			res, err := hc.Get(u.String())
			if err != nil {
				return fmt.Errorf("failed getting from node %q: %w", n.Name, err)
			}
			if res.StatusCode != http.StatusOK {
				return fmt.Errorf("getting from node %q: HTTP %d", n.Name, res.StatusCode)
			}
			t.Logf("Got response from %q", n.Name)
		}
		return nil
	})
	util.TestEventual(t, "containerd metrics retrieved", ctx, smallTestTimeout, func(ctx context.Context) error {
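		// Scrape node 1 (made a KubernetesWorker above) rather than node 0,
		// as containerd presumably only runs on worker nodes.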
		pool := x509.NewCertPool()
		pool.AddCert(cluster.CACertificate)
		cl := http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					Certificates: []tls.Certificate{cluster.Owner},
					RootCAs:      pool,
				},
				DialContext: func(ctx context.Context, _, addr string) (net.Conn, error) {
					return cluster.DialNode(ctx, addr)
				},
			},
		}
		u := url.URL{
			Scheme: "https",
			Host:   net.JoinHostPort(cluster.NodeIDs[1], common.MetricsPort.PortString()),
			Path:   "/metrics/containerd",
		}
		res, err := cl.Get(u.String())
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.StatusCode != 200 {
			return fmt.Errorf("status code %d", res.StatusCode)
		}

		body, err := io.ReadAll(res.Body)
		if err != nil {
			return err
		}
		needle := "containerd_build_info_total"
		if !strings.Contains(string(body), needle) {
			return util.Permanent(fmt.Errorf("could not find %q in returned response", needle))
		}
		return nil
	})
	if os.Getenv("HAVE_NESTED_KVM") != "" {
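		// The VM smoke test needs a working KVM device inside the test nodes,
		// so it only runs when the environment advertises nested
		// virtualization support.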
		util.TestEventual(t, "Pod for KVM/QEMU smoke test", ctx, smallTestTimeout, func(ctx context.Context) error {
			runcRuntimeClass := "runc"
			_, err := clientSet.CoreV1().Pods("default").Create(ctx, &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "vm-smoketest",
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:            "vm-smoketest",
						ImagePullPolicy: corev1.PullNever,
						Image:           "test.monogon.internal/metropolis/vm/smoketest:smoketest_container",
						Resources: corev1.ResourceRequirements{
							Limits: corev1.ResourceList{
								"devices.monogon.dev/kvm": *resource.NewQuantity(1, ""),
							},
						},
					}},
					RuntimeClassName: &runcRuntimeClass,
					RestartPolicy:    corev1.RestartPolicyNever,
				},
			}, metav1.CreateOptions{})
			return err
		})
		util.TestEventual(t, "KVM/QEMU smoke test completion", ctx, smallTestTimeout, func(ctx context.Context) error {
			pod, err := clientSet.CoreV1().Pods("default").Get(ctx, "vm-smoketest", metav1.GetOptions{})
			if err != nil {
				return fmt.Errorf("failed to get pod: %w", err)
			}
			if pod.Status.Phase == corev1.PodSucceeded {
				return nil
			}
			events, err := clientSet.CoreV1().Events("default").List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=default", pod.Name)})
			if err != nil || len(events.Items) == 0 {
				return fmt.Errorf("pod is not ready: %v", pod.Status.Phase)
			} else {
				return fmt.Errorf("pod is not ready: %v", events.Items[len(events.Items)-1].Message)
			}
		})
	}
}