Copyright 2020 The Monogon Project Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


From 65e40a970e3f33f44423653767c9ca8ff792bf70 Mon Sep 17 00:00:00 2001
From: Lorenz Brun <lorenz@nexantic.com>
Date: Mon, 20 Jul 2020 16:50:56 +0200
Subject: [PATCH] POC Make e2e test suite support providerless

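All e2e code that needs a cloud provider SDK (GCE/GKE, AWS, Azure, OpenStack,
vSphere) is gated behind the !providerless Go build constraint: provider-only
files get the tag added at the top, and provider-dependent parts of shared
files (e2e.go, network/service.go, storage/drivers/in_tree.go,
storage/in_tree_volumes.go, storage/volume_provisioning.go) move into new
*_providers.go files carrying the same tag. The guard pattern applied
throughout is sketched below; the invocation mentioned in the comment is an
assumption about how a providerless build would be driven, not something this
patch adds:

    // +build !providerless

    package e2e

    // Compiled as before in a default build; building the suite with the
    // "providerless" tag (for example: go test -tags providerless ./test/e2e/...)
    // drops this file together with its cloud SDK imports.
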
---
 .../custom_metrics_stackdriver_autoscaling.go | 2 +
 test/e2e/cloud/imports.go | 2 +
 test/e2e/e2e.go | 10 -
 test/e2e/e2e_providers.go | 32 +
 .../framework/providers/gce/firewall_test.go | 2 +
 test/e2e/instrumentation/logging/imports.go | 2 +
 .../instrumentation/monitoring/accelerator.go | 2 +
 .../monitoring/custom_metrics_deployments.go | 2 +
 .../monitoring/custom_metrics_stackdriver.go | 4 +-
 .../instrumentation/monitoring/stackdriver.go | 2 +
 .../monitoring/stackdriver_metadata_agent.go | 4 +-
 test/e2e/network/firewall.go | 2 +
 test/e2e/network/ingress.go | 2 +
 test/e2e/network/ingress_scale.go | 2 +
 test/e2e/network/network_tiers.go | 2 +
 test/e2e/network/scale/ingress.go | 2 +
 .../network/scale/localrun/ingress_scale.go | 2 +-
 test/e2e/network/service.go | 955 -----------------
 test/e2e/network/service_providers.go | 980 ++++++++++++++++++
 test/e2e/node/recreate_node.go | 2 +
 test/e2e/scheduling/nvidia-gpus.go | 2 +
 test/e2e/scheduling/ubernetes_lite_volumes.go | 2 +
 test/e2e/storage/drivers/in_tree.go | 732 -------------
 test/e2e/storage/drivers/in_tree_providers.go | 751 ++++++++++++++
 test/e2e/storage/in_tree_volumes.go | 5 -
 test/e2e/storage/in_tree_volumes_providers.go | 46 +
 .../nfs_persistent_volume-disruptive.go | 2 +-
 test/e2e/storage/pd.go | 2 +
 test/e2e/storage/persistent_volumes-gce.go | 2 +
 test/e2e/storage/regional_pd.go | 3 +
 test/e2e/storage/utils/BUILD | 3 -
 test/e2e/storage/utils/ebs.go | 2 +
 test/e2e/storage/volume_provisioning.go | 527 ----------
 .../storage/volume_provisioning_providers.go | 577 +++++++++++
 test/e2e/upgrades/nvidia-gpu.go | 2 +
 35 files changed, 2435 insertions(+), 2236 deletions(-)
 create mode 100644 test/e2e/e2e_providers.go
 create mode 100644 test/e2e/network/service_providers.go
 create mode 100644 test/e2e/storage/drivers/in_tree_providers.go
 create mode 100644 test/e2e/storage/in_tree_volumes_providers.go
 create mode 100644 test/e2e/storage/volume_provisioning_providers.go

diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
index d3a7862d338..8bacec7fe1d 100644
--- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
+++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2017 The Kubernetes Authors.
 
diff --git a/test/e2e/cloud/imports.go b/test/e2e/cloud/imports.go
index 5aa1def97d1..382cb1a2264 100644
--- a/test/e2e/cloud/imports.go
+++ b/test/e2e/cloud/imports.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2019 The Kubernetes Authors.
 
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index d1e23325d69..f5717e417e7 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -53,16 +53,6 @@ import (
 utilnet "k8s.io/utils/net"
 
 clientset "k8s.io/client-go/kubernetes"
- // ensure auth plugins are loaded
- _ "k8s.io/client-go/plugin/pkg/client/auth"
-
- // ensure that cloud providers are loaded
- _ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
 )
 
 const (
diff --git a/test/e2e/e2e_providers.go b/test/e2e/e2e_providers.go
new file mode 100644
index 00000000000..cf96642b110
--- /dev/null
+++ b/test/e2e/e2e_providers.go
@@ -0,0 +1,32 @@
+// +build !providerless
+
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ // ensure auth plugins are loaded
+ _ "k8s.io/client-go/plugin/pkg/client/auth"
+
+ // ensure that cloud providers are loaded
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
+)
diff --git a/test/e2e/framework/providers/gce/firewall_test.go b/test/e2e/framework/providers/gce/firewall_test.go
index 647441dc962..2a92543a5a7 100644
--- a/test/e2e/framework/providers/gce/firewall_test.go
+++ b/test/e2e/framework/providers/gce/firewall_test.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2018 The Kubernetes Authors.
 
diff --git a/test/e2e/instrumentation/logging/imports.go b/test/e2e/instrumentation/logging/imports.go
index 5dd66717db1..fc15c04bfef 100644
--- a/test/e2e/instrumentation/logging/imports.go
+++ b/test/e2e/instrumentation/logging/imports.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2017 The Kubernetes Authors.
 
diff --git a/test/e2e/instrumentation/monitoring/accelerator.go b/test/e2e/instrumentation/monitoring/accelerator.go
index 90047e46ea1..6fa094e6a18 100644
--- a/test/e2e/instrumentation/monitoring/accelerator.go
+++ b/test/e2e/instrumentation/monitoring/accelerator.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2017 The Kubernetes Authors.
 
diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
index de80b129315..8d96b93bf11 100644
--- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
+++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2017 The Kubernetes Authors.
 
diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go
index 277b5a0ab24..ddbc3f20802 100644
--- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go
+++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2017 The Kubernetes Authors.
 
@@ -21,7 +23,7 @@ import (
 "time"
 
 gcm "google.golang.org/api/monitoring/v3"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/runtime/schema"
diff --git a/test/e2e/instrumentation/monitoring/stackdriver.go b/test/e2e/instrumentation/monitoring/stackdriver.go
index dbc5e51c20d..3db0120900b 100644
--- a/test/e2e/instrumentation/monitoring/stackdriver.go
+++ b/test/e2e/instrumentation/monitoring/stackdriver.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2017 The Kubernetes Authors.
 
diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
index 321591344db..bad9be5b5bf 100644
--- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
+++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2017 The Kubernetes Authors.
 
@@ -24,7 +26,7 @@ import (
 "reflect"
 "time"
 
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go
index f4200f5a30c..f8612ed75a9 100644
--- a/test/e2e/network/firewall.go
+++ b/test/e2e/network/firewall.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2016 The Kubernetes Authors.
 
diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go
index 6c3b09e41f2..8485f8ce50e 100644
--- a/test/e2e/network/ingress.go
+++ b/test/e2e/network/ingress.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2015 The Kubernetes Authors.
 
diff --git a/test/e2e/network/ingress_scale.go b/test/e2e/network/ingress_scale.go
index 6cc8585b7b2..867c834868c 100644
--- a/test/e2e/network/ingress_scale.go
+++ b/test/e2e/network/ingress_scale.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2018 The Kubernetes Authors.
 
diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go
index 5ae68a5a1ee..f3ea1f72a6b 100644
--- a/test/e2e/network/network_tiers.go
+++ b/test/e2e/network/network_tiers.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2017 The Kubernetes Authors.
 
diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go
index 954296beb52..43ad9c9b618 100644
--- a/test/e2e/network/scale/ingress.go
+++ b/test/e2e/network/scale/ingress.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
 /*
 Copyright 2018 The Kubernetes Authors.
 
diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go
index 2e2c39884da..5a27f5f4cb2 100644
--- a/test/e2e/network/scale/localrun/ingress_scale.go
+++ b/test/e2e/network/scale/localrun/ingress_scale.go
@@ -27,7 +27,7 @@ import (
 
 "k8s.io/klog/v2"
 
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/tools/clientcmd"
diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
index 35ac43001d2..b458347a9f9 100644
--- a/test/e2e/network/service.go
+++ b/test/e2e/network/service.go
@@ -31,8 +31,6 @@ import (
 
 utilnet "k8s.io/apimachinery/pkg/util/net"
 
- compute "google.golang.org/api/compute/v1"
-
 "k8s.io/client-go/tools/cache"
 
 appsv1 "k8s.io/api/apps/v1"
@@ -52,11 +50,9 @@ import (
 e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
 e2eendpointslice "k8s.io/kubernetes/test/e2e/framework/endpointslice"
- e2ekubesystem "k8s.io/kubernetes/test/e2e/framework/kubesystem"
 e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
- "k8s.io/kubernetes/test/e2e/framework/providers/gce"
 e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -64,7 +60,6 @@ import (
 "k8s.io/kubernetes/test/e2e/storage/utils"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
- gcecloud "k8s.io/legacy-cloud-providers/gce"
 
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
Serge Bazanski19eb0002021-01-21 14:25:25 +0100327@@ -1242,375 +1237,6 @@ var _ = SIGDescribe("Services", func() {
Lorenz Bruned0503c2020-07-28 17:21:25 +0200328 framework.ExpectNoError(err)
329 })
330
331- // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
332- ginkgo.It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() {
333- // requires cloud load-balancer support
334- e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
335-
336- loadBalancerSupportsUDP := !framework.ProviderIs("aws")
337-
338- loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault
339- if framework.ProviderIs("aws") {
340- loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
341- }
342- loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
343-
344- // This test is more monolithic than we'd like because LB turnup can be
345- // very slow, so we lumped all the tests into one LB lifecycle.
346-
347- serviceName := "mutability-test"
348- ns1 := f.Namespace.Name // LB1 in ns1 on TCP
349- framework.Logf("namespace for TCP test: %s", ns1)
350-
351- ginkgo.By("creating a second namespace")
352- namespacePtr, err := f.CreateNamespace("services", nil)
353- framework.ExpectNoError(err, "failed to create namespace")
354- ns2 := namespacePtr.Name // LB2 in ns2 on UDP
355- framework.Logf("namespace for UDP test: %s", ns2)
356-
357- nodeIP, err := e2enode.PickIP(cs) // for later
358- framework.ExpectNoError(err)
359-
360- // Test TCP and UDP Services. Services with the same name in different
361- // namespaces should get different node ports and load balancers.
362-
363- ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
364- tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
365- tcpService, err := tcpJig.CreateTCPService(nil)
366- framework.ExpectNoError(err)
367-
368- ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
369- udpJig := e2eservice.NewTestJig(cs, ns2, serviceName)
370- udpService, err := udpJig.CreateUDPService(nil)
371- framework.ExpectNoError(err)
372-
373- ginkgo.By("verifying that TCP and UDP use the same port")
374- if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
375- framework.Failf("expected to use the same port for TCP and UDP")
376- }
377- svcPort := int(tcpService.Spec.Ports[0].Port)
378- framework.Logf("service port (TCP and UDP): %d", svcPort)
379-
380- ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
381- _, err = tcpJig.Run(nil)
382- framework.ExpectNoError(err)
383-
384- ginkgo.By("creating a pod to be part of the UDP service " + serviceName)
385- _, err = udpJig.Run(nil)
386- framework.ExpectNoError(err)
387-
388- // Change the services to NodePort.
389-
390- ginkgo.By("changing the TCP service to type=NodePort")
391- tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
392- s.Spec.Type = v1.ServiceTypeNodePort
393- })
394- framework.ExpectNoError(err)
395- tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
396- framework.Logf("TCP node port: %d", tcpNodePort)
397-
398- ginkgo.By("changing the UDP service to type=NodePort")
399- udpService, err = udpJig.UpdateService(func(s *v1.Service) {
400- s.Spec.Type = v1.ServiceTypeNodePort
401- })
402- framework.ExpectNoError(err)
403- udpNodePort := int(udpService.Spec.Ports[0].NodePort)
404- framework.Logf("UDP node port: %d", udpNodePort)
405-
406- ginkgo.By("hitting the TCP service's NodePort")
407- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
408-
409- ginkgo.By("hitting the UDP service's NodePort")
410- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
411-
412- // Change the services to LoadBalancer.
413-
414- // Here we test that LoadBalancers can receive static IP addresses. This isn't
415- // necessary, but is an additional feature this monolithic test checks.
416- requestedIP := ""
417- staticIPName := ""
418- if framework.ProviderIs("gce", "gke") {
419- ginkgo.By("creating a static load balancer IP")
420- staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID)
421- gceCloud, err := gce.GetGCECloud()
422- framework.ExpectNoError(err, "failed to get GCE cloud provider")
423-
424- err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
425- defer func() {
426- if staticIPName != "" {
427- // Release GCE static IP - this is not kube-managed and will not be automatically released.
428- if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
429- framework.Logf("failed to release static IP %s: %v", staticIPName, err)
430- }
431- }
432- }()
433- framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)
434- reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
435- framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
436-
437- requestedIP = reservedAddr.Address
438- framework.Logf("Allocated static load balancer IP: %s", requestedIP)
439- }
440-
441- ginkgo.By("changing the TCP service to type=LoadBalancer")
442- tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
443- s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
444- s.Spec.Type = v1.ServiceTypeLoadBalancer
445- })
446- framework.ExpectNoError(err)
447-
448- if loadBalancerSupportsUDP {
449- ginkgo.By("changing the UDP service to type=LoadBalancer")
450- udpService, err = udpJig.UpdateService(func(s *v1.Service) {
451- s.Spec.Type = v1.ServiceTypeLoadBalancer
452- })
453- framework.ExpectNoError(err)
454- }
455- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
456- if loadBalancerSupportsUDP {
457- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService))
458- }
459-
460- ginkgo.By("waiting for the TCP service to have a load balancer")
461- // Wait for the load balancer to be created asynchronously
462- tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
463- framework.ExpectNoError(err)
464- if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
465- framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
466- }
467- if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
468- framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
469- }
470- tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
471- framework.Logf("TCP load balancer: %s", tcpIngressIP)
472-
473- if framework.ProviderIs("gce", "gke") {
474- // Do this as early as possible, which overrides the `defer` above.
475- // This is mostly out of fear of leaking the IP in a timeout case
476- // (as of this writing we're not 100% sure where the leaks are
477- // coming from, so this is first-aid rather than surgery).
478- ginkgo.By("demoting the static IP to ephemeral")
479- if staticIPName != "" {
480- gceCloud, err := gce.GetGCECloud()
481- framework.ExpectNoError(err, "failed to get GCE cloud provider")
482- // Deleting it after it is attached "demotes" it to an
483- // ephemeral IP, which can be auto-released.
484- if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
485- framework.Failf("failed to release static IP %s: %v", staticIPName, err)
486- }
487- staticIPName = ""
488- }
489- }
490-
491- var udpIngressIP string
492- if loadBalancerSupportsUDP {
493- ginkgo.By("waiting for the UDP service to have a load balancer")
494- // 2nd one should be faster since they ran in parallel.
495- udpService, err = udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
496- framework.ExpectNoError(err)
497- if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
498- framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
499- }
500- udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
501- framework.Logf("UDP load balancer: %s", udpIngressIP)
502-
503- ginkgo.By("verifying that TCP and UDP use different load balancers")
504- if tcpIngressIP == udpIngressIP {
505- framework.Failf("Load balancers are not different: %s", e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
506- }
507- }
508-
509- ginkgo.By("hitting the TCP service's NodePort")
510- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
511-
512- ginkgo.By("hitting the UDP service's NodePort")
513- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
514-
515- ginkgo.By("hitting the TCP service's LoadBalancer")
516- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
517-
518- if loadBalancerSupportsUDP {
519- ginkgo.By("hitting the UDP service's LoadBalancer")
520- testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
521- }
522-
523- // Change the services' node ports.
524-
525- ginkgo.By("changing the TCP service's NodePort")
526- tcpService, err = tcpJig.ChangeServiceNodePort(tcpNodePort)
527- framework.ExpectNoError(err)
528- tcpNodePortOld := tcpNodePort
529- tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
530- if tcpNodePort == tcpNodePortOld {
531- framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
532- }
533- if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
534- framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
535- }
536- framework.Logf("TCP node port: %d", tcpNodePort)
537-
538- ginkgo.By("changing the UDP service's NodePort")
539- udpService, err = udpJig.ChangeServiceNodePort(udpNodePort)
540- framework.ExpectNoError(err)
541- udpNodePortOld := udpNodePort
542- udpNodePort = int(udpService.Spec.Ports[0].NodePort)
543- if udpNodePort == udpNodePortOld {
544- framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
545- }
546- if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
547- framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
548- }
549- framework.Logf("UDP node port: %d", udpNodePort)
550-
551- ginkgo.By("hitting the TCP service's new NodePort")
552- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
553-
554- ginkgo.By("hitting the UDP service's new NodePort")
555- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
556-
557- ginkgo.By("checking the old TCP NodePort is closed")
558- testNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout)
559-
560- ginkgo.By("checking the old UDP NodePort is closed")
561- testNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout)
562-
563- ginkgo.By("hitting the TCP service's LoadBalancer")
564- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
565-
566- if loadBalancerSupportsUDP {
567- ginkgo.By("hitting the UDP service's LoadBalancer")
568- testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
569- }
570-
571- // Change the services' main ports.
572-
573- ginkgo.By("changing the TCP service's port")
574- tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
575- s.Spec.Ports[0].Port++
576- })
577- framework.ExpectNoError(err)
578- svcPortOld := svcPort
579- svcPort = int(tcpService.Spec.Ports[0].Port)
580- if svcPort == svcPortOld {
581- framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
582- }
583- if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
584- framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
585- }
586- if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
587- framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
588- }
589-
590- ginkgo.By("changing the UDP service's port")
591- udpService, err = udpJig.UpdateService(func(s *v1.Service) {
592- s.Spec.Ports[0].Port++
593- })
594- framework.ExpectNoError(err)
595- if int(udpService.Spec.Ports[0].Port) != svcPort {
596- framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
597- }
598- if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
599- framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
600- }
601- if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
602- framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
603- }
604-
605- framework.Logf("service port (TCP and UDP): %d", svcPort)
606-
607- ginkgo.By("hitting the TCP service's NodePort")
608- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
609-
610- ginkgo.By("hitting the UDP service's NodePort")
611- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
612-
613- ginkgo.By("hitting the TCP service's LoadBalancer")
614- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
615-
616- if loadBalancerSupportsUDP {
617- ginkgo.By("hitting the UDP service's LoadBalancer")
618- testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
619- }
620-
621- ginkgo.By("Scaling the pods to 0")
622- err = tcpJig.Scale(0)
623- framework.ExpectNoError(err)
624- err = udpJig.Scale(0)
625- framework.ExpectNoError(err)
626-
627- ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort")
628- testRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
629-
630- ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort")
631- testRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
632-
633- ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer")
634- testRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
635-
636- if loadBalancerSupportsUDP {
637- ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
638- testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
639- }
640-
641- ginkgo.By("Scaling the pods to 1")
642- err = tcpJig.Scale(1)
643- framework.ExpectNoError(err)
644- err = udpJig.Scale(1)
645- framework.ExpectNoError(err)
646-
647- ginkgo.By("hitting the TCP service's NodePort")
648- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
649-
650- ginkgo.By("hitting the UDP service's NodePort")
651- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
652-
653- ginkgo.By("hitting the TCP service's LoadBalancer")
654- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
655-
656- if loadBalancerSupportsUDP {
657- ginkgo.By("hitting the UDP service's LoadBalancer")
658- testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
659- }
660-
661- // Change the services back to ClusterIP.
662-
663- ginkgo.By("changing TCP service back to type=ClusterIP")
664- _, err = tcpJig.UpdateService(func(s *v1.Service) {
665- s.Spec.Type = v1.ServiceTypeClusterIP
666- s.Spec.Ports[0].NodePort = 0
667- })
668- framework.ExpectNoError(err)
669- // Wait for the load balancer to be destroyed asynchronously
670- _, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
671- framework.ExpectNoError(err)
672-
673- ginkgo.By("changing UDP service back to type=ClusterIP")
674- _, err = udpJig.UpdateService(func(s *v1.Service) {
675- s.Spec.Type = v1.ServiceTypeClusterIP
676- s.Spec.Ports[0].NodePort = 0
677- })
678- framework.ExpectNoError(err)
679- if loadBalancerSupportsUDP {
680- // Wait for the load balancer to be destroyed asynchronously
681- _, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
682- framework.ExpectNoError(err)
683- }
684-
685- ginkgo.By("checking the TCP NodePort is closed")
686- testNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
687-
688- ginkgo.By("checking the UDP NodePort is closed")
689- testNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
690-
691- ginkgo.By("checking the TCP LoadBalancer is closed")
692- testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
693-
694- if loadBalancerSupportsUDP {
695- ginkgo.By("checking the UDP LoadBalancer is closed")
696- testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
697- }
698- })
699-
700 /*
701 Testname: Service, update NodePort, same port different protocol
702 Description: Create a service to accept TCP requests. By default, created service MUST be of type ClusterIP and an ClusterIP MUST be assigned to the service.
Serge Bazanski19eb0002021-01-21 14:25:25 +0100703@@ -2253,199 +1879,6 @@ var _ = SIGDescribe("Services", func() {
Lorenz Bruned0503c2020-07-28 17:21:25 +0200704 checkReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPod.Name, svcIP)
705 })
706
707- ginkgo.It("should be able to create an internal type load balancer [Slow]", func() {
708- e2eskipper.SkipUnlessProviderIs("azure", "gke", "gce")
709-
710- createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
711- pollInterval := framework.Poll * 10
712-
713- namespace := f.Namespace.Name
714- serviceName := "lb-internal"
715- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
716-
717- ginkgo.By("creating pod to be part of service " + serviceName)
718- _, err := jig.Run(nil)
719- framework.ExpectNoError(err)
720-
721- enableILB, disableILB := enableAndDisableInternalLB()
722-
723- isInternalEndpoint := func(lbIngress *v1.LoadBalancerIngress) bool {
724- ingressEndpoint := e2eservice.GetIngressPoint(lbIngress)
725- // Needs update for providers using hostname as endpoint.
726- return strings.HasPrefix(ingressEndpoint, "10.")
727- }
728-
729- ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled")
730- svc, err := jig.CreateTCPService(func(svc *v1.Service) {
731- svc.Spec.Type = v1.ServiceTypeLoadBalancer
732- enableILB(svc)
733- })
734- framework.ExpectNoError(err)
735-
736- defer func() {
737- ginkgo.By("Clean up loadbalancer service")
738- e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
739- }()
740-
741- svc, err = jig.WaitForLoadBalancer(createTimeout)
742- framework.ExpectNoError(err)
743- lbIngress := &svc.Status.LoadBalancer.Ingress[0]
744- svcPort := int(svc.Spec.Ports[0].Port)
745- // should have an internal IP.
746- framework.ExpectEqual(isInternalEndpoint(lbIngress), true)
747-
748- // ILBs are not accessible from the test orchestrator, so it's necessary to use
749- // a pod to test the service.
750- ginkgo.By("hitting the internal load balancer from pod")
751- framework.Logf("creating pod with host network")
752- hostExec := launchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")
753-
754- framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
755- tcpIngressIP := e2eservice.GetIngressPoint(lbIngress)
756- if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
757- cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
758- stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
759- if err != nil {
760- framework.Logf("error curling; stdout: %v. err: %v", stdout, err)
761- return false, nil
762- }
763-
764- if !strings.Contains(stdout, "hello") {
765- framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
766- return false, nil
767- }
768-
769- framework.Logf("Successful curl; stdout: %v", stdout)
770- return true, nil
771- }); pollErr != nil {
772- framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr)
773- }
774-
775- ginkgo.By("switching to external type LoadBalancer")
776- svc, err = jig.UpdateService(func(svc *v1.Service) {
777- disableILB(svc)
778- })
779- framework.ExpectNoError(err)
780- framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
781- if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
782- svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
783- if err != nil {
784- return false, err
785- }
786- lbIngress = &svc.Status.LoadBalancer.Ingress[0]
787- return !isInternalEndpoint(lbIngress), nil
788- }); pollErr != nil {
789- framework.Failf("Loadbalancer IP not changed to external.")
790- }
791- // should have an external IP.
792- gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse())
793-
794- ginkgo.By("hitting the external load balancer")
795- framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
796- tcpIngressIP = e2eservice.GetIngressPoint(lbIngress)
797- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
798-
799- // GCE cannot test a specific IP because the test may not own it. This cloud specific condition
800- // will be removed when GCP supports similar functionality.
801- if framework.ProviderIs("azure") {
802- ginkgo.By("switching back to interal type LoadBalancer, with static IP specified.")
803- internalStaticIP := "10.240.11.11"
804- svc, err = jig.UpdateService(func(svc *v1.Service) {
805- svc.Spec.LoadBalancerIP = internalStaticIP
806- enableILB(svc)
807- })
808- framework.ExpectNoError(err)
809- framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
810- if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
811- svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
812- if err != nil {
813- return false, err
814- }
815- lbIngress = &svc.Status.LoadBalancer.Ingress[0]
816- return isInternalEndpoint(lbIngress), nil
817- }); pollErr != nil {
818- framework.Failf("Loadbalancer IP not changed to internal.")
819- }
820- // should have the given static internal IP.
821- framework.ExpectEqual(e2eservice.GetIngressPoint(lbIngress), internalStaticIP)
822- }
823- })
824-
825- // This test creates a load balancer, make sure its health check interval
826- // equals to gceHcCheckIntervalSeconds. Then the interval is manipulated
827- // to be something else, see if the interval will be reconciled.
828- ginkgo.It("should reconcile LB health check interval [Slow][Serial]", func() {
829- const gceHcCheckIntervalSeconds = int64(8)
830- // This test is for clusters on GCE.
831- // (It restarts kube-controller-manager, which we don't support on GKE)
832- e2eskipper.SkipUnlessProviderIs("gce")
833- e2eskipper.SkipUnlessSSHKeyPresent()
834-
835- clusterID, err := gce.GetClusterID(cs)
836- if err != nil {
837- framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
838- }
839- gceCloud, err := gce.GetGCECloud()
840- if err != nil {
841- framework.Failf("framework.GetGCECloud() = _, %v; want nil", err)
842- }
843-
844- namespace := f.Namespace.Name
845- serviceName := "lb-hc-int"
846- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
847-
848- ginkgo.By("create load balancer service")
849- // Create loadbalancer service with source range from node[0] and podAccept
850- svc, err := jig.CreateTCPService(func(svc *v1.Service) {
851- svc.Spec.Type = v1.ServiceTypeLoadBalancer
852- })
853- framework.ExpectNoError(err)
854-
855- defer func() {
856- ginkgo.By("Clean up loadbalancer service")
857- e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
858- }()
859-
860- svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
861- framework.ExpectNoError(err)
862-
863- hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
864- hc, err := gceCloud.GetHTTPHealthCheck(hcName)
865- if err != nil {
866- framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
867- }
868- framework.ExpectEqual(hc.CheckIntervalSec, gceHcCheckIntervalSeconds)
869-
870- ginkgo.By("modify the health check interval")
871- hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
872- if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
873- framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
874- }
875-
876- ginkgo.By("restart kube-controller-manager")
877- if err := e2ekubesystem.RestartControllerManager(); err != nil {
878- framework.Failf("e2ekubesystem.RestartControllerManager() = %v; want nil", err)
879- }
880- if err := e2ekubesystem.WaitForControllerManagerUp(); err != nil {
881- framework.Failf("e2ekubesystem.WaitForControllerManagerUp() = %v; want nil", err)
882- }
883-
884- ginkgo.By("health check should be reconciled")
885- pollInterval := framework.Poll * 10
886- loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
887- if pollErr := wait.PollImmediate(pollInterval, loadBalancerPropagationTimeout, func() (bool, error) {
888- hc, err := gceCloud.GetHTTPHealthCheck(hcName)
889- if err != nil {
890- framework.Logf("ginkgo.Failed to get HttpHealthCheck(%q): %v", hcName, err)
891- return false, err
892- }
893- framework.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
894- return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
895- }); pollErr != nil {
896- framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
897- }
898- })
899-
900 /*
901 Release: v1.19
902 Testname: Service, ClusterIP type, session affinity to ClientIP
Serge Bazanski19eb0002021-01-21 14:25:25 +0100903@@ -2999,394 +2432,6 @@ var _ = SIGDescribe("Services", func() {
Lorenz Bruned0503c2020-07-28 17:21:25 +0200904 })
905 })
906
907-var _ = SIGDescribe("ESIPP [Slow]", func() {
908- f := framework.NewDefaultFramework("esipp")
909- var loadBalancerCreateTimeout time.Duration
910-
911- var cs clientset.Interface
912- serviceLBNames := []string{}
913-
914- ginkgo.BeforeEach(func() {
915- // requires cloud load-balancer support - this feature currently supported only on GCE/GKE
916- e2eskipper.SkipUnlessProviderIs("gce", "gke")
917-
918- cs = f.ClientSet
919- loadBalancerCreateTimeout = e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
920- })
921-
922- ginkgo.AfterEach(func() {
923- if ginkgo.CurrentGinkgoTestDescription().Failed {
924- DescribeSvc(f.Namespace.Name)
925- }
926- for _, lb := range serviceLBNames {
927- framework.Logf("cleaning load balancer resource for %s", lb)
928- e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
929- }
930- //reset serviceLBNames
931- serviceLBNames = []string{}
932- })
933-
934- ginkgo.It("should work for type=LoadBalancer", func() {
935- namespace := f.Namespace.Name
936- serviceName := "external-local-lb"
937- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
938-
939- svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
940- framework.ExpectNoError(err)
941- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
942- healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
943- if healthCheckNodePort == 0 {
944- framework.Failf("Service HealthCheck NodePort was not allocated")
945- }
946- defer func() {
947- err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
948- framework.ExpectNoError(err)
949-
950- // Make sure we didn't leak the health check node port.
Serge Bazanski19eb0002021-01-21 14:25:25 +0100951- const threshold = 2
952- nodes, err := getEndpointNodesWithInternalIP(jig)
Lorenz Bruned0503c2020-07-28 17:21:25 +0200953- framework.ExpectNoError(err)
Serge Bazanski19eb0002021-01-21 14:25:25 +0100954- config := e2enetwork.NewNetworkingTestConfig(f, false, false)
955- for _, internalIP := range nodes {
956- err := testHTTPHealthCheckNodePortFromTestContainer(
957- config,
958- internalIP,
959- healthCheckNodePort,
960- e2eservice.KubeProxyLagTimeout,
961- false,
962- threshold)
Lorenz Bruned0503c2020-07-28 17:21:25 +0200963- framework.ExpectNoError(err)
964- }
965- err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
966- framework.ExpectNoError(err)
967- }()
968-
969- svcTCPPort := int(svc.Spec.Ports[0].Port)
970- ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
971-
972- ginkgo.By("reading clientIP using the TCP service's service port via its external VIP")
973- content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
974- clientIP := content.String()
975- framework.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
976-
977- ginkgo.By("checking if Source IP is preserved")
978- if strings.HasPrefix(clientIP, "10.") {
979- framework.Failf("Source IP was NOT preserved")
980- }
981- })
982-
983- ginkgo.It("should work for type=NodePort", func() {
984- namespace := f.Namespace.Name
985- serviceName := "external-local-nodeport"
986- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
987-
988- svc, err := jig.CreateOnlyLocalNodePortService(true)
989- framework.ExpectNoError(err)
990- defer func() {
991- err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
992- framework.ExpectNoError(err)
993- }()
994-
995- tcpNodePort := int(svc.Spec.Ports[0].NodePort)
Lorenz Bruned0503c2020-07-28 17:21:25 +0200996-
Serge Bazanski19eb0002021-01-21 14:25:25 +0100997- endpointsNodeMap, err := getEndpointNodesWithInternalIP(jig)
998- framework.ExpectNoError(err)
999-
1000- dialCmd := "clientip"
1001- config := e2enetwork.NewNetworkingTestConfig(f, false, false)
1002-
1003- for nodeName, nodeIP := range endpointsNodeMap {
1004- ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v:%v/%v", nodeName, nodeIP, tcpNodePort, dialCmd))
1005- clientIP, err := GetHTTPContentFromTestContainer(config, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, dialCmd)
1006- framework.ExpectNoError(err)
1007- framework.Logf("ClientIP detected by target pod using NodePort is %s, the ip of test container is %s", clientIP, config.TestContainerPod.Status.PodIP)
1008- // the clientIP returned by agnhost contains port
1009- if !strings.HasPrefix(clientIP, config.TestContainerPod.Status.PodIP) {
Lorenz Bruned0503c2020-07-28 17:21:25 +02001010- framework.Failf("Source IP was NOT preserved")
1011- }
1012- }
1013- })
1014-
1015- ginkgo.It("should only target nodes with endpoints", func() {
1016- namespace := f.Namespace.Name
1017- serviceName := "external-local-nodes"
1018- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
1019- nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
1020- framework.ExpectNoError(err)
1021-
1022- svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, false,
1023- func(svc *v1.Service) {
1024- // Change service port to avoid collision with opened hostPorts
1025- // in other tests that run in parallel.
1026- if len(svc.Spec.Ports) != 0 {
1027- svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(svc.Spec.Ports[0].Port))
1028- svc.Spec.Ports[0].Port = 8081
1029- }
1030-
1031- })
1032- framework.ExpectNoError(err)
1033- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
1034- defer func() {
1035- err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
1036- framework.ExpectNoError(err)
1037- err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
1038- framework.ExpectNoError(err)
1039- }()
1040-
1041- healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
1042- if healthCheckNodePort == 0 {
1043- framework.Failf("Service HealthCheck NodePort was not allocated")
1044- }
1045-
Serge Bazanski19eb0002021-01-21 14:25:25 +01001046- ips := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
Lorenz Bruned0503c2020-07-28 17:21:25 +02001047-
1048- ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
1049- svcTCPPort := int(svc.Spec.Ports[0].Port)
1050-
Serge Bazanski19eb0002021-01-21 14:25:25 +01001051- const threshold = 2
1052- config := e2enetwork.NewNetworkingTestConfig(f, false, false)
Lorenz Bruned0503c2020-07-28 17:21:25 +02001053- for i := 0; i < len(nodes.Items); i++ {
1054- endpointNodeName := nodes.Items[i].Name
1055-
1056- ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
1057- _, err = jig.Run(func(rc *v1.ReplicationController) {
1058- rc.Name = serviceName
1059- if endpointNodeName != "" {
1060- rc.Spec.Template.Spec.NodeName = endpointNodeName
1061- }
1062- })
1063- framework.ExpectNoError(err)
1064-
1065- ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName))
1066- err = jig.WaitForEndpointOnNode(endpointNodeName)
1067- framework.ExpectNoError(err)
1068-
1069- // HealthCheck should pass only on the node where num(endpoints) > 0
1070- // All other nodes should fail the healthcheck on the service healthCheckNodePort
Serge Bazanski19eb0002021-01-21 14:25:25 +01001071- for n, internalIP := range ips {
Lorenz Bruned0503c2020-07-28 17:21:25 +02001072- // Make sure the loadbalancer picked up the health check change.
1073- // Confirm traffic can reach backend through LB before checking healthcheck nodeport.
1074- e2eservice.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout)
1075- expectedSuccess := nodes.Items[n].Name == endpointNodeName
1076- port := strconv.Itoa(healthCheckNodePort)
Serge Bazanski19eb0002021-01-21 14:25:25 +01001077- ipPort := net.JoinHostPort(internalIP, port)
1078- framework.Logf("Health checking %s, http://%s/healthz, expectedSuccess %v", nodes.Items[n].Name, ipPort, expectedSuccess)
1079- err := testHTTPHealthCheckNodePortFromTestContainer(
1080- config,
1081- internalIP,
1082- healthCheckNodePort,
1083- e2eservice.KubeProxyEndpointLagTimeout,
1084- expectedSuccess,
1085- threshold)
Lorenz Bruned0503c2020-07-28 17:21:25 +02001086- framework.ExpectNoError(err)
1087- }
1088- framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
1089- }
1090- })
1091-
1092- ginkgo.It("should work from pods", func() {
1093- var err error
1094- namespace := f.Namespace.Name
1095- serviceName := "external-local-pods"
1096- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
1097-
1098- svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
1099- framework.ExpectNoError(err)
1100- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
1101- defer func() {
1102- err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
1103- framework.ExpectNoError(err)
1104- err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
1105- framework.ExpectNoError(err)
1106- }()
1107-
1108- ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
1109- port := strconv.Itoa(int(svc.Spec.Ports[0].Port))
1110- ipPort := net.JoinHostPort(ingressIP, port)
1111- path := fmt.Sprintf("%s/clientip", ipPort)
1112-
1113- ginkgo.By("Creating pause pod deployment to make sure, pausePods are in desired state")
1114- deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1)
1115- framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
1116-
1117- defer func() {
1118- framework.Logf("Deleting deployment")
1119- err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
1120- framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
1121- }()
1122-
1123- deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
1124- framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
1125- labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
1126- framework.ExpectNoError(err, "Error in setting LabelSelector as selector from deployment")
1127-
1128- pausePods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()})
1129- framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments")
1130-
1131- pausePod := pausePods.Items[0]
1132- framework.Logf("Waiting up to %v curl %v", e2eservice.KubeProxyLagTimeout, path)
1133- cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %v`, path)
1134-
1135- var srcIP string
1136- loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
1137- ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName))
1138- if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) {
1139- stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
1140- if err != nil {
1141- framework.Logf("got err: %v, retry until timeout", err)
1142- return false, nil
1143- }
1144- srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0])
1145- return srcIP == pausePod.Status.PodIP, nil
1146- }); pollErr != nil {
1147- framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", pausePod.Name, pausePod.Status.PodIP, srcIP)
1148- }
1149- })
1150-
Serge Bazanski19eb0002021-01-21 14:25:25 +01001151- ginkgo.It("should handle updates to ExternalTrafficPolicy field", func() {
Lorenz Bruned0503c2020-07-28 17:21:25 +02001152- namespace := f.Namespace.Name
1153- serviceName := "external-local-update"
1154- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
1155-
1156- nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
1157- framework.ExpectNoError(err)
1158- if len(nodes.Items) < 2 {
1159- framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
1160- }
1161-
1162- svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
1163- framework.ExpectNoError(err)
1164- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
1165- defer func() {
1166- err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
1167- framework.ExpectNoError(err)
1168- err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
1169- framework.ExpectNoError(err)
1170- }()
1171-
1172- // save the health check node port because it disappears when ESIPP is turned off.
1173- healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
1174-
1175- ginkgo.By("turning ESIPP off")
1176- svc, err = jig.UpdateService(func(svc *v1.Service) {
1177- svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
1178- })
1179- framework.ExpectNoError(err)
1180- if svc.Spec.HealthCheckNodePort > 0 {
1181- framework.Failf("Service HealthCheck NodePort still present")
1182- }
1183-
Serge Bazanski19eb0002021-01-21 14:25:25 +01001184- epNodes, err := jig.ListNodesWithEndpoint()
Lorenz Bruned0503c2020-07-28 17:21:25 +02001185- framework.ExpectNoError(err)
Serge Bazanski19eb0002021-01-21 14:25:25 +01001186- // map from name of nodes with endpoint to internal ip
1187- // it is assumed that there is only a single node with the endpoint
1188- endpointNodeMap := make(map[string]string)
1189- // map from name of nodes without endpoint to internal ip
1190- noEndpointNodeMap := make(map[string]string)
1191- for _, node := range epNodes {
1192- ips := e2enode.GetAddresses(&node, v1.NodeInternalIP)
1193- if len(ips) < 1 {
1194- framework.Failf("No internal ip found for node %s", node.Name)
Lorenz Bruned0503c2020-07-28 17:21:25 +02001195- }
Serge Bazanski19eb0002021-01-21 14:25:25 +01001196- endpointNodeMap[node.Name] = ips[0]
Lorenz Bruned0503c2020-07-28 17:21:25 +02001197- }
Serge Bazanski19eb0002021-01-21 14:25:25 +01001198- for _, n := range nodes.Items {
1199- ips := e2enode.GetAddresses(&n, v1.NodeInternalIP)
1200- if len(ips) < 1 {
1201- framework.Failf("No internal ip found for node %s", n.Name)
1202- }
1203- if _, ok := endpointNodeMap[n.Name]; !ok {
1204- noEndpointNodeMap[n.Name] = ips[0]
1205- }
1206- }
1207- framework.ExpectNotEqual(len(endpointNodeMap), 0)
1208- framework.ExpectNotEqual(len(noEndpointNodeMap), 0)
Lorenz Bruned0503c2020-07-28 17:21:25 +02001209-
1210- svcTCPPort := int(svc.Spec.Ports[0].Port)
1211- svcNodePort := int(svc.Spec.Ports[0].NodePort)
1212- ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
1213- path := "/clientip"
Serge Bazanski19eb0002021-01-21 14:25:25 +01001214- dialCmd := "clientip"
1215-
1216- config := e2enetwork.NewNetworkingTestConfig(f, false, false)
Lorenz Bruned0503c2020-07-28 17:21:25 +02001217-
1218- ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap))
Serge Bazanski19eb0002021-01-21 14:25:25 +01001219- for nodeName, nodeIP := range noEndpointNodeMap {
1220- ginkgo.By(fmt.Sprintf("Checking %v (%v:%v/%v) proxies to endpoints on another node", nodeName, nodeIP[0], svcNodePort, dialCmd))
1221- _, err := GetHTTPContentFromTestContainer(config, nodeIP, svcNodePort, e2eservice.KubeProxyLagTimeout, dialCmd)
1222- framework.ExpectNoError(err, "Could not reach HTTP service through %v:%v/%v after %v", nodeIP, svcNodePort, dialCmd, e2eservice.KubeProxyLagTimeout)
Lorenz Bruned0503c2020-07-28 17:21:25 +02001223- }
1224-
Serge Bazanski19eb0002021-01-21 14:25:25 +01001225- for nodeName, nodeIP := range endpointNodeMap {
1226- ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIP))
1227- var body string
1228- pollFn := func() (bool, error) {
1229- // we expect connection failure here, but not other errors
1230- resp, err := config.GetResponseFromTestContainer(
1231- "http",
1232- "healthz",
1233- nodeIP,
1234- healthCheckNodePort)
1235- if err != nil {
1236- return false, nil
1237- }
1238- if len(resp.Errors) > 0 {
Lorenz Bruned0503c2020-07-28 17:21:25 +02001239- return true, nil
1240- }
1241-			if len(resp.Responses) > 0 {
1242- body = resp.Responses[0]
1243- }
1244-				return false, nil
1245- }
1246-			if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollFn); pollErr != nil {
1247-				framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s",
1248-					nodeName, healthCheckNodePort, body)
1249-			}
1250- }
1251-
1252- // Poll till kube-proxy re-adds the MASQUERADE rule on the node.
1253- ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP))
1254- var clientIP string
1255- pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
1256- content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
1257- clientIP = content.String()
1258- if strings.HasPrefix(clientIP, "10.") {
1259- return true, nil
1260- }
1261- return false, nil
1262- })
1263- if pollErr != nil {
1264- framework.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP)
1265- }
1266-
1267- // TODO: We need to attempt to create another service with the previously
1268- // allocated healthcheck nodePort. If the health check nodePort has been
1269- // freed, the new service creation will succeed, upon which we cleanup.
1270- // If the health check nodePort has NOT been freed, the new service
1271- // creation will fail.
1272-
1273- ginkgo.By("setting ExternalTraffic field back to OnlyLocal")
1274- svc, err = jig.UpdateService(func(svc *v1.Service) {
1275- svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
1276- // Request the same healthCheckNodePort as before, to test the user-requested allocation path
1277- svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
1278- })
1279- framework.ExpectNoError(err)
1280- pollErr = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
1281- content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
1282- clientIP = content.String()
1283- ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP))
1284- if !strings.HasPrefix(clientIP, "10.") {
1285- return true, nil
1286- }
1287- return false, nil
1288- })
1289- if pollErr != nil {
1290- framework.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP)
1291- }
1292- })
1293-})
1294-
1295 // execAffinityTestForSessionAffinityTimeout is a helper function that wraps the logic of
1296 // affinity test for non-load-balancer services. Session affinity will be
1297 // enabled when the service is created and a short timeout will be configured so
1298diff --git a/test/e2e/network/service_providers.go b/test/e2e/network/service_providers.go
1299new file mode 100644
1300index 00000000000..b7eae6feb2c
1301--- /dev/null
1302+++ b/test/e2e/network/service_providers.go
1303@@ -0,0 +1,980 @@
1304+// +build !providerless
1305+
1306+/*
1307+Copyright 2020 The Kubernetes Authors.
1308+
1309+Licensed under the Apache License, Version 2.0 (the "License");
1310+you may not use this file except in compliance with the License.
1311+You may obtain a copy of the License at
1312+
1313+ http://www.apache.org/licenses/LICENSE-2.0
1314+
1315+Unless required by applicable law or agreed to in writing, software
1316+distributed under the License is distributed on an "AS IS" BASIS,
1317+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1318+See the License for the specific language governing permissions and
1319+limitations under the License.
1320+*/
1321+
1322+package network
1323+
1324+import (
1325+ "bytes"
1326+ "context"
1327+ "fmt"
1328+ "net"
1329+ "strconv"
1330+ "strings"
1331+ "time"
1332+
1333+ compute "google.golang.org/api/compute/v1"
1334+ v1 "k8s.io/api/core/v1"
1335+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1336+ "k8s.io/apimachinery/pkg/util/intstr"
1337+ "k8s.io/apimachinery/pkg/util/wait"
1338+ clientset "k8s.io/client-go/kubernetes"
1339+ cloudprovider "k8s.io/cloud-provider"
1340+ "k8s.io/kubernetes/test/e2e/framework"
1341+ e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
1342+ e2ekubesystem "k8s.io/kubernetes/test/e2e/framework/kubesystem"
1343+ e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
1344+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
1345+ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
1346+ e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
1347+ e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
1348+ e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
1349+ gcecloud "k8s.io/legacy-cloud-providers/gce"
1350+
1351+ "github.com/onsi/ginkgo"
1352+ "github.com/onsi/gomega"
1353+)
1354+
1355+var _ = SIGDescribe("Services with Cloud LoadBalancers", func() {
1356+
1357+ f := framework.NewDefaultFramework("services")
1358+
1359+ var cs clientset.Interface
1360+ serviceLBNames := []string{}
1361+
1362+ ginkgo.BeforeEach(func() {
1363+ cs = f.ClientSet
1364+ })
1365+
1366+ ginkgo.AfterEach(func() {
1367+ if ginkgo.CurrentGinkgoTestDescription().Failed {
1368+ DescribeSvc(f.Namespace.Name)
1369+ }
1370+ for _, lb := range serviceLBNames {
1371+ framework.Logf("cleaning load balancer resource for %s", lb)
1372+ e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
1373+ }
1374+ //reset serviceLBNames
1375+ serviceLBNames = []string{}
1376+ })
1377+
1378+ // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed
1379+ ginkgo.It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() {
1380+ // requires cloud load-balancer support
1381+ e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
1382+
1383+ loadBalancerSupportsUDP := !framework.ProviderIs("aws")
1384+
1385+ loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault
1386+ if framework.ProviderIs("aws") {
1387+ loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
1388+ }
1389+ loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
1390+
1391+ // This test is more monolithic than we'd like because LB turnup can be
1392+ // very slow, so we lumped all the tests into one LB lifecycle.
1393+
1394+ serviceName := "mutability-test"
1395+ ns1 := f.Namespace.Name // LB1 in ns1 on TCP
1396+ framework.Logf("namespace for TCP test: %s", ns1)
1397+
1398+ ginkgo.By("creating a second namespace")
1399+ namespacePtr, err := f.CreateNamespace("services", nil)
1400+ framework.ExpectNoError(err, "failed to create namespace")
1401+ ns2 := namespacePtr.Name // LB2 in ns2 on UDP
1402+ framework.Logf("namespace for UDP test: %s", ns2)
1403+
1404+ nodeIP, err := e2enode.PickIP(cs) // for later
1405+ framework.ExpectNoError(err)
1406+
1407+ // Test TCP and UDP Services. Services with the same name in different
1408+ // namespaces should get different node ports and load balancers.
1409+
1410+ ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
1411+ tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
1412+ tcpService, err := tcpJig.CreateTCPService(nil)
1413+ framework.ExpectNoError(err)
1414+
1415+ ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
1416+ udpJig := e2eservice.NewTestJig(cs, ns2, serviceName)
1417+ udpService, err := udpJig.CreateUDPService(nil)
1418+ framework.ExpectNoError(err)
1419+
1420+ ginkgo.By("verifying that TCP and UDP use the same port")
1421+ if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
1422+ framework.Failf("expected to use the same port for TCP and UDP")
1423+ }
1424+ svcPort := int(tcpService.Spec.Ports[0].Port)
1425+ framework.Logf("service port (TCP and UDP): %d", svcPort)
1426+
1427+ ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
1428+ _, err = tcpJig.Run(nil)
1429+ framework.ExpectNoError(err)
1430+
1431+ ginkgo.By("creating a pod to be part of the UDP service " + serviceName)
1432+ _, err = udpJig.Run(nil)
1433+ framework.ExpectNoError(err)
1434+
1435+ // Change the services to NodePort.
1436+
1437+ ginkgo.By("changing the TCP service to type=NodePort")
1438+ tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
1439+ s.Spec.Type = v1.ServiceTypeNodePort
1440+ })
1441+ framework.ExpectNoError(err)
1442+ tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
1443+ framework.Logf("TCP node port: %d", tcpNodePort)
1444+
1445+ ginkgo.By("changing the UDP service to type=NodePort")
1446+ udpService, err = udpJig.UpdateService(func(s *v1.Service) {
1447+ s.Spec.Type = v1.ServiceTypeNodePort
1448+ })
1449+ framework.ExpectNoError(err)
1450+ udpNodePort := int(udpService.Spec.Ports[0].NodePort)
1451+ framework.Logf("UDP node port: %d", udpNodePort)
1452+
1453+ ginkgo.By("hitting the TCP service's NodePort")
1454+ e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
1455+
1456+ ginkgo.By("hitting the UDP service's NodePort")
1457+ testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
1458+
1459+ // Change the services to LoadBalancer.
1460+
1461+ // Here we test that LoadBalancers can receive static IP addresses. This isn't
1462+ // necessary, but is an additional feature this monolithic test checks.
1463+ requestedIP := ""
1464+ staticIPName := ""
1465+ if framework.ProviderIs("gce", "gke") {
1466+ ginkgo.By("creating a static load balancer IP")
1467+ staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID)
1468+ gceCloud, err := gce.GetGCECloud()
1469+ framework.ExpectNoError(err, "failed to get GCE cloud provider")
1470+
1471+ err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
1472+ defer func() {
1473+ if staticIPName != "" {
1474+ // Release GCE static IP - this is not kube-managed and will not be automatically released.
1475+ if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
1476+ framework.Logf("failed to release static IP %s: %v", staticIPName, err)
1477+ }
1478+ }
1479+ }()
1480+ framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)
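+			// The reservation error is checked only after the cleanup defer above is
+			// registered, so a partially created address is still released on failure.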
1481+ reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
1482+ framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
1483+
1484+ requestedIP = reservedAddr.Address
1485+ framework.Logf("Allocated static load balancer IP: %s", requestedIP)
1486+ }
1487+
1488+ ginkgo.By("changing the TCP service to type=LoadBalancer")
1489+ tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
1490+ s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
1491+ s.Spec.Type = v1.ServiceTypeLoadBalancer
1492+ })
1493+ framework.ExpectNoError(err)
1494+
1495+ if loadBalancerSupportsUDP {
1496+ ginkgo.By("changing the UDP service to type=LoadBalancer")
1497+ udpService, err = udpJig.UpdateService(func(s *v1.Service) {
1498+ s.Spec.Type = v1.ServiceTypeLoadBalancer
1499+ })
1500+ framework.ExpectNoError(err)
1501+ }
1502+ serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
1503+ if loadBalancerSupportsUDP {
1504+ serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService))
1505+ }
1506+
1507+ ginkgo.By("waiting for the TCP service to have a load balancer")
1508+ // Wait for the load balancer to be created asynchronously
1509+ tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
1510+ framework.ExpectNoError(err)
1511+ if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
1512+ framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
1513+ }
1514+ if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
1515+ framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
1516+ }
1517+ tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
1518+ framework.Logf("TCP load balancer: %s", tcpIngressIP)
1519+
1520+ if framework.ProviderIs("gce", "gke") {
1521+ // Do this as early as possible, which overrides the `defer` above.
1522+ // This is mostly out of fear of leaking the IP in a timeout case
1523+ // (as of this writing we're not 100% sure where the leaks are
1524+ // coming from, so this is first-aid rather than surgery).
1525+ ginkgo.By("demoting the static IP to ephemeral")
1526+ if staticIPName != "" {
1527+ gceCloud, err := gce.GetGCECloud()
1528+ framework.ExpectNoError(err, "failed to get GCE cloud provider")
1529+ // Deleting it after it is attached "demotes" it to an
1530+ // ephemeral IP, which can be auto-released.
1531+ if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
1532+ framework.Failf("failed to release static IP %s: %v", staticIPName, err)
1533+ }
1534+ staticIPName = ""
1535+ }
1536+ }
1537+
1538+ var udpIngressIP string
1539+ if loadBalancerSupportsUDP {
1540+ ginkgo.By("waiting for the UDP service to have a load balancer")
1541+ // 2nd one should be faster since they ran in parallel.
1542+ udpService, err = udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
1543+ framework.ExpectNoError(err)
1544+ if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
1545+ framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
1546+ }
1547+ udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
1548+ framework.Logf("UDP load balancer: %s", udpIngressIP)
1549+
1550+ ginkgo.By("verifying that TCP and UDP use different load balancers")
1551+ if tcpIngressIP == udpIngressIP {
1552+ framework.Failf("Load balancers are not different: %s", e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
1553+ }
1554+ }
1555+
1556+ ginkgo.By("hitting the TCP service's NodePort")
1557+ e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
1558+
1559+ ginkgo.By("hitting the UDP service's NodePort")
1560+ testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
1561+
1562+ ginkgo.By("hitting the TCP service's LoadBalancer")
1563+ e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
1564+
1565+ if loadBalancerSupportsUDP {
1566+ ginkgo.By("hitting the UDP service's LoadBalancer")
1567+ testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
1568+ }
1569+
1570+ // Change the services' node ports.
1571+
1572+ ginkgo.By("changing the TCP service's NodePort")
1573+ tcpService, err = tcpJig.ChangeServiceNodePort(tcpNodePort)
1574+ framework.ExpectNoError(err)
1575+ tcpNodePortOld := tcpNodePort
1576+ tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
1577+ if tcpNodePort == tcpNodePortOld {
1578+ framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
1579+ }
1580+ if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
1581+ framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
1582+ }
1583+ framework.Logf("TCP node port: %d", tcpNodePort)
1584+
1585+ ginkgo.By("changing the UDP service's NodePort")
1586+ udpService, err = udpJig.ChangeServiceNodePort(udpNodePort)
1587+ framework.ExpectNoError(err)
1588+ udpNodePortOld := udpNodePort
1589+ udpNodePort = int(udpService.Spec.Ports[0].NodePort)
1590+ if udpNodePort == udpNodePortOld {
1591+ framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
1592+ }
1593+ if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
1594+ framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
1595+ }
1596+ framework.Logf("UDP node port: %d", udpNodePort)
1597+
1598+ ginkgo.By("hitting the TCP service's new NodePort")
1599+ e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
1600+
1601+ ginkgo.By("hitting the UDP service's new NodePort")
1602+ testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
1603+
1604+ ginkgo.By("checking the old TCP NodePort is closed")
1605+ testNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout)
1606+
1607+ ginkgo.By("checking the old UDP NodePort is closed")
1608+ testNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout)
1609+
1610+ ginkgo.By("hitting the TCP service's LoadBalancer")
1611+ e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
1612+
1613+ if loadBalancerSupportsUDP {
1614+ ginkgo.By("hitting the UDP service's LoadBalancer")
1615+ testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
1616+ }
1617+
1618+ // Change the services' main ports.
1619+
1620+ ginkgo.By("changing the TCP service's port")
1621+ tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
1622+ s.Spec.Ports[0].Port++
1623+ })
1624+ framework.ExpectNoError(err)
1625+ svcPortOld := svcPort
1626+ svcPort = int(tcpService.Spec.Ports[0].Port)
1627+ if svcPort == svcPortOld {
1628+ framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
1629+ }
1630+ if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
1631+ framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
1632+ }
1633+ if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
1634+ framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
1635+ }
1636+
1637+ ginkgo.By("changing the UDP service's port")
1638+ udpService, err = udpJig.UpdateService(func(s *v1.Service) {
1639+ s.Spec.Ports[0].Port++
1640+ })
1641+ framework.ExpectNoError(err)
1642+ if int(udpService.Spec.Ports[0].Port) != svcPort {
1643+ framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
1644+ }
1645+ if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
1646+ framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
1647+ }
1648+ if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
1649+ framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
1650+ }
1651+
1652+ framework.Logf("service port (TCP and UDP): %d", svcPort)
1653+
1654+ ginkgo.By("hitting the TCP service's NodePort")
1655+ e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
1656+
1657+ ginkgo.By("hitting the UDP service's NodePort")
1658+ testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
1659+
1660+ ginkgo.By("hitting the TCP service's LoadBalancer")
1661+ e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
1662+
1663+ if loadBalancerSupportsUDP {
1664+ ginkgo.By("hitting the UDP service's LoadBalancer")
1665+ testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
1666+ }
1667+
1668+ ginkgo.By("Scaling the pods to 0")
1669+ err = tcpJig.Scale(0)
1670+ framework.ExpectNoError(err)
1671+ err = udpJig.Scale(0)
1672+ framework.ExpectNoError(err)
1673+
1674+ ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort")
1675+ testRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
1676+
1677+ ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort")
1678+ testRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
1679+
1680+ ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer")
1681+ testRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
1682+
1683+ if loadBalancerSupportsUDP {
1684+ ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
1685+ testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
1686+ }
1687+
1688+ ginkgo.By("Scaling the pods to 1")
1689+ err = tcpJig.Scale(1)
1690+ framework.ExpectNoError(err)
1691+ err = udpJig.Scale(1)
1692+ framework.ExpectNoError(err)
1693+
1694+ ginkgo.By("hitting the TCP service's NodePort")
1695+ e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
1696+
1697+ ginkgo.By("hitting the UDP service's NodePort")
1698+ testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
1699+
1700+ ginkgo.By("hitting the TCP service's LoadBalancer")
1701+ e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
1702+
1703+ if loadBalancerSupportsUDP {
1704+ ginkgo.By("hitting the UDP service's LoadBalancer")
1705+ testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
1706+ }
1707+
1708+ // Change the services back to ClusterIP.
1709+
1710+ ginkgo.By("changing TCP service back to type=ClusterIP")
1711+ _, err = tcpJig.UpdateService(func(s *v1.Service) {
1712+ s.Spec.Type = v1.ServiceTypeClusterIP
1713+ s.Spec.Ports[0].NodePort = 0
1714+ })
1715+ framework.ExpectNoError(err)
1716+ // Wait for the load balancer to be destroyed asynchronously
1717+ _, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
1718+ framework.ExpectNoError(err)
1719+
1720+ ginkgo.By("changing UDP service back to type=ClusterIP")
1721+ _, err = udpJig.UpdateService(func(s *v1.Service) {
1722+ s.Spec.Type = v1.ServiceTypeClusterIP
1723+ s.Spec.Ports[0].NodePort = 0
1724+ })
1725+ framework.ExpectNoError(err)
1726+ if loadBalancerSupportsUDP {
1727+ // Wait for the load balancer to be destroyed asynchronously
1728+ _, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
1729+ framework.ExpectNoError(err)
1730+ }
1731+
1732+ ginkgo.By("checking the TCP NodePort is closed")
1733+ testNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
1734+
1735+ ginkgo.By("checking the UDP NodePort is closed")
1736+ testNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
1737+
1738+ ginkgo.By("checking the TCP LoadBalancer is closed")
1739+ testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
1740+
1741+ if loadBalancerSupportsUDP {
1742+ ginkgo.By("checking the UDP LoadBalancer is closed")
1743+ testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
1744+ }
1745+ })
1746+
1747+ ginkgo.It("should be able to create an internal type load balancer [Slow]", func() {
1748+ e2eskipper.SkipUnlessProviderIs("azure", "gke", "gce")
1749+
1750+ createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
1751+ pollInterval := framework.Poll * 10
1752+
1753+ namespace := f.Namespace.Name
1754+ serviceName := "lb-internal"
1755+ jig := e2eservice.NewTestJig(cs, namespace, serviceName)
1756+
1757+ ginkgo.By("creating pod to be part of service " + serviceName)
1758+ _, err := jig.Run(nil)
1759+ framework.ExpectNoError(err)
1760+
1761+ enableILB, disableILB := enableAndDisableInternalLB()
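+		// enableAndDisableInternalLB returns provider-specific helpers that add and
+		// remove the cloud's internal load balancer annotation on a Service.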
1762+
1763+ isInternalEndpoint := func(lbIngress *v1.LoadBalancerIngress) bool {
1764+ ingressEndpoint := e2eservice.GetIngressPoint(lbIngress)
1765+ // Needs update for providers using hostname as endpoint.
1766+ return strings.HasPrefix(ingressEndpoint, "10.")
1767+ }
1768+
1769+ ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled")
1770+ svc, err := jig.CreateTCPService(func(svc *v1.Service) {
1771+ svc.Spec.Type = v1.ServiceTypeLoadBalancer
1772+ enableILB(svc)
1773+ })
1774+ framework.ExpectNoError(err)
1775+
1776+ defer func() {
1777+ ginkgo.By("Clean up loadbalancer service")
1778+ e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
1779+ }()
1780+
1781+ svc, err = jig.WaitForLoadBalancer(createTimeout)
1782+ framework.ExpectNoError(err)
1783+ lbIngress := &svc.Status.LoadBalancer.Ingress[0]
1784+ svcPort := int(svc.Spec.Ports[0].Port)
1785+ // should have an internal IP.
1786+ framework.ExpectEqual(isInternalEndpoint(lbIngress), true)
1787+
1788+ // ILBs are not accessible from the test orchestrator, so it's necessary to use
1789+ // a pod to test the service.
1790+ ginkgo.By("hitting the internal load balancer from pod")
1791+ framework.Logf("creating pod with host network")
1792+ hostExec := launchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")
1793+
1794+ framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
1795+ tcpIngressIP := e2eservice.GetIngressPoint(lbIngress)
1796+ if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
1797+ cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
1798+ stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
1799+ if err != nil {
1800+ framework.Logf("error curling; stdout: %v. err: %v", stdout, err)
1801+ return false, nil
1802+ }
1803+
1804+ if !strings.Contains(stdout, "hello") {
1805+ framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
1806+ return false, nil
1807+ }
1808+
1809+ framework.Logf("Successful curl; stdout: %v", stdout)
1810+ return true, nil
1811+ }); pollErr != nil {
1812+			framework.Failf("Failed to hit ILB IP, err: %v", pollErr)
1813+ }
1814+
1815+ ginkgo.By("switching to external type LoadBalancer")
1816+ svc, err = jig.UpdateService(func(svc *v1.Service) {
1817+ disableILB(svc)
1818+ })
1819+ framework.ExpectNoError(err)
1820+ framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
1821+ if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
1822+ svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
1823+ if err != nil {
1824+ return false, err
1825+ }
1826+ lbIngress = &svc.Status.LoadBalancer.Ingress[0]
1827+ return !isInternalEndpoint(lbIngress), nil
1828+ }); pollErr != nil {
1829+ framework.Failf("Loadbalancer IP not changed to external.")
1830+ }
1831+ // should have an external IP.
1832+ gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse())
1833+
1834+ ginkgo.By("hitting the external load balancer")
1835+ framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
1836+ tcpIngressIP = e2eservice.GetIngressPoint(lbIngress)
1837+ e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
1838+
1839+ // GCE cannot test a specific IP because the test may not own it. This cloud specific condition
1840+ // will be removed when GCP supports similar functionality.
1841+ if framework.ProviderIs("azure") {
1842+			ginkgo.By("switching back to internal type LoadBalancer, with static IP specified.")
1843+ internalStaticIP := "10.240.11.11"
1844+ svc, err = jig.UpdateService(func(svc *v1.Service) {
1845+ svc.Spec.LoadBalancerIP = internalStaticIP
1846+ enableILB(svc)
1847+ })
1848+ framework.ExpectNoError(err)
1849+ framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
1850+ if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
1851+ svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
1852+ if err != nil {
1853+ return false, err
1854+ }
1855+ lbIngress = &svc.Status.LoadBalancer.Ingress[0]
1856+ return isInternalEndpoint(lbIngress), nil
1857+ }); pollErr != nil {
1858+ framework.Failf("Loadbalancer IP not changed to internal.")
1859+ }
1860+ // should have the given static internal IP.
1861+ framework.ExpectEqual(e2eservice.GetIngressPoint(lbIngress), internalStaticIP)
1862+ }
1863+ })
1864+
1865+	// This test creates a load balancer and makes sure its health check interval
1866+	// equals gceHcCheckIntervalSeconds. The interval is then changed to a different
1867+	// value, and the test verifies that it is reconciled back.
1868+ ginkgo.It("should reconcile LB health check interval [Slow][Serial]", func() {
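+		// gceHcCheckIntervalSeconds mirrors the interval the GCE cloud provider is
+		// expected to configure on the load balancer's shared nodes health check.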
1869+ const gceHcCheckIntervalSeconds = int64(8)
1870+ // This test is for clusters on GCE.
1871+ // (It restarts kube-controller-manager, which we don't support on GKE)
1872+ e2eskipper.SkipUnlessProviderIs("gce")
1873+ e2eskipper.SkipUnlessSSHKeyPresent()
1874+
1875+ clusterID, err := gce.GetClusterID(cs)
1876+ if err != nil {
1877+ framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
1878+ }
1879+ gceCloud, err := gce.GetGCECloud()
1880+ if err != nil {
1881+ framework.Failf("framework.GetGCECloud() = _, %v; want nil", err)
1882+ }
1883+
1884+ namespace := f.Namespace.Name
1885+ serviceName := "lb-hc-int"
1886+ jig := e2eservice.NewTestJig(cs, namespace, serviceName)
1887+
1888+ ginkgo.By("create load balancer service")
1889+		// Create a LoadBalancer service.
1890+ svc, err := jig.CreateTCPService(func(svc *v1.Service) {
1891+ svc.Spec.Type = v1.ServiceTypeLoadBalancer
1892+ })
1893+ framework.ExpectNoError(err)
1894+
1895+ defer func() {
1896+ ginkgo.By("Clean up loadbalancer service")
1897+ e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
1898+ }()
1899+
1900+ svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
1901+ framework.ExpectNoError(err)
1902+
1903+ hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
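+		// LB services with externalTrafficPolicy=Cluster share a single nodes health
+		// check whose name is derived from the cluster ID.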
1904+ hc, err := gceCloud.GetHTTPHealthCheck(hcName)
1905+ if err != nil {
1906+ framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
1907+ }
1908+ framework.ExpectEqual(hc.CheckIntervalSec, gceHcCheckIntervalSeconds)
1909+
1910+ ginkgo.By("modify the health check interval")
1911+ hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
1912+ if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
1913+ framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
1914+ }
1915+
1916+ ginkgo.By("restart kube-controller-manager")
1917+ if err := e2ekubesystem.RestartControllerManager(); err != nil {
1918+ framework.Failf("e2ekubesystem.RestartControllerManager() = %v; want nil", err)
1919+ }
1920+ if err := e2ekubesystem.WaitForControllerManagerUp(); err != nil {
1921+ framework.Failf("e2ekubesystem.WaitForControllerManagerUp() = %v; want nil", err)
1922+ }
1923+
1924+ ginkgo.By("health check should be reconciled")
1925+ pollInterval := framework.Poll * 10
1926+ loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
1927+ if pollErr := wait.PollImmediate(pollInterval, loadBalancerPropagationTimeout, func() (bool, error) {
1928+ hc, err := gceCloud.GetHTTPHealthCheck(hcName)
1929+ if err != nil {
1930+				framework.Logf("Failed to get HttpHealthCheck(%q): %v", hcName, err)
1931+ return false, err
1932+ }
1933+ framework.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
1934+ return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
1935+ }); pollErr != nil {
1936+ framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
1937+ }
1938+ })
1939+
1940+ var _ = SIGDescribe("ESIPP [Slow]", func() {
1941+ f := framework.NewDefaultFramework("esipp")
1942+ var loadBalancerCreateTimeout time.Duration
1943+
1944+ var cs clientset.Interface
1945+ serviceLBNames := []string{}
1946+
1947+ ginkgo.BeforeEach(func() {
1948+			// requires cloud load-balancer support - this feature is currently supported only on GCE/GKE
1949+ e2eskipper.SkipUnlessProviderIs("gce", "gke")
1950+
1951+ cs = f.ClientSet
1952+ loadBalancerCreateTimeout = e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
1953+ })
1954+
1955+ ginkgo.AfterEach(func() {
1956+ if ginkgo.CurrentGinkgoTestDescription().Failed {
1957+ DescribeSvc(f.Namespace.Name)
1958+ }
1959+ for _, lb := range serviceLBNames {
1960+ framework.Logf("cleaning load balancer resource for %s", lb)
1961+ e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
1962+ }
1963+ //reset serviceLBNames
1964+ serviceLBNames = []string{}
1965+ })
1966+
1967+ ginkgo.It("should work for type=LoadBalancer", func() {
1968+ namespace := f.Namespace.Name
1969+ serviceName := "external-local-lb"
1970+ jig := e2eservice.NewTestJig(cs, namespace, serviceName)
1971+
1972+ svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
1973+ framework.ExpectNoError(err)
1974+ serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
1975+ healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
1976+ if healthCheckNodePort == 0 {
1977+ framework.Failf("Service HealthCheck NodePort was not allocated")
1978+ }
1979+ defer func() {
1980+ err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
1981+ framework.ExpectNoError(err)
1982+
1983+ // Make sure we didn't leak the health check node port.
1984+ threshold := 2
1985+ nodes, err := jig.GetEndpointNodes()
1986+ framework.ExpectNoError(err)
1987+ for _, ips := range nodes {
1988+ err := TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold)
1989+ framework.ExpectNoError(err)
1990+ }
1991+ err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
1992+ framework.ExpectNoError(err)
1993+ }()
1994+
1995+ svcTCPPort := int(svc.Spec.Ports[0].Port)
1996+ ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
1997+
1998+ ginkgo.By("reading clientIP using the TCP service's service port via its external VIP")
1999+ content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
2000+ clientIP := content.String()
2001+ framework.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
2002+
2003+ ginkgo.By("checking if Source IP is preserved")
2004+ if strings.HasPrefix(clientIP, "10.") {
2005+ framework.Failf("Source IP was NOT preserved")
2006+ }
2007+ })
2008+
2009+ ginkgo.It("should work for type=NodePort", func() {
2010+ namespace := f.Namespace.Name
2011+ serviceName := "external-local-nodeport"
2012+ jig := e2eservice.NewTestJig(cs, namespace, serviceName)
2013+
2014+ svc, err := jig.CreateOnlyLocalNodePortService(true)
2015+ framework.ExpectNoError(err)
2016+ defer func() {
2017+ err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
2018+ framework.ExpectNoError(err)
2019+ }()
2020+
2021+ tcpNodePort := int(svc.Spec.Ports[0].NodePort)
2022+ endpointsNodeMap, err := jig.GetEndpointNodes()
2023+ framework.ExpectNoError(err)
2024+ path := "/clientip"
2025+
2026+ for nodeName, nodeIPs := range endpointsNodeMap {
2027+ nodeIP := nodeIPs[0]
2028+			ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v:%v%v", nodeName, nodeIP, tcpNodePort, path))
2029+ content := GetHTTPContent(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, path)
2030+ clientIP := content.String()
2031+ framework.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
2032+ if strings.HasPrefix(clientIP, "10.") {
2033+ framework.Failf("Source IP was NOT preserved")
2034+ }
2035+ }
2036+ })
2037+
2038+ ginkgo.It("should only target nodes with endpoints", func() {
2039+ namespace := f.Namespace.Name
2040+ serviceName := "external-local-nodes"
2041+ jig := e2eservice.NewTestJig(cs, namespace, serviceName)
2042+ nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
2043+ framework.ExpectNoError(err)
2044+
2045+ svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, false,
2046+ func(svc *v1.Service) {
2047+ // Change service port to avoid collision with opened hostPorts
2048+ // in other tests that run in parallel.
2049+ if len(svc.Spec.Ports) != 0 {
2050+ svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(svc.Spec.Ports[0].Port))
2051+ svc.Spec.Ports[0].Port = 8081
2052+ }
2053+
2054+ })
2055+ framework.ExpectNoError(err)
2056+ serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
2057+ defer func() {
2058+ err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
2059+ framework.ExpectNoError(err)
2060+ err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
2061+ framework.ExpectNoError(err)
2062+ }()
2063+
2064+ healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
2065+ if healthCheckNodePort == 0 {
2066+ framework.Failf("Service HealthCheck NodePort was not allocated")
2067+ }
2068+
2069+ ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
2070+
2071+ ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
2072+ svcTCPPort := int(svc.Spec.Ports[0].Port)
2073+
2074+ threshold := 2
2075+ path := "/healthz"
2076+ for i := 0; i < len(nodes.Items); i++ {
2077+ endpointNodeName := nodes.Items[i].Name
2078+
2079+ ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
2080+ _, err = jig.Run(func(rc *v1.ReplicationController) {
2081+ rc.Name = serviceName
2082+ if endpointNodeName != "" {
2083+ rc.Spec.Template.Spec.NodeName = endpointNodeName
2084+ }
2085+ })
2086+ framework.ExpectNoError(err)
2087+
2088+ ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName))
2089+ err = jig.WaitForEndpointOnNode(endpointNodeName)
2090+ framework.ExpectNoError(err)
2091+
2092+ // HealthCheck should pass only on the node where num(endpoints) > 0
2093+ // All other nodes should fail the healthcheck on the service healthCheckNodePort
2094+ for n, publicIP := range ips {
2095+ // Make sure the loadbalancer picked up the health check change.
2096+ // Confirm traffic can reach backend through LB before checking healthcheck nodeport.
2097+ e2eservice.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout)
2098+ expectedSuccess := nodes.Items[n].Name == endpointNodeName
2099+ port := strconv.Itoa(healthCheckNodePort)
2100+ ipPort := net.JoinHostPort(publicIP, port)
2101+ framework.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
2102+ err := TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, e2eservice.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)
2103+ framework.ExpectNoError(err)
2104+ }
2105+ framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
2106+ }
2107+ })
2108+
2109+ ginkgo.It("should work from pods", func() {
2110+ var err error
2111+ namespace := f.Namespace.Name
2112+ serviceName := "external-local-pods"
2113+ jig := e2eservice.NewTestJig(cs, namespace, serviceName)
2114+
2115+ svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
2116+ framework.ExpectNoError(err)
2117+ serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
2118+ defer func() {
2119+ err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
2120+ framework.ExpectNoError(err)
2121+ err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
2122+ framework.ExpectNoError(err)
2123+ }()
2124+
2125+ ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
2126+ port := strconv.Itoa(int(svc.Spec.Ports[0].Port))
2127+ ipPort := net.JoinHostPort(ingressIP, port)
2128+ path := fmt.Sprintf("%s/clientip", ipPort)
2129+
2130+		ginkgo.By("Creating pause pod deployment to make sure pause pods are in the desired state")
2131+ deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1)
2132+ framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
2133+
2134+ defer func() {
2135+ framework.Logf("Deleting deployment")
2136+ err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
2137+ framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
2138+ }()
2139+
2140+ deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
2141+ framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
2142+ labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
2143+ framework.ExpectNoError(err, "Error in setting LabelSelector as selector from deployment")
2144+
2145+ pausePods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()})
2146+ framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments")
2147+
2148+ pausePod := pausePods.Items[0]
2149+ framework.Logf("Waiting up to %v curl %v", e2eservice.KubeProxyLagTimeout, path)
2150+ cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %v`, path)
2151+
2152+ var srcIP string
2153+ loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
2154+ ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName))
2155+ if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) {
2156+ stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
2157+ if err != nil {
2158+ framework.Logf("got err: %v, retry until timeout", err)
2159+ return false, nil
2160+ }
2161+ srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0])
2162+ return srcIP == pausePod.Status.PodIP, nil
2163+ }); pollErr != nil {
2164+ framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", pausePod.Name, pausePod.Status.PodIP, srcIP)
2165+ }
2166+ })
2167+
2168+ // TODO: Get rid of [DisabledForLargeClusters] tag when issue #90047 is fixed.
2169+ ginkgo.It("should handle updates to ExternalTrafficPolicy field [DisabledForLargeClusters]", func() {
2170+ namespace := f.Namespace.Name
2171+ serviceName := "external-local-update"
2172+ jig := e2eservice.NewTestJig(cs, namespace, serviceName)
2173+
2174+ nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
2175+ framework.ExpectNoError(err)
2176+ if len(nodes.Items) < 2 {
2177+ framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
2178+ }
2179+
2180+ svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
2181+ framework.ExpectNoError(err)
2182+ serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
2183+ defer func() {
2184+ err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
2185+ framework.ExpectNoError(err)
2186+ err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
2187+ framework.ExpectNoError(err)
2188+ }()
2189+
2190+ // save the health check node port because it disappears when ESIPP is turned off.
2191+ healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
2192+
2193+ ginkgo.By("turning ESIPP off")
2194+ svc, err = jig.UpdateService(func(svc *v1.Service) {
2195+ svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
2196+ })
2197+ framework.ExpectNoError(err)
2198+ if svc.Spec.HealthCheckNodePort > 0 {
2199+ framework.Failf("Service HealthCheck NodePort still present")
2200+ }
2201+
2202+ endpointNodeMap, err := jig.GetEndpointNodes()
2203+ framework.ExpectNoError(err)
2204+ noEndpointNodeMap := map[string][]string{}
2205+ for _, n := range nodes.Items {
2206+ if _, ok := endpointNodeMap[n.Name]; ok {
2207+ continue
2208+ }
2209+ noEndpointNodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP)
2210+ }
2211+
2212+ svcTCPPort := int(svc.Spec.Ports[0].Port)
2213+ svcNodePort := int(svc.Spec.Ports[0].NodePort)
2214+ ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
2215+ path := "/clientip"
2216+
2217+ ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap))
2218+ for nodeName, nodeIPs := range noEndpointNodeMap {
2219+ ginkgo.By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path))
2220+ GetHTTPContent(nodeIPs[0], svcNodePort, e2eservice.KubeProxyLagTimeout, path)
2221+ }
2222+
2223+ for nodeName, nodeIPs := range endpointNodeMap {
2224+ ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0]))
2225+ var body bytes.Buffer
2226+ pollfn := func() (bool, error) {
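+				// A Code of 0 means the request did not get an HTTP response at all,
+				// i.e. kube-proxy no longer serves the health check port.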
2227+ result := e2enetwork.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil)
2228+ if result.Code == 0 {
2229+ return true, nil
2230+ }
2231+ body.Reset()
2232+ body.Write(result.Body)
2233+ return false, nil
2234+ }
2235+ if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollfn); pollErr != nil {
2236+ framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s",
2237+ nodeName, healthCheckNodePort, body.String())
2238+ }
2239+ }
2240+
2241+ // Poll till kube-proxy re-adds the MASQUERADE rule on the node.
2242+ ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP))
2243+ var clientIP string
2244+ pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
2245+ content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
2246+ clientIP = content.String()
2247+ if strings.HasPrefix(clientIP, "10.") {
2248+ return true, nil
2249+ }
2250+ return false, nil
2251+ })
2252+ if pollErr != nil {
2253+ framework.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP)
2254+ }
2255+
2256+ // TODO: We need to attempt to create another service with the previously
2257+ // allocated healthcheck nodePort. If the health check nodePort has been
2258+ // freed, the new service creation will succeed, upon which we cleanup.
2259+ // If the health check nodePort has NOT been freed, the new service
2260+ // creation will fail.
2261+
2262+ ginkgo.By("setting ExternalTraffic field back to OnlyLocal")
2263+ svc, err = jig.UpdateService(func(svc *v1.Service) {
2264+ svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
2265+ // Request the same healthCheckNodePort as before, to test the user-requested allocation path
2266+ svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
2267+ })
2268+ framework.ExpectNoError(err)
2269+ pollErr = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
2270+ content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
2271+ clientIP = content.String()
2272+ ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP))
2273+ if !strings.HasPrefix(clientIP, "10.") {
2274+ return true, nil
2275+ }
2276+ return false, nil
2277+ })
2278+ if pollErr != nil {
2279+ framework.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP)
2280+ }
2281+ })
2282+ })
2283+})
2284diff --git a/test/e2e/node/recreate_node.go b/test/e2e/node/recreate_node.go
2285index da3fc974485..b403fa7f737 100644
2286--- a/test/e2e/node/recreate_node.go
2287+++ b/test/e2e/node/recreate_node.go
2288@@ -1,3 +1,5 @@
2289+// +build !providerless
2290+
2291 /*
2292 Copyright 2019 The Kubernetes Authors.
2293
2294diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go
2295index 313e773b8e2..bbf66b59aac 100644
2296--- a/test/e2e/scheduling/nvidia-gpus.go
2297+++ b/test/e2e/scheduling/nvidia-gpus.go
2298@@ -1,3 +1,5 @@
2299+// +build !providerless
2300+
2301 /*
2302 Copyright 2017 The Kubernetes Authors.
2303
2304diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go
2305index 513ed07543f..78c0f081990 100644
2306--- a/test/e2e/scheduling/ubernetes_lite_volumes.go
2307+++ b/test/e2e/scheduling/ubernetes_lite_volumes.go
2308@@ -1,3 +1,5 @@
2309+// +build !providerless
2310+
2311 /*
2312 Copyright 2017 The Kubernetes Authors.
2313
2314diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
2315index a866266c1f1..28e26a10c35 100644
2316--- a/test/e2e/storage/drivers/in_tree.go
2317+++ b/test/e2e/storage/drivers/in_tree.go
2318@@ -38,10 +38,8 @@ package drivers
2319 import (
2320 "context"
2321 "fmt"
2322- "os/exec"
2323 "strconv"
2324 "strings"
2325- "time"
2326
2327 "github.com/onsi/ginkgo"
2328 v1 "k8s.io/api/core/v1"
2329@@ -57,13 +55,11 @@ import (
2330 e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
2331 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
2332 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
2333- e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
2334 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
2335 e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
2336 "k8s.io/kubernetes/test/e2e/storage/testpatterns"
2337 "k8s.io/kubernetes/test/e2e/storage/testsuites"
2338 "k8s.io/kubernetes/test/e2e/storage/utils"
2339- vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
2340 imageutils "k8s.io/kubernetes/test/utils/image"
2341 )
2342
2343@@ -1044,734 +1040,6 @@ func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
2344 }, func() {}
2345 }
2346
2347-// Cinder
2348-// This driver assumes that OpenStack client tools are installed
2349-// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
2350-// and that the usual OpenStack authentication env. variables are set
2351-// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
2352-type cinderDriver struct {
2353- driverInfo testsuites.DriverInfo
2354-}
2355-
2356-type cinderVolume struct {
2357- volumeName string
2358- volumeID string
2359-}
2360-
2361-var _ testsuites.TestDriver = &cinderDriver{}
2362-var _ testsuites.PreprovisionedVolumeTestDriver = &cinderDriver{}
2363-var _ testsuites.InlineVolumeTestDriver = &cinderDriver{}
2364-var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{}
2365-var _ testsuites.DynamicPVTestDriver = &cinderDriver{}
2366-
2367-// InitCinderDriver returns cinderDriver that implements TestDriver interface
2368-func InitCinderDriver() testsuites.TestDriver {
2369- return &cinderDriver{
2370- driverInfo: testsuites.DriverInfo{
2371- Name: "cinder",
2372- InTreePluginName: "kubernetes.io/cinder",
2373- MaxFileSize: testpatterns.FileSizeMedium,
2374- SupportedSizeRange: e2evolume.SizeRange{
2375- Min: "5Gi",
2376- },
2377- SupportedFsType: sets.NewString(
2378- "", // Default fsType
2379- "ext3",
2380- ),
2381- TopologyKeys: []string{v1.LabelZoneFailureDomain},
2382- Capabilities: map[testsuites.Capability]bool{
2383- testsuites.CapPersistence: true,
2384- testsuites.CapFsGroup: true,
2385- testsuites.CapExec: true,
2386- testsuites.CapBlock: true,
2387- // Cinder supports volume limits, but the test creates large
2388- // number of volumes and times out test suites.
2389- testsuites.CapVolumeLimits: false,
2390- testsuites.CapTopology: true,
2391- },
2392- },
2393- }
2394-}
2395-
2396-func (c *cinderDriver) GetDriverInfo() *testsuites.DriverInfo {
2397- return &c.driverInfo
2398-}
2399-
2400-func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
2401- e2eskipper.SkipUnlessProviderIs("openstack")
2402-}
2403-
2404-func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
2405- cv, ok := e2evolume.(*cinderVolume)
2406- framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
2407-
2408- volSource := v1.VolumeSource{
2409- Cinder: &v1.CinderVolumeSource{
2410- VolumeID: cv.volumeID,
2411- ReadOnly: readOnly,
2412- },
2413- }
2414- if fsType != "" {
2415- volSource.Cinder.FSType = fsType
2416- }
2417- return &volSource
2418-}
2419-
2420-func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
2421- cv, ok := e2evolume.(*cinderVolume)
2422- framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
2423-
2424- pvSource := v1.PersistentVolumeSource{
2425- Cinder: &v1.CinderPersistentVolumeSource{
2426- VolumeID: cv.volumeID,
2427- ReadOnly: readOnly,
2428- },
2429- }
2430- if fsType != "" {
2431- pvSource.Cinder.FSType = fsType
2432- }
2433- return &pvSource, nil
2434-}
2435-
2436-func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
2437- provisioner := "kubernetes.io/cinder"
2438- parameters := map[string]string{}
2439- if fsType != "" {
2440- parameters["fsType"] = fsType
2441- }
2442- ns := config.Framework.Namespace.Name
2443- suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name)
2444-
2445- return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
2446-}
2447-
2448-func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
2449- return &testsuites.PerTestConfig{
2450- Driver: c,
2451- Prefix: "cinder",
2452- Framework: f,
2453- }, func() {}
2454-}
2455-
2456-func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
2457- f := config.Framework
2458- ns := f.Namespace
2459-
2460- // We assume that namespace.Name is a random string
2461- volumeName := ns.Name
2462- ginkgo.By("creating a test Cinder volume")
2463- output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
2464- outputString := string(output[:])
2465- framework.Logf("cinder output:\n%s", outputString)
2466- framework.ExpectNoError(err)
2467-
2468- // Parse 'id'' from stdout. Expected format:
2469- // | attachments | [] |
2470- // | availability_zone | nova |
2471- // ...
2472- // | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
2473- volumeID := ""
2474- for _, line := range strings.Split(outputString, "\n") {
2475- fields := strings.Fields(line)
2476- if len(fields) != 5 {
2477- continue
2478- }
2479- if fields[1] != "id" {
2480- continue
2481- }
2482- volumeID = fields[3]
2483- break
2484- }
2485- framework.Logf("Volume ID: %s", volumeID)
2486- framework.ExpectNotEqual(volumeID, "")
2487- return &cinderVolume{
2488- volumeName: volumeName,
2489- volumeID: volumeID,
2490- }
2491-}
2492-
2493-func (v *cinderVolume) DeleteVolume() {
2494- name := v.volumeName
2495-
2496- // Try to delete the volume for several seconds - it takes
2497- // a while for the plugin to detach it.
2498- var output []byte
2499- var err error
2500- timeout := time.Second * 120
2501-
2502- framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
2503- for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
2504- output, err = exec.Command("cinder", "delete", name).CombinedOutput()
2505- if err == nil {
2506- framework.Logf("Cinder volume %s deleted", name)
2507- return
2508- }
2509- framework.Logf("Failed to delete volume %s: %v", name, err)
2510- }
2511- framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
2512-}
2513-
2514-// GCE
2515-type gcePdDriver struct {
2516- driverInfo testsuites.DriverInfo
2517-}
2518-
2519-type gcePdVolume struct {
2520- volumeName string
2521-}
2522-
2523-var _ testsuites.TestDriver = &gcePdDriver{}
2524-var _ testsuites.PreprovisionedVolumeTestDriver = &gcePdDriver{}
2525-var _ testsuites.InlineVolumeTestDriver = &gcePdDriver{}
2526-var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{}
2527-var _ testsuites.DynamicPVTestDriver = &gcePdDriver{}
2528-
2529-// InitGcePdDriver returns gcePdDriver that implements TestDriver interface
2530-func InitGcePdDriver() testsuites.TestDriver {
2531- // In current test structure, it first initialize the driver and then set up
2532- // the new framework, so we cannot get the correct OS here. So here set to
2533- // support all fs types including both linux and windows. We have code to check Node OS later
2534- // during test.
2535- supportedTypes := sets.NewString(
2536- "", // Default fsType
2537- "ext2",
2538- "ext3",
2539- "ext4",
2540- "xfs",
2541- "ntfs",
2542- )
2543- return &gcePdDriver{
2544- driverInfo: testsuites.DriverInfo{
2545- Name: "gcepd",
2546- InTreePluginName: "kubernetes.io/gce-pd",
2547- MaxFileSize: testpatterns.FileSizeMedium,
2548- SupportedSizeRange: e2evolume.SizeRange{
2549- Min: "5Gi",
2550- },
2551- SupportedFsType: supportedTypes,
2552- SupportedMountOption: sets.NewString("debug", "nouid32"),
2553- TopologyKeys: []string{v1.LabelZoneFailureDomain},
2554- Capabilities: map[testsuites.Capability]bool{
2555- testsuites.CapPersistence: true,
2556- testsuites.CapFsGroup: true,
2557- testsuites.CapBlock: true,
2558- testsuites.CapExec: true,
2559- testsuites.CapMultiPODs: true,
2560- testsuites.CapControllerExpansion: true,
2561- testsuites.CapNodeExpansion: true,
2562- // GCE supports volume limits, but the test creates large
2563- // number of volumes and times out test suites.
2564- testsuites.CapVolumeLimits: false,
2565- testsuites.CapTopology: true,
2566- },
2567- },
2568- }
2569-}
2570-
2571-func (g *gcePdDriver) GetDriverInfo() *testsuites.DriverInfo {
2572- return &g.driverInfo
2573-}
2574-
2575-func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
2576- e2eskipper.SkipUnlessProviderIs("gce", "gke")
2577- if pattern.FeatureTag == "[sig-windows]" {
2578- e2eskipper.SkipUnlessNodeOSDistroIs("windows")
2579- }
2580-}
2581-
2582-func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
2583- gv, ok := e2evolume.(*gcePdVolume)
2584- framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
2585- volSource := v1.VolumeSource{
2586- GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
2587- PDName: gv.volumeName,
2588- ReadOnly: readOnly,
2589- },
2590- }
2591- if fsType != "" {
2592- volSource.GCEPersistentDisk.FSType = fsType
2593- }
2594- return &volSource
2595-}
2596-
2597-func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
2598- gv, ok := e2evolume.(*gcePdVolume)
2599- framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
2600- pvSource := v1.PersistentVolumeSource{
2601- GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
2602- PDName: gv.volumeName,
2603- ReadOnly: readOnly,
2604- },
2605- }
2606- if fsType != "" {
2607- pvSource.GCEPersistentDisk.FSType = fsType
2608- }
2609- return &pvSource, nil
2610-}
2611-
2612-func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
2613- provisioner := "kubernetes.io/gce-pd"
2614- parameters := map[string]string{}
2615- if fsType != "" {
2616- parameters["fsType"] = fsType
2617- }
2618- ns := config.Framework.Namespace.Name
2619- suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
2620- delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
2621-
2622- return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
2623-}
2624-
2625-func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
2626- config := &testsuites.PerTestConfig{
2627- Driver: g,
2628- Prefix: "gcepd",
2629- Framework: f,
2630- }
2631-
2632- if framework.NodeOSDistroIs("windows") {
2633- config.ClientNodeSelection = e2epod.NodeSelection{
2634- Selector: map[string]string{
2635- "kubernetes.io/os": "windows",
2636- },
2637- }
2638- }
2639- return config, func() {}
2640-
2641-}
2642-
2643-func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
2644- zone := getInlineVolumeZone(config.Framework)
2645- if volType == testpatterns.InlineVolume {
2646- // PD will be created in framework.TestContext.CloudConfig.Zone zone,
2647- // so pods should be also scheduled there.
2648- config.ClientNodeSelection = e2epod.NodeSelection{
2649- Selector: map[string]string{
2650- v1.LabelZoneFailureDomain: zone,
2651- },
2652- }
2653- }
2654- ginkgo.By("creating a test gce pd volume")
2655- vname, err := e2epv.CreatePDWithRetryAndZone(zone)
2656- framework.ExpectNoError(err)
2657- return &gcePdVolume{
2658- volumeName: vname,
2659- }
2660-}
2661-
2662-func (v *gcePdVolume) DeleteVolume() {
2663- e2epv.DeletePDWithRetry(v.volumeName)
2664-}
2665-
2666-// vSphere
2667-type vSphereDriver struct {
2668- driverInfo testsuites.DriverInfo
2669-}
2670-
2671-type vSphereVolume struct {
2672- volumePath string
2673- nodeInfo *vspheretest.NodeInfo
2674-}
2675-
2676-var _ testsuites.TestDriver = &vSphereDriver{}
2677-var _ testsuites.PreprovisionedVolumeTestDriver = &vSphereDriver{}
2678-var _ testsuites.InlineVolumeTestDriver = &vSphereDriver{}
2679-var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{}
2680-var _ testsuites.DynamicPVTestDriver = &vSphereDriver{}
2681-
2682-// InitVSphereDriver returns vSphereDriver that implements TestDriver interface
2683-func InitVSphereDriver() testsuites.TestDriver {
2684- return &vSphereDriver{
2685- driverInfo: testsuites.DriverInfo{
2686- Name: "vsphere",
2687- InTreePluginName: "kubernetes.io/vsphere-volume",
2688- MaxFileSize: testpatterns.FileSizeMedium,
2689- SupportedSizeRange: e2evolume.SizeRange{
2690- Min: "5Gi",
2691- },
2692- SupportedFsType: sets.NewString(
2693- "", // Default fsType
2694- "ext4",
2695- ),
2696- TopologyKeys: []string{v1.LabelZoneFailureDomain},
2697- Capabilities: map[testsuites.Capability]bool{
2698- testsuites.CapPersistence: true,
2699- testsuites.CapFsGroup: true,
2700- testsuites.CapExec: true,
2701- testsuites.CapMultiPODs: true,
2702- testsuites.CapTopology: true,
2703- },
2704- },
2705- }
2706-}
2707-func (v *vSphereDriver) GetDriverInfo() *testsuites.DriverInfo {
2708- return &v.driverInfo
2709-}
2710-
2711-func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
2712- e2eskipper.SkipUnlessProviderIs("vsphere")
2713-}
2714-
2715-func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
2716- vsv, ok := e2evolume.(*vSphereVolume)
2717- framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
2718-
2719- // vSphere driver doesn't seem to support readOnly volume
2720- // TODO: check if it is correct
2721- if readOnly {
2722- return nil
2723- }
2724- volSource := v1.VolumeSource{
2725- VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
2726- VolumePath: vsv.volumePath,
2727- },
2728- }
2729- if fsType != "" {
2730- volSource.VsphereVolume.FSType = fsType
2731- }
2732- return &volSource
2733-}
2734-
2735-func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
2736- vsv, ok := e2evolume.(*vSphereVolume)
2737- framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
2738-
2739- // vSphere driver doesn't seem to support readOnly volume
2740- // TODO: check if it is correct
2741- if readOnly {
2742- return nil, nil
2743- }
2744- pvSource := v1.PersistentVolumeSource{
2745- VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
2746- VolumePath: vsv.volumePath,
2747- },
2748- }
2749- if fsType != "" {
2750- pvSource.VsphereVolume.FSType = fsType
2751- }
2752- return &pvSource, nil
2753-}
2754-
2755-func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
2756- provisioner := "kubernetes.io/vsphere-volume"
2757- parameters := map[string]string{}
2758- if fsType != "" {
2759- parameters["fsType"] = fsType
2760- }
2761- ns := config.Framework.Namespace.Name
2762- suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name)
2763-
2764- return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
2765-}
2766-
2767-func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
2768- return &testsuites.PerTestConfig{
2769- Driver: v,
2770- Prefix: "vsphere",
2771- Framework: f,
2772- }, func() {}
2773-}
2774-
2775-func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
2776- f := config.Framework
2777- vspheretest.Bootstrap(f)
2778- nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
2779- volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
2780- framework.ExpectNoError(err)
2781- return &vSphereVolume{
2782- volumePath: volumePath,
2783- nodeInfo: nodeInfo,
2784- }
2785-}
2786-
2787-func (v *vSphereVolume) DeleteVolume() {
2788- v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef)
2789-}
2790-
2791-// Azure Disk
2792-type azureDiskDriver struct {
2793- driverInfo testsuites.DriverInfo
2794-}
2795-
2796-type azureDiskVolume struct {
2797- volumeName string
2798-}
2799-
2800-var _ testsuites.TestDriver = &azureDiskDriver{}
2801-var _ testsuites.PreprovisionedVolumeTestDriver = &azureDiskDriver{}
2802-var _ testsuites.InlineVolumeTestDriver = &azureDiskDriver{}
2803-var _ testsuites.PreprovisionedPVTestDriver = &azureDiskDriver{}
2804-var _ testsuites.DynamicPVTestDriver = &azureDiskDriver{}
2805-
2806-// InitAzureDiskDriver returns azureDiskDriver that implements TestDriver interface
2807-func InitAzureDiskDriver() testsuites.TestDriver {
2808- return &azureDiskDriver{
2809- driverInfo: testsuites.DriverInfo{
2810- Name: "azure-disk",
2811- InTreePluginName: "kubernetes.io/azure-disk",
2812- MaxFileSize: testpatterns.FileSizeMedium,
2813- SupportedSizeRange: e2evolume.SizeRange{
2814- Min: "5Gi",
2815- },
2816- SupportedFsType: sets.NewString(
2817- "", // Default fsType
2818- "ext3",
2819- "ext4",
2820- "xfs",
2821- ),
2822- TopologyKeys: []string{v1.LabelZoneFailureDomain},
2823- Capabilities: map[testsuites.Capability]bool{
2824- testsuites.CapPersistence: true,
2825- testsuites.CapFsGroup: true,
2826- testsuites.CapBlock: true,
2827- testsuites.CapExec: true,
2828- testsuites.CapMultiPODs: true,
2829- // Azure supports volume limits, but the test creates large
2830- // number of volumes and times out test suites.
2831- testsuites.CapVolumeLimits: false,
2832- testsuites.CapTopology: true,
2833- },
2834- },
2835- }
2836-}
2837-
2838-func (a *azureDiskDriver) GetDriverInfo() *testsuites.DriverInfo {
2839- return &a.driverInfo
2840-}
2841-
2842-func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
2843- e2eskipper.SkipUnlessProviderIs("azure")
2844-}
2845-
2846-func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
2847- av, ok := e2evolume.(*azureDiskVolume)
2848- framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
2849- diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
2850-
2851- kind := v1.AzureManagedDisk
2852- volSource := v1.VolumeSource{
2853- AzureDisk: &v1.AzureDiskVolumeSource{
2854- DiskName: diskName,
2855- DataDiskURI: av.volumeName,
2856- Kind: &kind,
2857- ReadOnly: &readOnly,
2858- },
2859- }
2860- if fsType != "" {
2861- volSource.AzureDisk.FSType = &fsType
2862- }
2863- return &volSource
2864-}
2865-
2866-func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
2867- av, ok := e2evolume.(*azureDiskVolume)
2868- framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
2869-
2870- diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
2871-
2872- kind := v1.AzureManagedDisk
2873- pvSource := v1.PersistentVolumeSource{
2874- AzureDisk: &v1.AzureDiskVolumeSource{
2875- DiskName: diskName,
2876- DataDiskURI: av.volumeName,
2877- Kind: &kind,
2878- ReadOnly: &readOnly,
2879- },
2880- }
2881- if fsType != "" {
2882- pvSource.AzureDisk.FSType = &fsType
2883- }
2884- return &pvSource, nil
2885-}
2886-
2887-func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
2888- provisioner := "kubernetes.io/azure-disk"
2889- parameters := map[string]string{}
2890- if fsType != "" {
2891- parameters["fsType"] = fsType
2892- }
2893- ns := config.Framework.Namespace.Name
2894- suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
2895- delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
2896-
2897- return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
2898-}
2899-
2900-func (a *azureDiskDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
2901- return &testsuites.PerTestConfig{
2902- Driver: a,
2903- Prefix: "azure",
2904- Framework: f,
2905- }, func() {}
2906-}
2907-
2908-func (a *azureDiskDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
2909- ginkgo.By("creating a test azure disk volume")
2910- zone := getInlineVolumeZone(config.Framework)
2911- if volType == testpatterns.InlineVolume {
2912- // PD will be created in framework.TestContext.CloudConfig.Zone zone,
2913- // so pods should be also scheduled there.
2914- config.ClientNodeSelection = e2epod.NodeSelection{
2915- Selector: map[string]string{
2916- v1.LabelZoneFailureDomain: zone,
2917- },
2918- }
2919- }
2920- volumeName, err := e2epv.CreatePDWithRetryAndZone(zone)
2921- framework.ExpectNoError(err)
2922- return &azureDiskVolume{
2923- volumeName: volumeName,
2924- }
2925-}
2926-
2927-func (v *azureDiskVolume) DeleteVolume() {
2928- e2epv.DeletePDWithRetry(v.volumeName)
2929-}
2930-
2931-// AWS
2932-type awsDriver struct {
2933- driverInfo testsuites.DriverInfo
2934-}
2935-
2936-type awsVolume struct {
2937- volumeName string
2938-}
2939-
2940-var _ testsuites.TestDriver = &awsDriver{}
2941-
2942-var _ testsuites.PreprovisionedVolumeTestDriver = &awsDriver{}
2943-var _ testsuites.InlineVolumeTestDriver = &awsDriver{}
2944-var _ testsuites.PreprovisionedPVTestDriver = &awsDriver{}
2945-var _ testsuites.DynamicPVTestDriver = &awsDriver{}
2946-
2947-// InitAwsDriver returns awsDriver that implements TestDriver interface
2948-func InitAwsDriver() testsuites.TestDriver {
2949- return &awsDriver{
2950- driverInfo: testsuites.DriverInfo{
2951- Name: "aws",
2952- InTreePluginName: "kubernetes.io/aws-ebs",
2953- MaxFileSize: testpatterns.FileSizeMedium,
2954- SupportedSizeRange: e2evolume.SizeRange{
2955- Min: "5Gi",
2956- },
2957- SupportedFsType: sets.NewString(
2958- "", // Default fsType
2959- "ext2",
2960- "ext3",
2961- "ext4",
2962- "xfs",
2963- "ntfs",
2964- ),
2965- SupportedMountOption: sets.NewString("debug", "nouid32"),
2966- TopologyKeys: []string{v1.LabelZoneFailureDomain},
2967- Capabilities: map[testsuites.Capability]bool{
2968- testsuites.CapPersistence: true,
2969- testsuites.CapFsGroup: true,
2970- testsuites.CapBlock: true,
2971- testsuites.CapExec: true,
2972- testsuites.CapMultiPODs: true,
2973- testsuites.CapControllerExpansion: true,
2974- testsuites.CapNodeExpansion: true,
2975- // AWS supports volume limits, but the test creates large
2976- // number of volumes and times out test suites.
2977- testsuites.CapVolumeLimits: false,
2978- testsuites.CapTopology: true,
2979- },
2980- },
2981- }
2982-}
2983-
2984-func (a *awsDriver) GetDriverInfo() *testsuites.DriverInfo {
2985- return &a.driverInfo
2986-}
2987-
2988-func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
2989- e2eskipper.SkipUnlessProviderIs("aws")
2990-}
2991-
2992-func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
2993- av, ok := e2evolume.(*awsVolume)
2994- framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
2995- volSource := v1.VolumeSource{
2996- AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
2997- VolumeID: av.volumeName,
2998- ReadOnly: readOnly,
2999- },
3000- }
3001- if fsType != "" {
3002- volSource.AWSElasticBlockStore.FSType = fsType
3003- }
3004- return &volSource
3005-}
3006-
3007-func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
3008- av, ok := e2evolume.(*awsVolume)
3009- framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
3010- pvSource := v1.PersistentVolumeSource{
3011- AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
3012- VolumeID: av.volumeName,
3013- ReadOnly: readOnly,
3014- },
3015- }
3016- if fsType != "" {
3017- pvSource.AWSElasticBlockStore.FSType = fsType
3018- }
3019- return &pvSource, nil
3020-}
3021-
3022-func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
3023- provisioner := "kubernetes.io/aws-ebs"
3024- parameters := map[string]string{}
3025- if fsType != "" {
3026- parameters["fsType"] = fsType
3027- }
3028- ns := config.Framework.Namespace.Name
3029- suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
3030- delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
3031-
3032- return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
3033-}
3034-
3035-func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
3036- config := &testsuites.PerTestConfig{
3037- Driver: a,
3038- Prefix: "aws",
3039- Framework: f,
3040- }
3041-
3042- if framework.NodeOSDistroIs("windows") {
3043- config.ClientNodeSelection = e2epod.NodeSelection{
3044- Selector: map[string]string{
3045- "kubernetes.io/os": "windows",
3046- },
3047- }
3048- }
3049- return config, func() {}
3050-}
3051-
3052-func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
3053- zone := getInlineVolumeZone(config.Framework)
3054- if volType == testpatterns.InlineVolume {
3055- // PD will be created in framework.TestContext.CloudConfig.Zone zone,
3056- // so pods should be also scheduled there.
3057- config.ClientNodeSelection = e2epod.NodeSelection{
3058- Selector: map[string]string{
3059- v1.LabelZoneFailureDomain: zone,
3060- },
3061- }
3062- }
3063- ginkgo.By("creating a test aws volume")
3064- vname, err := e2epv.CreatePDWithRetryAndZone(zone)
3065- framework.ExpectNoError(err)
3066- return &awsVolume{
3067- volumeName: vname,
3068- }
3069-}
3070-
3071-func (v *awsVolume) DeleteVolume() {
3072- e2epv.DeletePDWithRetry(v.volumeName)
3073-}
3074-
3075 // local
3076 type localDriver struct {
3077 driverInfo testsuites.DriverInfo
3078diff --git a/test/e2e/storage/drivers/in_tree_providers.go b/test/e2e/storage/drivers/in_tree_providers.go
3079new file mode 100644
3080index 00000000000..c7f5dd3052e
3081--- /dev/null
3082+++ b/test/e2e/storage/drivers/in_tree_providers.go
3083@@ -0,0 +1,751 @@
3084+// +build !providerless
3085+
3086+package drivers
3087+
3088+import (
3089+ "fmt"
3090+ "os/exec"
3091+ "strings"
3092+ "time"
3093+
3094+ "github.com/onsi/ginkgo"
3095+ v1 "k8s.io/api/core/v1"
3096+ storagev1 "k8s.io/api/storage/v1"
3097+ "k8s.io/apimachinery/pkg/util/sets"
3098+ "k8s.io/kubernetes/test/e2e/framework"
3099+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
3100+ e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
3101+ e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
3102+ e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
3103+ "k8s.io/kubernetes/test/e2e/storage/testpatterns"
3104+ "k8s.io/kubernetes/test/e2e/storage/testsuites"
3105+ vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
3106+)
3107+
3108+// Cinder
3109+// This driver assumes that OpenStack client tools are installed
3110+// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
3111+// and that the usual OpenStack authentication env. variables are set
3112+// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
3113+type cinderDriver struct {
3114+ driverInfo testsuites.DriverInfo
3115+}
3116+
3117+type cinderVolume struct {
3118+ volumeName string
3119+ volumeID string
3120+}
3121+
3122+var _ testsuites.TestDriver = &cinderDriver{}
3123+var _ testsuites.PreprovisionedVolumeTestDriver = &cinderDriver{}
3124+var _ testsuites.InlineVolumeTestDriver = &cinderDriver{}
3125+var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{}
3126+var _ testsuites.DynamicPVTestDriver = &cinderDriver{}
3127+
3128+// InitCinderDriver returns cinderDriver that implements TestDriver interface
3129+func InitCinderDriver() testsuites.TestDriver {
3130+ return &cinderDriver{
3131+ driverInfo: testsuites.DriverInfo{
3132+ Name: "cinder",
3133+ InTreePluginName: "kubernetes.io/cinder",
3134+ MaxFileSize: testpatterns.FileSizeMedium,
3135+ SupportedSizeRange: e2evolume.SizeRange{
3136+ Min: "5Gi",
3137+ },
3138+ SupportedFsType: sets.NewString(
3139+ "", // Default fsType
3140+ "ext3",
3141+ ),
3142+ TopologyKeys: []string{v1.LabelZoneFailureDomain},
3143+ Capabilities: map[testsuites.Capability]bool{
3144+ testsuites.CapPersistence: true,
3145+ testsuites.CapFsGroup: true,
3146+ testsuites.CapExec: true,
3147+ testsuites.CapBlock: true,
3148+ // Cinder supports volume limits, but the test creates large
3149+ // number of volumes and times out test suites.
3150+ testsuites.CapVolumeLimits: false,
3151+ testsuites.CapTopology: true,
3152+ },
3153+ },
3154+ }
3155+}
3156+
3157+func (c *cinderDriver) GetDriverInfo() *testsuites.DriverInfo {
3158+ return &c.driverInfo
3159+}
3160+
3161+func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
3162+ e2eskipper.SkipUnlessProviderIs("openstack")
3163+}
3164+
3165+func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
3166+ cv, ok := e2evolume.(*cinderVolume)
3167+ framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
3168+
3169+ volSource := v1.VolumeSource{
3170+ Cinder: &v1.CinderVolumeSource{
3171+ VolumeID: cv.volumeID,
3172+ ReadOnly: readOnly,
3173+ },
3174+ }
3175+ if fsType != "" {
3176+ volSource.Cinder.FSType = fsType
3177+ }
3178+ return &volSource
3179+}
3180+
3181+func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
3182+ cv, ok := e2evolume.(*cinderVolume)
3183+ framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
3184+
3185+ pvSource := v1.PersistentVolumeSource{
3186+ Cinder: &v1.CinderPersistentVolumeSource{
3187+ VolumeID: cv.volumeID,
3188+ ReadOnly: readOnly,
3189+ },
3190+ }
3191+ if fsType != "" {
3192+ pvSource.Cinder.FSType = fsType
3193+ }
3194+ return &pvSource, nil
3195+}
3196+
3197+func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
3198+ provisioner := "kubernetes.io/cinder"
3199+ parameters := map[string]string{}
3200+ if fsType != "" {
3201+ parameters["fsType"] = fsType
3202+ }
3203+ ns := config.Framework.Namespace.Name
3204+ suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name)
3205+
3206+ return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
3207+}
3208+
3209+func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
3210+ return &testsuites.PerTestConfig{
3211+ Driver: c,
3212+ Prefix: "cinder",
3213+ Framework: f,
3214+ }, func() {}
3215+}
3216+
3217+func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
3218+ f := config.Framework
3219+ ns := f.Namespace
3220+
3221+ // We assume that namespace.Name is a random string
3222+ volumeName := ns.Name
3223+ ginkgo.By("creating a test Cinder volume")
3224+ output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
3225+ outputString := string(output[:])
3226+ framework.Logf("cinder output:\n%s", outputString)
3227+ framework.ExpectNoError(err)
3228+
3229+ // Parse 'id'' from stdout. Expected format:
3230+ // | attachments | [] |
3231+ // | availability_zone | nova |
3232+ // ...
3233+ // | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
3234+ volumeID := ""
3235+ for _, line := range strings.Split(outputString, "\n") {
3236+ fields := strings.Fields(line)
3237+ if len(fields) != 5 {
3238+ continue
3239+ }
3240+ if fields[1] != "id" {
3241+ continue
3242+ }
3243+ volumeID = fields[3]
3244+ break
3245+ }
3246+ framework.Logf("Volume ID: %s", volumeID)
3247+ framework.ExpectNotEqual(volumeID, "")
3248+ return &cinderVolume{
3249+ volumeName: volumeName,
3250+ volumeID: volumeID,
3251+ }
3252+}
3253+
3254+func (v *cinderVolume) DeleteVolume() {
3255+ name := v.volumeName
3256+
3257+ // Try to delete the volume for several seconds - it takes
3258+ // a while for the plugin to detach it.
3259+ var output []byte
3260+ var err error
3261+ timeout := time.Second * 120
3262+
3263+ framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
3264+ for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
3265+ output, err = exec.Command("cinder", "delete", name).CombinedOutput()
3266+ if err == nil {
3267+ framework.Logf("Cinder volume %s deleted", name)
3268+ return
3269+ }
3270+ framework.Logf("Failed to delete volume %s: %v", name, err)
3271+ }
3272+ framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
3273+}
3274+
3275+// GCE
3276+type gcePdDriver struct {
3277+ driverInfo testsuites.DriverInfo
3278+}
3279+
3280+type gcePdVolume struct {
3281+ volumeName string
3282+}
3283+
3284+var _ testsuites.TestDriver = &gcePdDriver{}
3285+var _ testsuites.PreprovisionedVolumeTestDriver = &gcePdDriver{}
3286+var _ testsuites.InlineVolumeTestDriver = &gcePdDriver{}
3287+var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{}
3288+var _ testsuites.DynamicPVTestDriver = &gcePdDriver{}
3289+
3290+// InitGcePdDriver returns gcePdDriver that implements TestDriver interface
3291+func InitGcePdDriver() testsuites.TestDriver {
3292+ // In current test structure, it first initialize the driver and then set up
3293+ // the new framework, so we cannot get the correct OS here. So here set to
3294+ // support all fs types including both linux and windows. We have code to check Node OS later
3295+ // during test.
3296+ supportedTypes := sets.NewString(
3297+ "", // Default fsType
3298+ "ext2",
3299+ "ext3",
3300+ "ext4",
3301+ "xfs",
3302+ "ntfs",
3303+ )
3304+ return &gcePdDriver{
3305+ driverInfo: testsuites.DriverInfo{
3306+ Name: "gcepd",
3307+ InTreePluginName: "kubernetes.io/gce-pd",
3308+ MaxFileSize: testpatterns.FileSizeMedium,
3309+ SupportedSizeRange: e2evolume.SizeRange{
3310+ Min: "5Gi",
3311+ },
3312+ SupportedFsType: supportedTypes,
3313+ SupportedMountOption: sets.NewString("debug", "nouid32"),
3314+ TopologyKeys: []string{v1.LabelZoneFailureDomain},
3315+ Capabilities: map[testsuites.Capability]bool{
3316+ testsuites.CapPersistence: true,
3317+ testsuites.CapFsGroup: true,
3318+ testsuites.CapBlock: true,
3319+ testsuites.CapExec: true,
3320+ testsuites.CapMultiPODs: true,
3321+ testsuites.CapControllerExpansion: true,
3322+ testsuites.CapNodeExpansion: true,
3323+ // GCE supports volume limits, but the test creates large
3324+ // number of volumes and times out test suites.
3325+ testsuites.CapVolumeLimits: false,
3326+ testsuites.CapTopology: true,
3327+ },
3328+ },
3329+ }
3330+}
3331+
3332+func (g *gcePdDriver) GetDriverInfo() *testsuites.DriverInfo {
3333+ return &g.driverInfo
3334+}
3335+
3336+func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
3337+ e2eskipper.SkipUnlessProviderIs("gce", "gke")
3338+ if pattern.FeatureTag == "[sig-windows]" {
3339+ e2eskipper.SkipUnlessNodeOSDistroIs("windows")
3340+ }
3341+}
3342+
3343+func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
3344+ gv, ok := e2evolume.(*gcePdVolume)
3345+ framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
3346+ volSource := v1.VolumeSource{
3347+ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
3348+ PDName: gv.volumeName,
3349+ ReadOnly: readOnly,
3350+ },
3351+ }
3352+ if fsType != "" {
3353+ volSource.GCEPersistentDisk.FSType = fsType
3354+ }
3355+ return &volSource
3356+}
3357+
3358+func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
3359+ gv, ok := e2evolume.(*gcePdVolume)
3360+ framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
3361+ pvSource := v1.PersistentVolumeSource{
3362+ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
3363+ PDName: gv.volumeName,
3364+ ReadOnly: readOnly,
3365+ },
3366+ }
3367+ if fsType != "" {
3368+ pvSource.GCEPersistentDisk.FSType = fsType
3369+ }
3370+ return &pvSource, nil
3371+}
3372+
3373+func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
3374+ provisioner := "kubernetes.io/gce-pd"
3375+ parameters := map[string]string{}
3376+ if fsType != "" {
3377+ parameters["fsType"] = fsType
3378+ }
3379+ ns := config.Framework.Namespace.Name
3380+ suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
3381+ delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
3382+
3383+ return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
3384+}
3385+
3386+func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
3387+ config := &testsuites.PerTestConfig{
3388+ Driver: g,
3389+ Prefix: "gcepd",
3390+ Framework: f,
3391+ }
3392+
3393+ if framework.NodeOSDistroIs("windows") {
3394+ config.ClientNodeSelection = e2epod.NodeSelection{
3395+ Selector: map[string]string{
3396+ "kubernetes.io/os": "windows",
3397+ },
3398+ }
3399+ }
3400+ return config, func() {}
3401+
3402+}
3403+
3404+func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
3405+ zone := getInlineVolumeZone(config.Framework)
3406+ if volType == testpatterns.InlineVolume {
3407+ // PD will be created in framework.TestContext.CloudConfig.Zone zone,
3408+ // so pods should be also scheduled there.
3409+ config.ClientNodeSelection = e2epod.NodeSelection{
3410+ Selector: map[string]string{
3411+ v1.LabelZoneFailureDomain: zone,
3412+ },
3413+ }
3414+ }
3415+ ginkgo.By("creating a test gce pd volume")
3416+ vname, err := e2epv.CreatePDWithRetryAndZone(zone)
3417+ framework.ExpectNoError(err)
3418+ return &gcePdVolume{
3419+ volumeName: vname,
3420+ }
3421+}
3422+
3423+func (v *gcePdVolume) DeleteVolume() {
3424+ e2epv.DeletePDWithRetry(v.volumeName)
3425+}
3426+
3427+// vSphere
3428+type vSphereDriver struct {
3429+ driverInfo testsuites.DriverInfo
3430+}
3431+
3432+type vSphereVolume struct {
3433+ volumePath string
3434+ nodeInfo *vspheretest.NodeInfo
3435+}
3436+
3437+var _ testsuites.TestDriver = &vSphereDriver{}
3438+var _ testsuites.PreprovisionedVolumeTestDriver = &vSphereDriver{}
3439+var _ testsuites.InlineVolumeTestDriver = &vSphereDriver{}
3440+var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{}
3441+var _ testsuites.DynamicPVTestDriver = &vSphereDriver{}
3442+
3443+// InitVSphereDriver returns vSphereDriver that implements TestDriver interface
3444+func InitVSphereDriver() testsuites.TestDriver {
3445+ return &vSphereDriver{
3446+ driverInfo: testsuites.DriverInfo{
3447+ Name: "vsphere",
3448+ InTreePluginName: "kubernetes.io/vsphere-volume",
3449+ MaxFileSize: testpatterns.FileSizeMedium,
3450+ SupportedSizeRange: e2evolume.SizeRange{
3451+ Min: "5Gi",
3452+ },
3453+ SupportedFsType: sets.NewString(
3454+ "", // Default fsType
3455+ "ext4",
3456+ ),
3457+ TopologyKeys: []string{v1.LabelZoneFailureDomain},
3458+ Capabilities: map[testsuites.Capability]bool{
3459+ testsuites.CapPersistence: true,
3460+ testsuites.CapFsGroup: true,
3461+ testsuites.CapExec: true,
3462+ testsuites.CapMultiPODs: true,
3463+ testsuites.CapTopology: true,
3464+ },
3465+ },
3466+ }
3467+}
3468+func (v *vSphereDriver) GetDriverInfo() *testsuites.DriverInfo {
3469+ return &v.driverInfo
3470+}
3471+
3472+func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
3473+ e2eskipper.SkipUnlessProviderIs("vsphere")
3474+}
3475+
3476+func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
3477+ vsv, ok := e2evolume.(*vSphereVolume)
3478+ framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
3479+
3480+ // vSphere driver doesn't seem to support readOnly volume
3481+ // TODO: check if it is correct
3482+ if readOnly {
3483+ return nil
3484+ }
3485+ volSource := v1.VolumeSource{
3486+ VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
3487+ VolumePath: vsv.volumePath,
3488+ },
3489+ }
3490+ if fsType != "" {
3491+ volSource.VsphereVolume.FSType = fsType
3492+ }
3493+ return &volSource
3494+}
3495+
3496+func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
3497+ vsv, ok := e2evolume.(*vSphereVolume)
3498+ framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
3499+
3500+ // vSphere driver doesn't seem to support readOnly volume
3501+ // TODO: check if it is correct
3502+ if readOnly {
3503+ return nil, nil
3504+ }
3505+ pvSource := v1.PersistentVolumeSource{
3506+ VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
3507+ VolumePath: vsv.volumePath,
3508+ },
3509+ }
3510+ if fsType != "" {
3511+ pvSource.VsphereVolume.FSType = fsType
3512+ }
3513+ return &pvSource, nil
3514+}
3515+
3516+func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
3517+ provisioner := "kubernetes.io/vsphere-volume"
3518+ parameters := map[string]string{}
3519+ if fsType != "" {
3520+ parameters["fsType"] = fsType
3521+ }
3522+ ns := config.Framework.Namespace.Name
3523+ suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name)
3524+
3525+ return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
3526+}
3527+
3528+func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
3529+ return &testsuites.PerTestConfig{
3530+ Driver: v,
3531+ Prefix: "vsphere",
3532+ Framework: f,
3533+ }, func() {}
3534+}
3535+
3536+func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
3537+ f := config.Framework
3538+ vspheretest.Bootstrap(f)
3539+ nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
3540+ volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
3541+ framework.ExpectNoError(err)
3542+ return &vSphereVolume{
3543+ volumePath: volumePath,
3544+ nodeInfo: nodeInfo,
3545+ }
3546+}
3547+
3548+func (v *vSphereVolume) DeleteVolume() {
3549+ v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef)
3550+}
3551+
3552+// Azure Disk
3553+type azureDiskDriver struct {
3554+ driverInfo testsuites.DriverInfo
3555+}
3556+
3557+type azureDiskVolume struct {
3558+ volumeName string
3559+}
3560+
3561+var _ testsuites.TestDriver = &azureDiskDriver{}
3562+var _ testsuites.PreprovisionedVolumeTestDriver = &azureDiskDriver{}
3563+var _ testsuites.InlineVolumeTestDriver = &azureDiskDriver{}
3564+var _ testsuites.PreprovisionedPVTestDriver = &azureDiskDriver{}
3565+var _ testsuites.DynamicPVTestDriver = &azureDiskDriver{}
3566+
3567+// InitAzureDiskDriver returns azureDiskDriver that implements TestDriver interface
3568+func InitAzureDiskDriver() testsuites.TestDriver {
3569+ return &azureDiskDriver{
3570+ driverInfo: testsuites.DriverInfo{
3571+ Name: "azure-disk",
3572+ InTreePluginName: "kubernetes.io/azure-disk",
3573+ MaxFileSize: testpatterns.FileSizeMedium,
3574+ SupportedSizeRange: e2evolume.SizeRange{
3575+ Min: "5Gi",
3576+ },
3577+ SupportedFsType: sets.NewString(
3578+ "", // Default fsType
3579+ "ext3",
3580+ "ext4",
3581+ "xfs",
3582+ ),
3583+ TopologyKeys: []string{v1.LabelZoneFailureDomain},
3584+ Capabilities: map[testsuites.Capability]bool{
3585+ testsuites.CapPersistence: true,
3586+ testsuites.CapFsGroup: true,
3587+ testsuites.CapBlock: true,
3588+ testsuites.CapExec: true,
3589+ testsuites.CapMultiPODs: true,
3590+ // Azure supports volume limits, but the test creates large
3591+ // number of volumes and times out test suites.
3592+ testsuites.CapVolumeLimits: false,
3593+ testsuites.CapTopology: true,
3594+ },
3595+ },
3596+ }
3597+}
3598+
3599+func (a *azureDiskDriver) GetDriverInfo() *testsuites.DriverInfo {
3600+ return &a.driverInfo
3601+}
3602+
3603+func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
3604+ e2eskipper.SkipUnlessProviderIs("azure")
3605+}
3606+
3607+func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
3608+ av, ok := e2evolume.(*azureDiskVolume)
3609+ framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
3610+ diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
3611+
3612+ kind := v1.AzureManagedDisk
3613+ volSource := v1.VolumeSource{
3614+ AzureDisk: &v1.AzureDiskVolumeSource{
3615+ DiskName: diskName,
3616+ DataDiskURI: av.volumeName,
3617+ Kind: &kind,
3618+ ReadOnly: &readOnly,
3619+ },
3620+ }
3621+ if fsType != "" {
3622+ volSource.AzureDisk.FSType = &fsType
3623+ }
3624+ return &volSource
3625+}
3626+
3627+func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
3628+ av, ok := e2evolume.(*azureDiskVolume)
3629+ framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
3630+
3631+ diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
3632+
3633+ kind := v1.AzureManagedDisk
3634+ pvSource := v1.PersistentVolumeSource{
3635+ AzureDisk: &v1.AzureDiskVolumeSource{
3636+ DiskName: diskName,
3637+ DataDiskURI: av.volumeName,
3638+ Kind: &kind,
3639+ ReadOnly: &readOnly,
3640+ },
3641+ }
3642+ if fsType != "" {
3643+ pvSource.AzureDisk.FSType = &fsType
3644+ }
3645+ return &pvSource, nil
3646+}
3647+
3648+func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
3649+ provisioner := "kubernetes.io/azure-disk"
3650+ parameters := map[string]string{}
3651+ if fsType != "" {
3652+ parameters["fsType"] = fsType
3653+ }
3654+ ns := config.Framework.Namespace.Name
3655+ suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
3656+ delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
3657+
3658+ return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
3659+}
3660+
3661+func (a *azureDiskDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
3662+ return &testsuites.PerTestConfig{
3663+ Driver: a,
3664+ Prefix: "azure",
3665+ Framework: f,
3666+ }, func() {}
3667+}
3668+
3669+func (a *azureDiskDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
3670+ ginkgo.By("creating a test azure disk volume")
3671+ zone := getInlineVolumeZone(config.Framework)
3672+ if volType == testpatterns.InlineVolume {
3673+ // PD will be created in framework.TestContext.CloudConfig.Zone zone,
3674+ // so pods should be also scheduled there.
3675+ config.ClientNodeSelection = e2epod.NodeSelection{
3676+ Selector: map[string]string{
3677+ v1.LabelZoneFailureDomain: zone,
3678+ },
3679+ }
3680+ }
3681+ volumeName, err := e2epv.CreatePDWithRetryAndZone(zone)
3682+ framework.ExpectNoError(err)
3683+ return &azureDiskVolume{
3684+ volumeName: volumeName,
3685+ }
3686+}
3687+
3688+func (v *azureDiskVolume) DeleteVolume() {
3689+ e2epv.DeletePDWithRetry(v.volumeName)
3690+}
3691+
3692+// AWS
3693+type awsDriver struct {
3694+ driverInfo testsuites.DriverInfo
3695+}
3696+
3697+type awsVolume struct {
3698+ volumeName string
3699+}
3700+
3701+var _ testsuites.TestDriver = &awsDriver{}
3702+
3703+var _ testsuites.PreprovisionedVolumeTestDriver = &awsDriver{}
3704+var _ testsuites.InlineVolumeTestDriver = &awsDriver{}
3705+var _ testsuites.PreprovisionedPVTestDriver = &awsDriver{}
3706+var _ testsuites.DynamicPVTestDriver = &awsDriver{}
3707+
3708+// InitAwsDriver returns awsDriver that implements TestDriver interface
3709+func InitAwsDriver() testsuites.TestDriver {
3710+ return &awsDriver{
3711+ driverInfo: testsuites.DriverInfo{
3712+ Name: "aws",
3713+ InTreePluginName: "kubernetes.io/aws-ebs",
3714+ MaxFileSize: testpatterns.FileSizeMedium,
3715+ SupportedSizeRange: e2evolume.SizeRange{
3716+ Min: "5Gi",
3717+ },
3718+ SupportedFsType: sets.NewString(
3719+ "", // Default fsType
3720+ "ext2",
3721+ "ext3",
3722+ "ext4",
3723+ "xfs",
3724+ "ntfs",
3725+ ),
3726+ SupportedMountOption: sets.NewString("debug", "nouid32"),
3727+ TopologyKeys: []string{v1.LabelZoneFailureDomain},
3728+ Capabilities: map[testsuites.Capability]bool{
3729+ testsuites.CapPersistence: true,
3730+ testsuites.CapFsGroup: true,
3731+ testsuites.CapBlock: true,
3732+ testsuites.CapExec: true,
3733+ testsuites.CapMultiPODs: true,
3734+ testsuites.CapControllerExpansion: true,
3735+ testsuites.CapNodeExpansion: true,
3736+ // AWS supports volume limits, but the test creates large
3737+ // number of volumes and times out test suites.
3738+ testsuites.CapVolumeLimits: false,
3739+ testsuites.CapTopology: true,
3740+ },
3741+ },
3742+ }
3743+}
3744+
3745+func (a *awsDriver) GetDriverInfo() *testsuites.DriverInfo {
3746+ return &a.driverInfo
3747+}
3748+
3749+func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
3750+ e2eskipper.SkipUnlessProviderIs("aws")
3751+}
3752+
3753+func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
3754+ av, ok := e2evolume.(*awsVolume)
3755+ framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
3756+ volSource := v1.VolumeSource{
3757+ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
3758+ VolumeID: av.volumeName,
3759+ ReadOnly: readOnly,
3760+ },
3761+ }
3762+ if fsType != "" {
3763+ volSource.AWSElasticBlockStore.FSType = fsType
3764+ }
3765+ return &volSource
3766+}
3767+
3768+func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
3769+ av, ok := e2evolume.(*awsVolume)
3770+ framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
3771+ pvSource := v1.PersistentVolumeSource{
3772+ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
3773+ VolumeID: av.volumeName,
3774+ ReadOnly: readOnly,
3775+ },
3776+ }
3777+ if fsType != "" {
3778+ pvSource.AWSElasticBlockStore.FSType = fsType
3779+ }
3780+ return &pvSource, nil
3781+}
3782+
3783+func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
3784+ provisioner := "kubernetes.io/aws-ebs"
3785+ parameters := map[string]string{}
3786+ if fsType != "" {
3787+ parameters["fsType"] = fsType
3788+ }
3789+ ns := config.Framework.Namespace.Name
3790+ suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
3791+ delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
3792+
3793+ return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
3794+}
3795+
3796+func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
3797+ config := &testsuites.PerTestConfig{
3798+ Driver: a,
3799+ Prefix: "aws",
3800+ Framework: f,
3801+ }
3802+
3803+ if framework.NodeOSDistroIs("windows") {
3804+ config.ClientNodeSelection = e2epod.NodeSelection{
3805+ Selector: map[string]string{
3806+ "kubernetes.io/os": "windows",
3807+ },
3808+ }
3809+ }
3810+ return config, func() {}
3811+}
3812+
3813+func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
3814+ zone := getInlineVolumeZone(config.Framework)
3815+ if volType == testpatterns.InlineVolume {
3816+ // PD will be created in framework.TestContext.CloudConfig.Zone zone,
3817+ // so pods should be also scheduled there.
3818+ config.ClientNodeSelection = e2epod.NodeSelection{
3819+ Selector: map[string]string{
3820+ v1.LabelZoneFailureDomain: zone,
3821+ },
3822+ }
3823+ }
3824+ ginkgo.By("creating a test aws volume")
3825+ vname, err := e2epv.CreatePDWithRetryAndZone(zone)
3826+ framework.ExpectNoError(err)
3827+ return &awsVolume{
3828+ volumeName: vname,
3829+ }
3830+}
3831+
3832+func (v *awsVolume) DeleteVolume() {
3833+ e2epv.DeletePDWithRetry(v.volumeName)
3834+}
3835diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go
3836index 19372062407..8322db743cd 100644
3837--- a/test/e2e/storage/in_tree_volumes.go
3838+++ b/test/e2e/storage/in_tree_volumes.go
3839@@ -33,11 +33,6 @@ var testDrivers = []func() testsuites.TestDriver{
3840 drivers.InitHostPathDriver,
3841 drivers.InitHostPathSymlinkDriver,
3842 drivers.InitEmptydirDriver,
3843- drivers.InitCinderDriver,
3844- drivers.InitGcePdDriver,
3845- drivers.InitVSphereDriver,
3846- drivers.InitAzureDiskDriver,
3847- drivers.InitAwsDriver,
3848 drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectory),
3849 drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryLink),
3850 drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryBindMounted),
3851diff --git a/test/e2e/storage/in_tree_volumes_providers.go b/test/e2e/storage/in_tree_volumes_providers.go
3852new file mode 100644
3853index 00000000000..d6a5dbca191
3854--- /dev/null
3855+++ b/test/e2e/storage/in_tree_volumes_providers.go
3856@@ -0,0 +1,46 @@
3857+// +build !providerless
3858+
3859+/*
3860+Copyright 2020 The Kubernetes Authors.
3861+
3862+Licensed under the Apache License, Version 2.0 (the "License");
3863+you may not use this file except in compliance with the License.
3864+You may obtain a copy of the License at
3865+
3866+ http://www.apache.org/licenses/LICENSE-2.0
3867+
3868+Unless required by applicable law or agreed to in writing, software
3869+distributed under the License is distributed on an "AS IS" BASIS,
3870+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3871+See the License for the specific language governing permissions and
3872+limitations under the License.
3873+*/
3874+
3875+package storage
3876+
3877+import (
3878+ "github.com/onsi/ginkgo"
3879+ "k8s.io/kubernetes/test/e2e/storage/drivers"
3880+ "k8s.io/kubernetes/test/e2e/storage/testsuites"
3881+ "k8s.io/kubernetes/test/e2e/storage/utils"
3882+)
3883+
3884+// List of testDrivers to be executed in below loop
3885+var testDriversProviders = []func() testsuites.TestDriver{
3886+ drivers.InitCinderDriver,
3887+ drivers.InitGcePdDriver,
3888+ drivers.InitVSphereDriver,
3889+ drivers.InitAzureDiskDriver,
3890+ drivers.InitAwsDriver,
3891+}
3892+
3893+// This executes testSuites for in-tree volumes.
3894+var _ = utils.SIGDescribe("In-tree Volumes for Cloud Providers", func() {
3895+ for _, initDriver := range testDriversProviders {
3896+ curDriver := initDriver()
3897+
3898+ ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
3899+ testsuites.DefineTestSuite(curDriver, testsuites.BaseSuites)
3900+ })
3901+ }
3902+})
3903diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go
3904index 5afebb5e903..b197eee99a6 100644
3905--- a/test/e2e/storage/nfs_persistent_volume-disruptive.go
3906+++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go
3907@@ -91,7 +91,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
3908
3909 ginkgo.BeforeEach(func() {
3910 // To protect the NFS volume pod from the kubelet restart, we isolate it on its own node.
3911- e2eskipper.SkipUnlessNodeCountIsAtLeast(minNodes)
3912+ e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
3913 e2eskipper.SkipIfProviderIs("local")
3914
3915 c = f.ClientSet
3916diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go
3917index f5b6060a834..addd304147c 100644
3918--- a/test/e2e/storage/pd.go
3919+++ b/test/e2e/storage/pd.go
3920@@ -1,3 +1,5 @@
3921+// +build !providerless
3922+
3923 /*
3924 Copyright 2015 The Kubernetes Authors.
3925
3926diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go
3927index b8bc887384e..f572754c5e8 100644
3928--- a/test/e2e/storage/persistent_volumes-gce.go
3929+++ b/test/e2e/storage/persistent_volumes-gce.go
3930@@ -1,3 +1,5 @@
3931+// +build !providerless
3932+
3933 /*
3934 Copyright 2017 The Kubernetes Authors.
3935
3936diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go
3937index 7763afaf6b1..a042dcc9d4a 100644
3938--- a/test/e2e/storage/regional_pd.go
3939+++ b/test/e2e/storage/regional_pd.go
3940@@ -1,3 +1,5 @@
3941+// +build !providerless
3942+
3943 /*
3944 Copyright 2016 The Kubernetes Authors.
3945
3946@@ -18,6 +20,7 @@ package storage
3947
3948 import (
3949 "context"
3950+
3951 "github.com/onsi/ginkgo"
3952 "github.com/onsi/gomega"
3953
3954diff --git a/test/e2e/storage/utils/BUILD b/test/e2e/storage/utils/BUILD
3955index bdc78982dcb..c57ff325f76 100644
3956--- a/test/e2e/storage/utils/BUILD
3957+++ b/test/e2e/storage/utils/BUILD
3958@@ -7,7 +7,6 @@ go_library(
3959 srcs = [
3960 "create.go",
3961 "deployment.go",
3962- "ebs.go",
3963 "framework.go",
3964 "host_exec.go",
3965 "local.go",
3966@@ -37,8 +36,6 @@ go_library(
3967 "//test/e2e/framework/ssh:go_default_library",
3968 "//test/e2e/framework/testfiles:go_default_library",
3969 "//test/utils/image:go_default_library",
3970- "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
3971- "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
3972 "//vendor/github.com/onsi/ginkgo:go_default_library",
3973 "//vendor/github.com/onsi/gomega:go_default_library",
3974 "//vendor/github.com/pkg/errors:go_default_library",
3975diff --git a/test/e2e/storage/utils/ebs.go b/test/e2e/storage/utils/ebs.go
3976index 39e223f36aa..55065ea07b7 100644
3977--- a/test/e2e/storage/utils/ebs.go
3978+++ b/test/e2e/storage/utils/ebs.go
3979@@ -1,3 +1,5 @@
3980+// +build !providerless
3981+
3982 /*
3983 Copyright 2020 The Kubernetes Authors.
3984
3985diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go
3986index a8b494eb3ac..c070a81283c 100644
3987--- a/test/e2e/storage/volume_provisioning.go
3988+++ b/test/e2e/storage/volume_provisioning.go
3989@@ -24,11 +24,6 @@ import (
3990 "time"
3991
3992 "github.com/onsi/ginkgo"
3993- "github.com/onsi/gomega"
3994-
3995- "github.com/aws/aws-sdk-go/aws"
3996- "github.com/aws/aws-sdk-go/aws/session"
3997- "github.com/aws/aws-sdk-go/service/ec2"
3998
3999 v1 "k8s.io/api/core/v1"
4000 rbacv1 "k8s.io/api/rbac/v1"
4001@@ -37,9 +32,7 @@ import (
4002 apierrors "k8s.io/apimachinery/pkg/api/errors"
4003 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
4004 "k8s.io/apimachinery/pkg/runtime/schema"
4005- "k8s.io/apimachinery/pkg/types"
4006 "k8s.io/apimachinery/pkg/util/rand"
4007- "k8s.io/apimachinery/pkg/util/sets"
4008 "k8s.io/apimachinery/pkg/util/wait"
4009 "k8s.io/apiserver/pkg/authentication/serviceaccount"
4010 clientset "k8s.io/client-go/kubernetes"
4011@@ -48,7 +41,6 @@ import (
4012 e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
4013 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
4014 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
4015- "k8s.io/kubernetes/test/e2e/framework/providers/gce"
4016 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
4017 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
4018 "k8s.io/kubernetes/test/e2e/storage/testsuites"
4019@@ -61,80 +53,6 @@ const (
4020 externalPluginName = "example.com/nfs"
4021 )
4022
4023-// checkAWSEBS checks properties of an AWS EBS. Test framework does not
4024-// instantiate full AWS provider, therefore we need use ec2 API directly.
4025-func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
4026- diskName := volume.Spec.AWSElasticBlockStore.VolumeID
4027-
4028- var client *ec2.EC2
4029-
4030- tokens := strings.Split(diskName, "/")
4031- volumeID := tokens[len(tokens)-1]
4032-
4033- zone := framework.TestContext.CloudConfig.Zone
4034-
4035- awsSession, err := session.NewSession()
4036- if err != nil {
4037- return fmt.Errorf("error creating session: %v", err)
4038- }
4039-
4040- if len(zone) > 0 {
4041- region := zone[:len(zone)-1]
4042- cfg := aws.Config{Region: &region}
4043- framework.Logf("using region %s", region)
4044- client = ec2.New(awsSession, &cfg)
4045- } else {
4046- framework.Logf("no region configured")
4047- client = ec2.New(awsSession)
4048- }
4049-
4050- request := &ec2.DescribeVolumesInput{
4051- VolumeIds: []*string{&volumeID},
4052- }
4053- info, err := client.DescribeVolumes(request)
4054- if err != nil {
4055- return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err)
4056- }
4057- if len(info.Volumes) == 0 {
4058- return fmt.Errorf("no volumes found for volume %q", volumeID)
4059- }
4060- if len(info.Volumes) > 1 {
4061- return fmt.Errorf("multiple volumes found for volume %q", volumeID)
4062- }
4063-
4064- awsVolume := info.Volumes[0]
4065- if awsVolume.VolumeType == nil {
4066- return fmt.Errorf("expected volume type %q, got nil", volumeType)
4067- }
4068- if *awsVolume.VolumeType != volumeType {
4069- return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType)
4070- }
4071- if encrypted && awsVolume.Encrypted == nil {
4072- return fmt.Errorf("expected encrypted volume, got no encryption")
4073- }
4074- if encrypted && !*awsVolume.Encrypted {
4075- return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted)
4076- }
4077- return nil
4078-}
4079-
4080-func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
4081- cloud, err := gce.GetGCECloud()
4082- if err != nil {
4083- return err
4084- }
4085- diskName := volume.Spec.GCEPersistentDisk.PDName
4086- disk, err := cloud.GetDiskByNameUnknownZone(diskName)
4087- if err != nil {
4088- return err
4089- }
4090-
4091- if !strings.HasSuffix(disk.Type, volumeType) {
4092- return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType)
4093- }
4094- return nil
4095-}
4096-
4097 var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
4098 f := framework.NewDefaultFramework("volume-provisioning")
4099
4100@@ -147,451 +65,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
4101 ns = f.Namespace.Name
4102 })
4103
4104- ginkgo.Describe("DynamicProvisioner [Slow]", func() {
4105- ginkgo.It("should provision storage with different parameters", func() {
4106-
4107- // This test checks that dynamic provisioning can provision a volume
4108- // that can be used to persist data among pods.
4109- tests := []testsuites.StorageClassTest{
4110- // GCE/GKE
4111- {
4112- Name: "SSD PD on GCE/GKE",
4113- CloudProviders: []string{"gce", "gke"},
4114- Provisioner: "kubernetes.io/gce-pd",
4115- Parameters: map[string]string{
4116- "type": "pd-ssd",
4117- "zone": getRandomClusterZone(c),
4118- },
4119- ClaimSize: "1.5Gi",
4120- ExpectedSize: "2Gi",
4121- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4122- volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4123- gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4124-
4125- err := checkGCEPD(volume, "pd-ssd")
4126- framework.ExpectNoError(err, "checkGCEPD pd-ssd")
4127- },
4128- },
4129- {
4130- Name: "HDD PD on GCE/GKE",
4131- CloudProviders: []string{"gce", "gke"},
4132- Provisioner: "kubernetes.io/gce-pd",
4133- Parameters: map[string]string{
4134- "type": "pd-standard",
4135- },
4136- ClaimSize: "1.5Gi",
4137- ExpectedSize: "2Gi",
4138- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4139- volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4140- gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4141-
4142- err := checkGCEPD(volume, "pd-standard")
4143- framework.ExpectNoError(err, "checkGCEPD pd-standard")
4144- },
4145- },
4146- // AWS
4147- {
4148- Name: "gp2 EBS on AWS",
4149- CloudProviders: []string{"aws"},
4150- Provisioner: "kubernetes.io/aws-ebs",
4151- Parameters: map[string]string{
4152- "type": "gp2",
4153- "zone": getRandomClusterZone(c),
4154- },
4155- ClaimSize: "1.5Gi",
4156- ExpectedSize: "2Gi",
4157- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4158- volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4159- gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4160-
4161- err := checkAWSEBS(volume, "gp2", false)
4162- framework.ExpectNoError(err, "checkAWSEBS gp2")
4163- },
4164- },
4165- {
4166- Name: "io1 EBS on AWS",
4167- CloudProviders: []string{"aws"},
4168- Provisioner: "kubernetes.io/aws-ebs",
4169- Parameters: map[string]string{
4170- "type": "io1",
4171- "iopsPerGB": "50",
4172- },
4173- ClaimSize: "3.5Gi",
4174- ExpectedSize: "4Gi", // 4 GiB is minimum for io1
4175- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4176- volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4177- gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4178-
4179- err := checkAWSEBS(volume, "io1", false)
4180- framework.ExpectNoError(err, "checkAWSEBS io1")
4181- },
4182- },
4183- {
4184- Name: "sc1 EBS on AWS",
4185- CloudProviders: []string{"aws"},
4186- Provisioner: "kubernetes.io/aws-ebs",
4187- Parameters: map[string]string{
4188- "type": "sc1",
4189- },
4190- ClaimSize: "500Gi", // minimum for sc1
4191- ExpectedSize: "500Gi",
4192- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4193- volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4194- gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4195-
4196- err := checkAWSEBS(volume, "sc1", false)
4197- framework.ExpectNoError(err, "checkAWSEBS sc1")
4198- },
4199- },
4200- {
4201- Name: "st1 EBS on AWS",
4202- CloudProviders: []string{"aws"},
4203- Provisioner: "kubernetes.io/aws-ebs",
4204- Parameters: map[string]string{
4205- "type": "st1",
4206- },
4207- ClaimSize: "500Gi", // minimum for st1
4208- ExpectedSize: "500Gi",
4209- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4210- volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4211- gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4212-
4213- err := checkAWSEBS(volume, "st1", false)
4214- framework.ExpectNoError(err, "checkAWSEBS st1")
4215- },
4216- },
4217- {
4218- Name: "encrypted EBS on AWS",
4219- CloudProviders: []string{"aws"},
4220- Provisioner: "kubernetes.io/aws-ebs",
4221- Parameters: map[string]string{
4222- "encrypted": "true",
4223- },
4224- ClaimSize: "1Gi",
4225- ExpectedSize: "1Gi",
4226- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4227- volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4228- gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4229-
4230- err := checkAWSEBS(volume, "gp2", true)
4231- framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted")
4232- },
4233- },
4234- // OpenStack generic tests (works on all OpenStack deployments)
4235- {
4236- Name: "generic Cinder volume on OpenStack",
4237- CloudProviders: []string{"openstack"},
4238- Provisioner: "kubernetes.io/cinder",
4239- Parameters: map[string]string{},
4240- ClaimSize: "1.5Gi",
4241- ExpectedSize: "2Gi",
4242- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4243- testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4244- },
4245- },
4246- {
4247- Name: "Cinder volume with empty volume type and zone on OpenStack",
4248- CloudProviders: []string{"openstack"},
4249- Provisioner: "kubernetes.io/cinder",
4250- Parameters: map[string]string{
4251- "type": "",
4252- "availability": "",
4253- },
4254- ClaimSize: "1.5Gi",
4255- ExpectedSize: "2Gi",
4256- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4257- testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4258- },
4259- },
4260- // vSphere generic test
4261- {
4262- Name: "generic vSphere volume",
4263- CloudProviders: []string{"vsphere"},
4264- Provisioner: "kubernetes.io/vsphere-volume",
4265- Parameters: map[string]string{},
4266- ClaimSize: "1.5Gi",
4267- ExpectedSize: "1.5Gi",
4268- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4269- testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4270- },
4271- },
4272- // Azure
4273- {
4274- Name: "Azure disk volume with empty sku and location",
4275- CloudProviders: []string{"azure"},
4276- Provisioner: "kubernetes.io/azure-disk",
4277- Parameters: map[string]string{},
4278- ClaimSize: "1Gi",
4279- ExpectedSize: "1Gi",
4280- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4281- testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4282- },
4283- },
4284- }
4285-
4286- var betaTest *testsuites.StorageClassTest
4287- for i, t := range tests {
4288- // Beware of clojure, use local variables instead of those from
4289- // outer scope
4290- test := t
4291-
4292- if !framework.ProviderIs(test.CloudProviders...) {
4293- framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
4294- continue
4295- }
4296-
4297- // Remember the last supported test for subsequent test of beta API
4298- betaTest = &test
4299-
4300- ginkgo.By("Testing " + test.Name)
4301- suffix := fmt.Sprintf("%d", i)
4302- test.Client = c
4303- test.Class = newStorageClass(test, ns, suffix)
4304- test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
4305- ClaimSize: test.ClaimSize,
4306- StorageClassName: &test.Class.Name,
4307- VolumeMode: &test.VolumeMode,
4308- }, ns)
4309- test.TestDynamicProvisioning()
4310- }
4311-
4312- // Run the last test with storage.k8s.io/v1beta1 on pvc
4313- if betaTest != nil {
4314- ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning")
4315- class := newBetaStorageClass(*betaTest, "beta")
4316- // we need to create the class manually, testDynamicProvisioning does not accept beta class
4317- class, err := c.StorageV1beta1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
4318- framework.ExpectNoError(err)
4319- defer deleteStorageClass(c, class.Name)
4320-
4321- betaTest.Client = c
4322- betaTest.Class = nil
4323- betaTest.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
4324- ClaimSize: betaTest.ClaimSize,
4325- StorageClassName: &class.Name,
4326- VolumeMode: &betaTest.VolumeMode,
4327- }, ns)
4328- betaTest.Claim.Spec.StorageClassName = &(class.Name)
4329- (*betaTest).TestDynamicProvisioning()
4330- }
4331- })
4332-
4333- ginkgo.It("should provision storage with non-default reclaim policy Retain", func() {
4334- e2eskipper.SkipUnlessProviderIs("gce", "gke")
4335-
4336- test := testsuites.StorageClassTest{
4337- Client: c,
4338- Name: "HDD PD on GCE/GKE",
4339- CloudProviders: []string{"gce", "gke"},
4340- Provisioner: "kubernetes.io/gce-pd",
4341- Parameters: map[string]string{
4342- "type": "pd-standard",
4343- },
4344- ClaimSize: "1Gi",
4345- ExpectedSize: "1Gi",
4346- PvCheck: func(claim *v1.PersistentVolumeClaim) {
4347- volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4348- gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4349-
4350- err := checkGCEPD(volume, "pd-standard")
4351- framework.ExpectNoError(err, "checkGCEPD")
4352- },
4353- }
4354- test.Class = newStorageClass(test, ns, "reclaimpolicy")
4355- retain := v1.PersistentVolumeReclaimRetain
4356- test.Class.ReclaimPolicy = &retain
4357- test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
4358- ClaimSize: test.ClaimSize,
4359- StorageClassName: &test.Class.Name,
4360- VolumeMode: &test.VolumeMode,
4361- }, ns)
4362- pv := test.TestDynamicProvisioning()
4363-
4364- ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
4365- framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
4366-
4367- ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name))
4368- framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
4369-
4370- ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name))
4371- framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
4372- framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
4373- })
4374-
4375- ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() {
4376- e2eskipper.SkipUnlessProviderIs("gce", "gke")
4377- var suffix string = "unmananged"
4378-
4379- ginkgo.By("Discovering an unmanaged zone")
4380- allZones := sets.NewString() // all zones in the project
4381-
4382- gceCloud, err := gce.GetGCECloud()
4383- framework.ExpectNoError(err)
4384-
4385- // Get all k8s managed zones (same as zones with nodes in them for test)
4386- managedZones, err := gceCloud.GetAllZonesFromCloudProvider()
4387- framework.ExpectNoError(err)
4388-
4389- // Get a list of all zones in the project
4390- zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
4391- framework.ExpectNoError(err)
4392- for _, z := range zones.Items {
4393- allZones.Insert(z.Name)
4394- }
4395-
4396- // Get the subset of zones not managed by k8s
4397- var unmanagedZone string
4398- var popped bool
4399- unmanagedZones := allZones.Difference(managedZones)
4400- // And select one of them at random.
4401- if unmanagedZone, popped = unmanagedZones.PopAny(); !popped {
4402- e2eskipper.Skipf("No unmanaged zones found.")
4403- }
4404-
4405- ginkgo.By("Creating a StorageClass for the unmanaged zone")
4406- test := testsuites.StorageClassTest{
4407- Name: "unmanaged_zone",
4408- Provisioner: "kubernetes.io/gce-pd",
4409- Parameters: map[string]string{"zone": unmanagedZone},
4410- ClaimSize: "1Gi",
4411- }
4412- sc := newStorageClass(test, ns, suffix)
4413- sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
4414- framework.ExpectNoError(err)
4415- defer deleteStorageClass(c, sc.Name)
4416-
4417- ginkgo.By("Creating a claim and expecting it to timeout")
4418- pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
4419- ClaimSize: test.ClaimSize,
4420- StorageClassName: &sc.Name,
4421- VolumeMode: &test.VolumeMode,
4422- }, ns)
4423- pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
4424- framework.ExpectNoError(err)
4425- defer func() {
4426- framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
4427- }()
4428-
4429- // The claim should timeout phase:Pending
4430- err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
4431- framework.ExpectError(err)
4432- framework.Logf(err.Error())
4433- })
4434-
4435- ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() {
4436- // This case tests for the regressions of a bug fixed by PR #21268
4437- // REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV
4438- // not being deleted.
4439- // NOTE: Polls until no PVs are detected, times out at 5 minutes.
4440-
4441- e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
4442-
4443- const raceAttempts int = 100
4444- var residualPVs []*v1.PersistentVolume
4445- ginkgo.By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts))
4446- test := testsuites.StorageClassTest{
4447- Name: "deletion race",
4448- Provisioner: "", // Use a native one based on current cloud provider
4449- ClaimSize: "1Gi",
4450- }
4451-
4452- class := newStorageClass(test, ns, "race")
4453- class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
4454- framework.ExpectNoError(err)
4455- defer deleteStorageClass(c, class.Name)
4456-
4457- // To increase chance of detection, attempt multiple iterations
4458- for i := 0; i < raceAttempts; i++ {
4459- prefix := fmt.Sprintf("race-%d", i)
4460- claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
4461- NamePrefix: prefix,
4462- ClaimSize: test.ClaimSize,
4463- StorageClassName: &class.Name,
4464- VolumeMode: &test.VolumeMode,
4465- }, ns)
4466- tmpClaim, err := e2epv.CreatePVC(c, ns, claim)
4467- framework.ExpectNoError(err)
4468- framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
4469- }
4470-
4471- ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
4472- residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
4473- // Cleanup the test resources before breaking
4474- defer deleteProvisionedVolumesAndDisks(c, residualPVs)
4475- framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs))
4476-
4477- framework.Logf("0 PersistentVolumes remain.")
4478- })
4479-
4480- ginkgo.It("deletion should be idempotent", func() {
4481- // This test ensures that deletion of a volume is idempotent.
4482- // It creates a PV with Retain policy, deletes underlying AWS / GCE
4483- // volume and changes the reclaim policy to Delete.
4484- // PV controller should delete the PV even though the underlying volume
4485- // is already deleted.
4486- e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
4487- ginkgo.By("creating PD")
4488- diskName, err := e2epv.CreatePDWithRetry()
4489- framework.ExpectNoError(err)
4490-
4491- ginkgo.By("creating PV")
4492- pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
4493- NamePrefix: "volume-idempotent-delete-",
4494- // Use Retain to keep the PV, the test will change it to Delete
4495- // when the time comes.
4496- ReclaimPolicy: v1.PersistentVolumeReclaimRetain,
4497- AccessModes: []v1.PersistentVolumeAccessMode{
4498- v1.ReadWriteOnce,
4499- },
4500- Capacity: "1Gi",
4501- // PV is bound to non-existing PVC, so it's reclaim policy is
4502- // executed immediately
4503- Prebind: &v1.PersistentVolumeClaim{
4504- ObjectMeta: metav1.ObjectMeta{
4505- Name: "dummy-claim-name",
4506- Namespace: ns,
4507- UID: types.UID("01234567890"),
4508- },
4509- },
4510- })
4511- switch framework.TestContext.Provider {
4512- case "aws":
4513- pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
4514- AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
4515- VolumeID: diskName,
4516- },
4517- }
4518- case "gce", "gke":
4519- pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
4520- GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
4521- PDName: diskName,
4522- },
4523- }
4524- }
4525- pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
4526- framework.ExpectNoError(err)
4527-
4528- ginkgo.By("waiting for the PV to get Released")
4529- err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout)
4530- framework.ExpectNoError(err)
4531-
4532- ginkgo.By("deleting the PD")
4533- err = e2epv.DeletePVSource(&pv.Spec.PersistentVolumeSource)
4534- framework.ExpectNoError(err)
4535-
4536- ginkgo.By("changing the PV reclaim policy")
4537- pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
4538- framework.ExpectNoError(err)
4539- pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete
4540- pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})
4541- framework.ExpectNoError(err)
4542-
4543- ginkgo.By("waiting for the PV to get deleted")
4544- err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout)
4545- framework.ExpectNoError(err)
4546- })
4547- })
4548-
4549 ginkgo.Describe("DynamicProvisioner External", func() {
4550 ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() {
4551 // external dynamic provisioner pods need additional permissions provided by the
4552diff --git a/test/e2e/storage/volume_provisioning_providers.go b/test/e2e/storage/volume_provisioning_providers.go
4553new file mode 100644
4554index 00000000000..932c644af7a
4555--- /dev/null
4556+++ b/test/e2e/storage/volume_provisioning_providers.go
4557@@ -0,0 +1,577 @@
4558+// +build !providerless
4559+
4560+/*
4561+Copyright 2016 The Kubernetes Authors.
4562+
4563+Licensed under the Apache License, Version 2.0 (the "License");
4564+you may not use this file except in compliance with the License.
4565+You may obtain a copy of the License at
4566+
4567+ http://www.apache.org/licenses/LICENSE-2.0
4568+
4569+Unless required by applicable law or agreed to in writing, software
4570+distributed under the License is distributed on an "AS IS" BASIS,
4571+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
4572+See the License for the specific language governing permissions and
4573+limitations under the License.
4574+*/
4575+
4576+package storage
4577+
4578+import (
4579+ "context"
4580+ "fmt"
4581+ "strings"
4582+ "time"
4583+
4584+ "github.com/aws/aws-sdk-go/aws"
4585+ "github.com/aws/aws-sdk-go/aws/session"
4586+ "github.com/aws/aws-sdk-go/service/ec2"
4587+ "github.com/onsi/ginkgo"
4588+ "github.com/onsi/gomega"
4589+
4590+ v1 "k8s.io/api/core/v1"
4591+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
4592+ "k8s.io/apimachinery/pkg/types"
4593+ "k8s.io/apimachinery/pkg/util/sets"
4594+ clientset "k8s.io/client-go/kubernetes"
4595+ "k8s.io/kubernetes/test/e2e/framework"
4596+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
4597+ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
4598+ e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
4599+ e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
4600+ "k8s.io/kubernetes/test/e2e/storage/testsuites"
4601+ "k8s.io/kubernetes/test/e2e/storage/utils"
4602+)
4603+
4604+// checkAWSEBS checks properties of an AWS EBS. Test framework does not
4605+// instantiate full AWS provider, therefore we need use ec2 API directly.
4606+func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
4607+ diskName := volume.Spec.AWSElasticBlockStore.VolumeID
4608+
4609+ var client *ec2.EC2
4610+
4611+ tokens := strings.Split(diskName, "/")
4612+ volumeID := tokens[len(tokens)-1]
4613+
4614+ zone := framework.TestContext.CloudConfig.Zone
4615+
4616+ awsSession, err := session.NewSession()
4617+ if err != nil {
4618+ return fmt.Errorf("error creating session: %v", err)
4619+ }
4620+
4621+ if len(zone) > 0 {
4622+ region := zone[:len(zone)-1]
4623+ cfg := aws.Config{Region: &region}
4624+ framework.Logf("using region %s", region)
4625+ client = ec2.New(awsSession, &cfg)
4626+ } else {
4627+ framework.Logf("no region configured")
4628+ client = ec2.New(awsSession)
4629+ }
4630+
4631+ request := &ec2.DescribeVolumesInput{
4632+ VolumeIds: []*string{&volumeID},
4633+ }
4634+ info, err := client.DescribeVolumes(request)
4635+ if err != nil {
4636+ return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err)
4637+ }
4638+ if len(info.Volumes) == 0 {
4639+ return fmt.Errorf("no volumes found for volume %q", volumeID)
4640+ }
4641+ if len(info.Volumes) > 1 {
4642+ return fmt.Errorf("multiple volumes found for volume %q", volumeID)
4643+ }
4644+
4645+ awsVolume := info.Volumes[0]
4646+ if awsVolume.VolumeType == nil {
4647+ return fmt.Errorf("expected volume type %q, got nil", volumeType)
4648+ }
4649+ if *awsVolume.VolumeType != volumeType {
4650+ return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType)
4651+ }
4652+ if encrypted && awsVolume.Encrypted == nil {
4653+ return fmt.Errorf("expected encrypted volume, got no encryption")
4654+ }
4655+ if encrypted && !*awsVolume.Encrypted {
4656+ return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted)
4657+ }
4658+ return nil
4659+}
4660+
4661+func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
4662+ cloud, err := gce.GetGCECloud()
4663+ if err != nil {
4664+ return err
4665+ }
4666+ diskName := volume.Spec.GCEPersistentDisk.PDName
4667+ disk, err := cloud.GetDiskByNameUnknownZone(diskName)
4668+ if err != nil {
4669+ return err
4670+ }
4671+
4672+ if !strings.HasSuffix(disk.Type, volumeType) {
4673+ return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType)
4674+ }
4675+ return nil
4676+}
4677+
4678+var _ = utils.SIGDescribe("Dynamic Provisioning with cloud providers", func() {
4679+ f := framework.NewDefaultFramework("volume-provisioning")
4680+
4681+ // filled in BeforeEach
4682+ var c clientset.Interface
4683+ var ns string
4684+
4685+ ginkgo.BeforeEach(func() {
4686+ c = f.ClientSet
4687+ ns = f.Namespace.Name
4688+ })
4689+
4690+ ginkgo.Describe("DynamicProvisioner [Slow]", func() {
4691+ ginkgo.It("should provision storage with different parameters", func() {
4692+
4693+ // This test checks that dynamic provisioning can provision a volume
4694+ // that can be used to persist data among pods.
4695+ tests := []testsuites.StorageClassTest{
4696+ // GCE/GKE
4697+ {
4698+ Name: "SSD PD on GCE/GKE",
4699+ CloudProviders: []string{"gce", "gke"},
4700+ Provisioner: "kubernetes.io/gce-pd",
4701+ Parameters: map[string]string{
4702+ "type": "pd-ssd",
4703+ "zone": getRandomClusterZone(c),
4704+ },
4705+ ClaimSize: "1.5Gi",
4706+ ExpectedSize: "2Gi",
4707+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4708+ volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4709+ gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4710+
4711+ err := checkGCEPD(volume, "pd-ssd")
4712+ framework.ExpectNoError(err, "checkGCEPD pd-ssd")
4713+ },
4714+ },
4715+ {
4716+ Name: "HDD PD on GCE/GKE",
4717+ CloudProviders: []string{"gce", "gke"},
4718+ Provisioner: "kubernetes.io/gce-pd",
4719+ Parameters: map[string]string{
4720+ "type": "pd-standard",
4721+ },
4722+ ClaimSize: "1.5Gi",
4723+ ExpectedSize: "2Gi",
4724+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4725+ volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4726+ gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4727+
4728+ err := checkGCEPD(volume, "pd-standard")
4729+ framework.ExpectNoError(err, "checkGCEPD pd-standard")
4730+ },
4731+ },
4732+ // AWS
4733+ {
4734+ Name: "gp2 EBS on AWS",
4735+ CloudProviders: []string{"aws"},
4736+ Provisioner: "kubernetes.io/aws-ebs",
4737+ Parameters: map[string]string{
4738+ "type": "gp2",
4739+ "zone": getRandomClusterZone(c),
4740+ },
4741+ ClaimSize: "1.5Gi",
4742+ ExpectedSize: "2Gi",
4743+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4744+ volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4745+ gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4746+
4747+ err := checkAWSEBS(volume, "gp2", false)
4748+ framework.ExpectNoError(err, "checkAWSEBS gp2")
4749+ },
4750+ },
4751+ {
4752+ Name: "io1 EBS on AWS",
4753+ CloudProviders: []string{"aws"},
4754+ Provisioner: "kubernetes.io/aws-ebs",
4755+ Parameters: map[string]string{
4756+ "type": "io1",
4757+ "iopsPerGB": "50",
4758+ },
4759+ ClaimSize: "3.5Gi",
4760+ ExpectedSize: "4Gi", // 4 GiB is minimum for io1
4761+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4762+ volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4763+ gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4764+
4765+ err := checkAWSEBS(volume, "io1", false)
4766+ framework.ExpectNoError(err, "checkAWSEBS io1")
4767+ },
4768+ },
4769+ {
4770+ Name: "sc1 EBS on AWS",
4771+ CloudProviders: []string{"aws"},
4772+ Provisioner: "kubernetes.io/aws-ebs",
4773+ Parameters: map[string]string{
4774+ "type": "sc1",
4775+ },
4776+ ClaimSize: "500Gi", // minimum for sc1
4777+ ExpectedSize: "500Gi",
4778+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4779+ volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4780+ gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4781+
4782+ err := checkAWSEBS(volume, "sc1", false)
4783+ framework.ExpectNoError(err, "checkAWSEBS sc1")
4784+ },
4785+ },
4786+ {
4787+ Name: "st1 EBS on AWS",
4788+ CloudProviders: []string{"aws"},
4789+ Provisioner: "kubernetes.io/aws-ebs",
4790+ Parameters: map[string]string{
4791+ "type": "st1",
4792+ },
4793+ ClaimSize: "500Gi", // minimum for st1
4794+ ExpectedSize: "500Gi",
4795+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4796+ volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4797+ gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4798+
4799+ err := checkAWSEBS(volume, "st1", false)
4800+ framework.ExpectNoError(err, "checkAWSEBS st1")
4801+ },
4802+ },
4803+ {
4804+ Name: "encrypted EBS on AWS",
4805+ CloudProviders: []string{"aws"},
4806+ Provisioner: "kubernetes.io/aws-ebs",
4807+ Parameters: map[string]string{
4808+ "encrypted": "true",
4809+ },
4810+ ClaimSize: "1Gi",
4811+ ExpectedSize: "1Gi",
4812+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4813+ volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4814+ gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4815+
4816+ err := checkAWSEBS(volume, "gp2", true)
4817+ framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted")
4818+ },
4819+ },
4820+ // OpenStack generic tests (works on all OpenStack deployments)
4821+ {
4822+ Name: "generic Cinder volume on OpenStack",
4823+ CloudProviders: []string{"openstack"},
4824+ Provisioner: "kubernetes.io/cinder",
4825+ Parameters: map[string]string{},
4826+ ClaimSize: "1.5Gi",
4827+ ExpectedSize: "2Gi",
4828+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4829+ testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4830+ },
4831+ },
4832+ {
4833+ Name: "Cinder volume with empty volume type and zone on OpenStack",
4834+ CloudProviders: []string{"openstack"},
4835+ Provisioner: "kubernetes.io/cinder",
4836+ Parameters: map[string]string{
4837+ "type": "",
4838+ "availability": "",
4839+ },
4840+ ClaimSize: "1.5Gi",
4841+ ExpectedSize: "2Gi",
4842+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4843+ testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4844+ },
4845+ },
4846+ // vSphere generic test
4847+ {
4848+ Name: "generic vSphere volume",
4849+ CloudProviders: []string{"vsphere"},
4850+ Provisioner: "kubernetes.io/vsphere-volume",
4851+ Parameters: map[string]string{},
4852+ ClaimSize: "1.5Gi",
4853+ ExpectedSize: "1.5Gi",
4854+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4855+ testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4856+ },
4857+ },
4858+ // Azure
4859+ {
4860+ Name: "Azure disk volume with empty sku and location",
4861+ CloudProviders: []string{"azure"},
4862+ Provisioner: "kubernetes.io/azure-disk",
4863+ Parameters: map[string]string{},
4864+ ClaimSize: "1Gi",
4865+ ExpectedSize: "1Gi",
4866+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4867+ testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4868+ },
4869+ },
4870+ }
4871+
4872+ var betaTest *testsuites.StorageClassTest
4873+ for i, t := range tests {
4874+ // Beware of clojure, use local variables instead of those from
4875+ // outer scope
4876+ test := t
4877+
4878+ if !framework.ProviderIs(test.CloudProviders...) {
4879+ framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
4880+ continue
4881+ }
4882+
4883+ // Remember the last supported test for subsequent test of beta API
4884+ betaTest = &test
4885+
4886+ ginkgo.By("Testing " + test.Name)
4887+ suffix := fmt.Sprintf("%d", i)
4888+ test.Client = c
4889+ test.Class = newStorageClass(test, ns, suffix)
4890+ test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
4891+ ClaimSize: test.ClaimSize,
4892+ StorageClassName: &test.Class.Name,
4893+ VolumeMode: &test.VolumeMode,
4894+ }, ns)
4895+ test.TestDynamicProvisioning()
4896+ }
4897+
4898+ // Run the last test with storage.k8s.io/v1beta1 on pvc
4899+ if betaTest != nil {
4900+ ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning")
4901+ class := newBetaStorageClass(*betaTest, "beta")
4902+ // we need to create the class manually, testDynamicProvisioning does not accept beta class
4903+ class, err := c.StorageV1beta1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
4904+ framework.ExpectNoError(err)
4905+ defer deleteStorageClass(c, class.Name)
4906+
4907+ betaTest.Client = c
4908+ betaTest.Class = nil
4909+ betaTest.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
4910+ ClaimSize: betaTest.ClaimSize,
4911+ StorageClassName: &class.Name,
4912+ VolumeMode: &betaTest.VolumeMode,
4913+ }, ns)
4914+ betaTest.Claim.Spec.StorageClassName = &(class.Name)
4915+ (*betaTest).TestDynamicProvisioning()
4916+ }
4917+ })
4918+
4919+ ginkgo.It("should provision storage with non-default reclaim policy Retain", func() {
4920+ e2eskipper.SkipUnlessProviderIs("gce", "gke")
4921+
4922+ test := testsuites.StorageClassTest{
4923+ Client: c,
4924+ Name: "HDD PD on GCE/GKE",
4925+ CloudProviders: []string{"gce", "gke"},
4926+ Provisioner: "kubernetes.io/gce-pd",
4927+ Parameters: map[string]string{
4928+ "type": "pd-standard",
4929+ },
4930+ ClaimSize: "1Gi",
4931+ ExpectedSize: "1Gi",
4932+ PvCheck: func(claim *v1.PersistentVolumeClaim) {
4933+ volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
4934+ gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
4935+
4936+ err := checkGCEPD(volume, "pd-standard")
4937+ framework.ExpectNoError(err, "checkGCEPD")
4938+ },
4939+ }
4940+ test.Class = newStorageClass(test, ns, "reclaimpolicy")
4941+ retain := v1.PersistentVolumeReclaimRetain
4942+ test.Class.ReclaimPolicy = &retain
4943+ test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
4944+ ClaimSize: test.ClaimSize,
4945+ StorageClassName: &test.Class.Name,
4946+ VolumeMode: &test.VolumeMode,
4947+ }, ns)
4948+ pv := test.TestDynamicProvisioning()
4949+
4950+ ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
4951+ framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
4952+
4953+ ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name))
4954+ framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
4955+
4956+ ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name))
4957+ framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
4958+ framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
4959+ })
4960+
4961+ ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() {
4962+ e2eskipper.SkipUnlessProviderIs("gce", "gke")
4963+ var suffix string = "unmananged"
4964+
4965+ ginkgo.By("Discovering an unmanaged zone")
4966+ allZones := sets.NewString() // all zones in the project
4967+
4968+ gceCloud, err := gce.GetGCECloud()
4969+ framework.ExpectNoError(err)
4970+
4971+ // Get all k8s managed zones (same as zones with nodes in them for test)
4972+ managedZones, err := gceCloud.GetAllZonesFromCloudProvider()
4973+ framework.ExpectNoError(err)
4974+
4975+ // Get a list of all zones in the project
4976+ zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
4977+ framework.ExpectNoError(err)
4978+ for _, z := range zones.Items {
4979+ allZones.Insert(z.Name)
4980+ }
4981+
4982+ // Get the subset of zones not managed by k8s
4983+ var unmanagedZone string
4984+ var popped bool
4985+ unmanagedZones := allZones.Difference(managedZones)
4986+ // And select one of them at random.
4987+ if unmanagedZone, popped = unmanagedZones.PopAny(); !popped {
4988+ e2eskipper.Skipf("No unmanaged zones found.")
4989+ }
4990+
4991+ ginkgo.By("Creating a StorageClass for the unmanaged zone")
4992+ test := testsuites.StorageClassTest{
4993+ Name: "unmanaged_zone",
4994+ Provisioner: "kubernetes.io/gce-pd",
4995+ Parameters: map[string]string{"zone": unmanagedZone},
4996+ ClaimSize: "1Gi",
4997+ }
4998+ sc := newStorageClass(test, ns, suffix)
4999+ sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
5000+ framework.ExpectNoError(err)
5001+ defer deleteStorageClass(c, sc.Name)
5002+
5003+ ginkgo.By("Creating a claim and expecting it to timeout")
5004+ pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
5005+ ClaimSize: test.ClaimSize,
5006+ StorageClassName: &sc.Name,
5007+ VolumeMode: &test.VolumeMode,
5008+ }, ns)
5009+ pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
5010+ framework.ExpectNoError(err)
5011+ defer func() {
5012+ framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
5013+ }()
5014+
5015+ // The claim should timeout phase:Pending
5016+ err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
5017+ framework.ExpectError(err)
5018+ framework.Logf(err.Error())
5019+ })
5020+
5021+ ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() {
5022+ // This case tests for the regressions of a bug fixed by PR #21268
5023+ // REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV
5024+ // not being deleted.
5025+ // NOTE: Polls until no PVs are detected, times out at 5 minutes.
5026+
5027+ e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
5028+
5029+ const raceAttempts int = 100
5030+ var residualPVs []*v1.PersistentVolume
5031+ ginkgo.By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts))
5032+ test := testsuites.StorageClassTest{
5033+ Name: "deletion race",
5034+ Provisioner: "", // Use a native one based on current cloud provider
5035+ ClaimSize: "1Gi",
5036+ }
5037+
5038+ class := newStorageClass(test, ns, "race")
5039+ class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
5040+ framework.ExpectNoError(err)
5041+ defer deleteStorageClass(c, class.Name)
5042+
5043+ // To increase chance of detection, attempt multiple iterations
5044+ for i := 0; i < raceAttempts; i++ {
5045+ prefix := fmt.Sprintf("race-%d", i)
5046+ claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
5047+ NamePrefix: prefix,
5048+ ClaimSize: test.ClaimSize,
5049+ StorageClassName: &class.Name,
5050+ VolumeMode: &test.VolumeMode,
5051+ }, ns)
5052+ tmpClaim, err := e2epv.CreatePVC(c, ns, claim)
5053+ framework.ExpectNoError(err)
5054+ framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
5055+ }
5056+
5057+ ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
5058+ residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
5059+ // Cleanup the test resources before breaking
5060+ defer deleteProvisionedVolumesAndDisks(c, residualPVs)
5061+ framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs))
5062+
5063+ framework.Logf("0 PersistentVolumes remain.")
5064+ })
5065+
5066+ ginkgo.It("deletion should be idempotent", func() {
5067+ // This test ensures that deletion of a volume is idempotent.
5068+ // It creates a PV with Retain policy, deletes underlying AWS / GCE
5069+ // volume and changes the reclaim policy to Delete.
5070+ // PV controller should delete the PV even though the underlying volume
5071+ // is already deleted.
5072+ e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
5073+ ginkgo.By("creating PD")
5074+ diskName, err := e2epv.CreatePDWithRetry()
5075+ framework.ExpectNoError(err)
5076+
5077+ ginkgo.By("creating PV")
5078+ pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
5079+ NamePrefix: "volume-idempotent-delete-",
5080+ // Use Retain to keep the PV, the test will change it to Delete
5081+ // when the time comes.
5082+ ReclaimPolicy: v1.PersistentVolumeReclaimRetain,
5083+ AccessModes: []v1.PersistentVolumeAccessMode{
5084+ v1.ReadWriteOnce,
5085+ },
5086+ Capacity: "1Gi",
5087+ // PV is bound to non-existing PVC, so it's reclaim policy is
5088+ // executed immediately
5089+ Prebind: &v1.PersistentVolumeClaim{
5090+ ObjectMeta: metav1.ObjectMeta{
5091+ Name: "dummy-claim-name",
5092+ Namespace: ns,
5093+ UID: types.UID("01234567890"),
5094+ },
5095+ },
5096+ })
5097+ switch framework.TestContext.Provider {
5098+ case "aws":
5099+ pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
5100+ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
5101+ VolumeID: diskName,
5102+ },
5103+ }
5104+ case "gce", "gke":
5105+ pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
5106+ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
5107+ PDName: diskName,
5108+ },
5109+ }
5110+ }
5111+ pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
5112+ framework.ExpectNoError(err)
5113+
5114+ ginkgo.By("waiting for the PV to get Released")
5115+ err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout)
5116+ framework.ExpectNoError(err)
5117+
5118+ ginkgo.By("deleting the PD")
5119+ err = e2epv.DeletePVSource(&pv.Spec.PersistentVolumeSource)
5120+ framework.ExpectNoError(err)
5121+
5122+ ginkgo.By("changing the PV reclaim policy")
5123+ pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
5124+ framework.ExpectNoError(err)
5125+ pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete
5126+ pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})
5127+ framework.ExpectNoError(err)
5128+
5129+ ginkgo.By("waiting for the PV to get deleted")
5130+ err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout)
5131+ framework.ExpectNoError(err)
5132+ })
5133+ })
5134+})
5135diff --git a/test/e2e/upgrades/nvidia-gpu.go b/test/e2e/upgrades/nvidia-gpu.go
5136index cf3b8c0cda3..30515197ef7 100644
5137--- a/test/e2e/upgrades/nvidia-gpu.go
5138+++ b/test/e2e/upgrades/nvidia-gpu.go
5139@@ -1,3 +1,5 @@
5140+// +build !providerless
5141+
5142 /*
5143 Copyright 2018 The Kubernetes Authors.
5144
5145--
51462.26.2
5147