Copyright 2020 The Monogon Project Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


From 43752b6c34f49080de3a66b79cbcd92b214c1f01 Mon Sep 17 00:00:00 2001
From: Lorenz Brun <lorenz@nexantic.com>
Date: Mon, 20 Jul 2020 16:50:56 +0200
Subject: [PATCH] POC: Make e2e test suite support providerless builds

---
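This splits provider-dependent code out into files guarded by a
"!providerless" build constraint, so a provider-free e2e test binary
can be compiled. A minimal sketch of the mechanism used throughout this
patch (the file and package names here are illustrative, not part of
the diff below):

    // +build !providerless

    package example

    import (
    	// Blank import registers the GCE provider via its init()
    	// function; this file is only compiled when the "providerless"
    	// build tag is NOT set.
    	_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
    )

Building with the tag then drops these files and their cloud SDK
dependencies, e.g.:

    go build -tags providerless ./test/e2e
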
.../custom_metrics_stackdriver_autoscaling.go | 2 +
test/e2e/cloud/imports.go | 2 +
test/e2e/e2e.go | 10 -
test/e2e/e2e_providers.go | 32 +
.../framework/providers/gce/firewall_test.go | 2 +
.../instrumentation/monitoring/accelerator.go | 2 +
.../monitoring/custom_metrics_deployments.go | 2 +
.../monitoring/custom_metrics_stackdriver.go | 4 +-
.../instrumentation/monitoring/stackdriver.go | 2 +
.../monitoring/stackdriver_metadata_agent.go | 4 +-
test/e2e/network/firewall.go | 2 +
test/e2e/network/ingress.go | 2 +
test/e2e/network/ingress_scale.go | 2 +
test/e2e/network/network_tiers.go | 2 +
test/e2e/network/scale/ingress.go | 2 +
.../network/scale/localrun/ingress_scale.go | 2 +-
test/e2e/network/service.go | 912 ----------------
test/e2e/network/service_providers.go | 980 ++++++++++++++++++
test/e2e/node/recreate_node.go | 2 +
test/e2e/scheduling/nvidia-gpus.go | 2 +
test/e2e/scheduling/ubernetes_lite_volumes.go | 2 +
test/e2e/storage/drivers/in_tree.go | 732 -------------
test/e2e/storage/drivers/in_tree_providers.go | 751 ++++++++++++++
test/e2e/storage/in_tree_volumes.go | 5 -
test/e2e/storage/in_tree_volumes_providers.go | 46 +
.../nfs_persistent_volume-disruptive.go | 2 +-
test/e2e/storage/pd.go | 2 +
test/e2e/storage/persistent_volumes-gce.go | 3 +
test/e2e/storage/regional_pd.go | 3 +
test/e2e/storage/volume_provisioning.go | 527 ----------
.../storage/volume_provisioning_providers.go | 577 +++++++++++
test/e2e/upgrades/nvidia-gpu.go | 2 +
32 files changed, 2432 insertions(+), 2190 deletions(-)
create mode 100644 test/e2e/e2e_providers.go
create mode 100644 test/e2e/network/service_providers.go
create mode 100644 test/e2e/storage/drivers/in_tree_providers.go
create mode 100644 test/e2e/storage/in_tree_volumes_providers.go
create mode 100644 test/e2e/storage/volume_provisioning_providers.go

diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
index d3a7862d338..8bacec7fe1d 100644
--- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
+++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2017 The Kubernetes Authors.

diff --git a/test/e2e/cloud/imports.go b/test/e2e/cloud/imports.go
index 5aa1def97d1..382cb1a2264 100644
--- a/test/e2e/cloud/imports.go
+++ b/test/e2e/cloud/imports.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2019 The Kubernetes Authors.

diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index d1e23325d69..f5717e417e7 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -53,16 +53,6 @@ import (
utilnet "k8s.io/utils/net"

clientset "k8s.io/client-go/kubernetes"
- // ensure auth plugins are loaded
- _ "k8s.io/client-go/plugin/pkg/client/auth"
-
- // ensure that cloud providers are loaded
- _ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
- _ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
)

const (
diff --git a/test/e2e/e2e_providers.go b/test/e2e/e2e_providers.go
new file mode 100644
index 00000000000..cf96642b110
--- /dev/null
+++ b/test/e2e/e2e_providers.go
@@ -0,0 +1,32 @@
+// +build !providerless
+
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ // ensure auth plugins are loaded
+ _ "k8s.io/client-go/plugin/pkg/client/auth"
+
+ // ensure that cloud providers are loaded
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
+)
diff --git a/test/e2e/framework/providers/gce/firewall_test.go b/test/e2e/framework/providers/gce/firewall_test.go
index 647441dc962..2a92543a5a7 100644
--- a/test/e2e/framework/providers/gce/firewall_test.go
+++ b/test/e2e/framework/providers/gce/firewall_test.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2018 The Kubernetes Authors.

diff --git a/test/e2e/instrumentation/monitoring/accelerator.go b/test/e2e/instrumentation/monitoring/accelerator.go
index 90047e46ea1..6fa094e6a18 100644
--- a/test/e2e/instrumentation/monitoring/accelerator.go
+++ b/test/e2e/instrumentation/monitoring/accelerator.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2017 The Kubernetes Authors.

diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
index de80b129315..8d96b93bf11 100644
--- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
+++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2017 The Kubernetes Authors.

diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go
index 277b5a0ab24..ddbc3f20802 100644
--- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go
+++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2017 The Kubernetes Authors.

@@ -21,7 +23,7 @@ import (
"time"

gcm "google.golang.org/api/monitoring/v3"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
diff --git a/test/e2e/instrumentation/monitoring/stackdriver.go b/test/e2e/instrumentation/monitoring/stackdriver.go
index dbc5e51c20d..3db0120900b 100644
--- a/test/e2e/instrumentation/monitoring/stackdriver.go
+++ b/test/e2e/instrumentation/monitoring/stackdriver.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2017 The Kubernetes Authors.

diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
index 321591344db..bad9be5b5bf 100644
--- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
+++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2017 The Kubernetes Authors.

@@ -24,7 +26,7 @@ import (
"reflect"
"time"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go
index f4200f5a30c..f8612ed75a9 100644
--- a/test/e2e/network/firewall.go
+++ b/test/e2e/network/firewall.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2016 The Kubernetes Authors.

diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go
index f1bce918e8f..1a37badbd7b 100644
--- a/test/e2e/network/ingress.go
+++ b/test/e2e/network/ingress.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2015 The Kubernetes Authors.

diff --git a/test/e2e/network/ingress_scale.go b/test/e2e/network/ingress_scale.go
index 6cc8585b7b2..867c834868c 100644
--- a/test/e2e/network/ingress_scale.go
+++ b/test/e2e/network/ingress_scale.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2018 The Kubernetes Authors.

diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go
index 5ae68a5a1ee..f3ea1f72a6b 100644
--- a/test/e2e/network/network_tiers.go
+++ b/test/e2e/network/network_tiers.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2017 The Kubernetes Authors.

diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go
index 954296beb52..43ad9c9b618 100644
--- a/test/e2e/network/scale/ingress.go
+++ b/test/e2e/network/scale/ingress.go
@@ -1,3 +1,5 @@
+// +build !providerless
+
/*
Copyright 2018 The Kubernetes Authors.

diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go
index 2e2c39884da..5a27f5f4cb2 100644
--- a/test/e2e/network/scale/localrun/ingress_scale.go
+++ b/test/e2e/network/scale/localrun/ingress_scale.go
@@ -27,7 +27,7 @@ import (

"k8s.io/klog/v2"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
index cce449b5e8e..db7328efbe8 100644
--- a/test/e2e/network/service.go
+++ b/test/e2e/network/service.go
@@ -17,7 +17,6 @@ limitations under the License.
package network

import (
- "bytes"
"context"
"encoding/json"
"errors"
@@ -32,8 +31,6 @@ import (

utilnet "k8s.io/apimachinery/pkg/util/net"

- compute "google.golang.org/api/compute/v1"
-
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -47,11 +44,9 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
- e2ekubesystem "k8s.io/kubernetes/test/e2e/framework/kubesystem"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
- "k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -59,7 +54,6 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
- gcecloud "k8s.io/legacy-cloud-providers/gce"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -1191,375 +1185,6 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err)
})

- // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
- ginkgo.It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() {
- // requires cloud load-balancer support
- e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
-
- loadBalancerSupportsUDP := !framework.ProviderIs("aws")
-
- loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault
- if framework.ProviderIs("aws") {
- loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
- }
- loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
-
- // This test is more monolithic than we'd like because LB turnup can be
- // very slow, so we lumped all the tests into one LB lifecycle.
-
- serviceName := "mutability-test"
- ns1 := f.Namespace.Name // LB1 in ns1 on TCP
- framework.Logf("namespace for TCP test: %s", ns1)
-
- ginkgo.By("creating a second namespace")
- namespacePtr, err := f.CreateNamespace("services", nil)
- framework.ExpectNoError(err, "failed to create namespace")
- ns2 := namespacePtr.Name // LB2 in ns2 on UDP
- framework.Logf("namespace for UDP test: %s", ns2)
-
- nodeIP, err := e2enode.PickIP(cs) // for later
- framework.ExpectNoError(err)
-
- // Test TCP and UDP Services. Services with the same name in different
- // namespaces should get different node ports and load balancers.
-
- ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
- tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
- tcpService, err := tcpJig.CreateTCPService(nil)
- framework.ExpectNoError(err)
-
- ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
- udpJig := e2eservice.NewTestJig(cs, ns2, serviceName)
- udpService, err := udpJig.CreateUDPService(nil)
- framework.ExpectNoError(err)
-
- ginkgo.By("verifying that TCP and UDP use the same port")
- if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
- framework.Failf("expected to use the same port for TCP and UDP")
- }
- svcPort := int(tcpService.Spec.Ports[0].Port)
- framework.Logf("service port (TCP and UDP): %d", svcPort)
-
- ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
- _, err = tcpJig.Run(nil)
- framework.ExpectNoError(err)
-
- ginkgo.By("creating a pod to be part of the UDP service " + serviceName)
- _, err = udpJig.Run(nil)
- framework.ExpectNoError(err)
-
- // Change the services to NodePort.
-
- ginkgo.By("changing the TCP service to type=NodePort")
- tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
- s.Spec.Type = v1.ServiceTypeNodePort
- })
- framework.ExpectNoError(err)
- tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
- framework.Logf("TCP node port: %d", tcpNodePort)
-
- ginkgo.By("changing the UDP service to type=NodePort")
- udpService, err = udpJig.UpdateService(func(s *v1.Service) {
- s.Spec.Type = v1.ServiceTypeNodePort
- })
- framework.ExpectNoError(err)
- udpNodePort := int(udpService.Spec.Ports[0].NodePort)
- framework.Logf("UDP node port: %d", udpNodePort)
-
- ginkgo.By("hitting the TCP service's NodePort")
- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("hitting the UDP service's NodePort")
- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-
- // Change the services to LoadBalancer.
-
- // Here we test that LoadBalancers can receive static IP addresses. This isn't
- // necessary, but is an additional feature this monolithic test checks.
- requestedIP := ""
- staticIPName := ""
- if framework.ProviderIs("gce", "gke") {
- ginkgo.By("creating a static load balancer IP")
- staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID)
- gceCloud, err := gce.GetGCECloud()
- framework.ExpectNoError(err, "failed to get GCE cloud provider")
-
- err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
- defer func() {
- if staticIPName != "" {
- // Release GCE static IP - this is not kube-managed and will not be automatically released.
- if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
- framework.Logf("failed to release static IP %s: %v", staticIPName, err)
- }
- }
- }()
- framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)
- reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
- framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
-
- requestedIP = reservedAddr.Address
- framework.Logf("Allocated static load balancer IP: %s", requestedIP)
- }
-
- ginkgo.By("changing the TCP service to type=LoadBalancer")
- tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
- s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
- s.Spec.Type = v1.ServiceTypeLoadBalancer
- })
- framework.ExpectNoError(err)
-
- if loadBalancerSupportsUDP {
- ginkgo.By("changing the UDP service to type=LoadBalancer")
- udpService, err = udpJig.UpdateService(func(s *v1.Service) {
- s.Spec.Type = v1.ServiceTypeLoadBalancer
- })
- framework.ExpectNoError(err)
- }
- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
- if loadBalancerSupportsUDP {
- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService))
- }
-
- ginkgo.By("waiting for the TCP service to have a load balancer")
- // Wait for the load balancer to be created asynchronously
- tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
- framework.ExpectNoError(err)
- if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
- framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
- }
- if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
- framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
- }
- tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
- framework.Logf("TCP load balancer: %s", tcpIngressIP)
-
- if framework.ProviderIs("gce", "gke") {
- // Do this as early as possible, which overrides the `defer` above.
- // This is mostly out of fear of leaking the IP in a timeout case
- // (as of this writing we're not 100% sure where the leaks are
- // coming from, so this is first-aid rather than surgery).
- ginkgo.By("demoting the static IP to ephemeral")
- if staticIPName != "" {
- gceCloud, err := gce.GetGCECloud()
- framework.ExpectNoError(err, "failed to get GCE cloud provider")
- // Deleting it after it is attached "demotes" it to an
- // ephemeral IP, which can be auto-released.
- if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
- framework.Failf("failed to release static IP %s: %v", staticIPName, err)
- }
- staticIPName = ""
- }
- }
-
- var udpIngressIP string
- if loadBalancerSupportsUDP {
- ginkgo.By("waiting for the UDP service to have a load balancer")
- // 2nd one should be faster since they ran in parallel.
- udpService, err = udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
- framework.ExpectNoError(err)
- if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
- framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
- }
- udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
- framework.Logf("UDP load balancer: %s", udpIngressIP)
-
- ginkgo.By("verifying that TCP and UDP use different load balancers")
- if tcpIngressIP == udpIngressIP {
- framework.Failf("Load balancers are not different: %s", e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
- }
- }
-
- ginkgo.By("hitting the TCP service's NodePort")
- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("hitting the UDP service's NodePort")
- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("hitting the TCP service's LoadBalancer")
- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
-
- if loadBalancerSupportsUDP {
- ginkgo.By("hitting the UDP service's LoadBalancer")
- testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
- }
-
- // Change the services' node ports.
-
- ginkgo.By("changing the TCP service's NodePort")
- tcpService, err = tcpJig.ChangeServiceNodePort(tcpNodePort)
- framework.ExpectNoError(err)
- tcpNodePortOld := tcpNodePort
- tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
- if tcpNodePort == tcpNodePortOld {
- framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
- }
- if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
- framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
- }
- framework.Logf("TCP node port: %d", tcpNodePort)
-
- ginkgo.By("changing the UDP service's NodePort")
- udpService, err = udpJig.ChangeServiceNodePort(udpNodePort)
- framework.ExpectNoError(err)
- udpNodePortOld := udpNodePort
- udpNodePort = int(udpService.Spec.Ports[0].NodePort)
- if udpNodePort == udpNodePortOld {
- framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
- }
- if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
- framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
- }
- framework.Logf("UDP node port: %d", udpNodePort)
-
- ginkgo.By("hitting the TCP service's new NodePort")
- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("hitting the UDP service's new NodePort")
- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("checking the old TCP NodePort is closed")
- testNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("checking the old UDP NodePort is closed")
- testNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("hitting the TCP service's LoadBalancer")
- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
-
- if loadBalancerSupportsUDP {
- ginkgo.By("hitting the UDP service's LoadBalancer")
- testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
- }
-
- // Change the services' main ports.
-
- ginkgo.By("changing the TCP service's port")
- tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
- s.Spec.Ports[0].Port++
- })
- framework.ExpectNoError(err)
- svcPortOld := svcPort
- svcPort = int(tcpService.Spec.Ports[0].Port)
- if svcPort == svcPortOld {
- framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
- }
- if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
- framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
- }
- if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
- framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
- }
-
- ginkgo.By("changing the UDP service's port")
- udpService, err = udpJig.UpdateService(func(s *v1.Service) {
- s.Spec.Ports[0].Port++
- })
- framework.ExpectNoError(err)
- if int(udpService.Spec.Ports[0].Port) != svcPort {
- framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
- }
- if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
- framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
- }
- if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
- framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
- }
-
- framework.Logf("service port (TCP and UDP): %d", svcPort)
-
- ginkgo.By("hitting the TCP service's NodePort")
- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("hitting the UDP service's NodePort")
- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("hitting the TCP service's LoadBalancer")
- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
-
- if loadBalancerSupportsUDP {
- ginkgo.By("hitting the UDP service's LoadBalancer")
- testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
- }
-
- ginkgo.By("Scaling the pods to 0")
- err = tcpJig.Scale(0)
- framework.ExpectNoError(err)
- err = udpJig.Scale(0)
- framework.ExpectNoError(err)
-
- ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort")
- testRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort")
- testRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer")
- testRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
-
- if loadBalancerSupportsUDP {
- ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
- testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
- }
-
- ginkgo.By("Scaling the pods to 1")
- err = tcpJig.Scale(1)
- framework.ExpectNoError(err)
- err = udpJig.Scale(1)
- framework.ExpectNoError(err)
-
- ginkgo.By("hitting the TCP service's NodePort")
- e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("hitting the UDP service's NodePort")
- testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("hitting the TCP service's LoadBalancer")
- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
-
- if loadBalancerSupportsUDP {
- ginkgo.By("hitting the UDP service's LoadBalancer")
- testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
- }
-
- // Change the services back to ClusterIP.
-
- ginkgo.By("changing TCP service back to type=ClusterIP")
- _, err = tcpJig.UpdateService(func(s *v1.Service) {
- s.Spec.Type = v1.ServiceTypeClusterIP
- s.Spec.Ports[0].NodePort = 0
- })
- framework.ExpectNoError(err)
- // Wait for the load balancer to be destroyed asynchronously
- _, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
- framework.ExpectNoError(err)
-
- ginkgo.By("changing UDP service back to type=ClusterIP")
- _, err = udpJig.UpdateService(func(s *v1.Service) {
- s.Spec.Type = v1.ServiceTypeClusterIP
- s.Spec.Ports[0].NodePort = 0
- })
- framework.ExpectNoError(err)
- if loadBalancerSupportsUDP {
- // Wait for the load balancer to be destroyed asynchronously
- _, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
- framework.ExpectNoError(err)
- }
-
- ginkgo.By("checking the TCP NodePort is closed")
- testNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("checking the UDP NodePort is closed")
- testNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-
- ginkgo.By("checking the TCP LoadBalancer is closed")
- testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
-
- if loadBalancerSupportsUDP {
- ginkgo.By("checking the UDP LoadBalancer is closed")
- testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
- }
- })
-
/*
Testname: Service, update NodePort, same port different protocol
Description: Create a service to accept TCP requests. By default, created service MUST be of type ClusterIP and an ClusterIP MUST be assigned to the service.
@@ -2202,199 +1827,6 @@ var _ = SIGDescribe("Services", func() {
checkReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPod.Name, svcIP)
})

- ginkgo.It("should be able to create an internal type load balancer [Slow]", func() {
- e2eskipper.SkipUnlessProviderIs("azure", "gke", "gce")
-
- createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
- pollInterval := framework.Poll * 10
-
- namespace := f.Namespace.Name
- serviceName := "lb-internal"
- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-
- ginkgo.By("creating pod to be part of service " + serviceName)
- _, err := jig.Run(nil)
- framework.ExpectNoError(err)
-
- enableILB, disableILB := enableAndDisableInternalLB()
-
- isInternalEndpoint := func(lbIngress *v1.LoadBalancerIngress) bool {
- ingressEndpoint := e2eservice.GetIngressPoint(lbIngress)
- // Needs update for providers using hostname as endpoint.
- return strings.HasPrefix(ingressEndpoint, "10.")
- }
-
- ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled")
- svc, err := jig.CreateTCPService(func(svc *v1.Service) {
- svc.Spec.Type = v1.ServiceTypeLoadBalancer
- enableILB(svc)
- })
- framework.ExpectNoError(err)
-
- defer func() {
- ginkgo.By("Clean up loadbalancer service")
- e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
- }()
-
- svc, err = jig.WaitForLoadBalancer(createTimeout)
- framework.ExpectNoError(err)
- lbIngress := &svc.Status.LoadBalancer.Ingress[0]
- svcPort := int(svc.Spec.Ports[0].Port)
- // should have an internal IP.
- framework.ExpectEqual(isInternalEndpoint(lbIngress), true)
-
- // ILBs are not accessible from the test orchestrator, so it's necessary to use
- // a pod to test the service.
- ginkgo.By("hitting the internal load balancer from pod")
- framework.Logf("creating pod with host network")
- hostExec := launchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")
-
- framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
- tcpIngressIP := e2eservice.GetIngressPoint(lbIngress)
- if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
- cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
- stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
- if err != nil {
- framework.Logf("error curling; stdout: %v. err: %v", stdout, err)
- return false, nil
- }
-
- if !strings.Contains(stdout, "hello") {
- framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
- return false, nil
- }
-
- framework.Logf("Successful curl; stdout: %v", stdout)
- return true, nil
- }); pollErr != nil {
- framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr)
- }
-
- ginkgo.By("switching to external type LoadBalancer")
- svc, err = jig.UpdateService(func(svc *v1.Service) {
- disableILB(svc)
- })
- framework.ExpectNoError(err)
- framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
- if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
- svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
- if err != nil {
- return false, err
- }
- lbIngress = &svc.Status.LoadBalancer.Ingress[0]
- return !isInternalEndpoint(lbIngress), nil
- }); pollErr != nil {
- framework.Failf("Loadbalancer IP not changed to external.")
- }
- // should have an external IP.
- gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse())
-
- ginkgo.By("hitting the external load balancer")
- framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
- tcpIngressIP = e2eservice.GetIngressPoint(lbIngress)
- e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
-
- // GCE cannot test a specific IP because the test may not own it. This cloud specific condition
- // will be removed when GCP supports similar functionality.
- if framework.ProviderIs("azure") {
- ginkgo.By("switching back to interal type LoadBalancer, with static IP specified.")
- internalStaticIP := "10.240.11.11"
- svc, err = jig.UpdateService(func(svc *v1.Service) {
- svc.Spec.LoadBalancerIP = internalStaticIP
- enableILB(svc)
- })
- framework.ExpectNoError(err)
- framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
- if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
- svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
- if err != nil {
- return false, err
- }
- lbIngress = &svc.Status.LoadBalancer.Ingress[0]
- return isInternalEndpoint(lbIngress), nil
- }); pollErr != nil {
- framework.Failf("Loadbalancer IP not changed to internal.")
- }
- // should have the given static internal IP.
- framework.ExpectEqual(e2eservice.GetIngressPoint(lbIngress), internalStaticIP)
- }
- })
-
- // This test creates a load balancer, make sure its health check interval
- // equals to gceHcCheckIntervalSeconds. Then the interval is manipulated
- // to be something else, see if the interval will be reconciled.
- ginkgo.It("should reconcile LB health check interval [Slow][Serial]", func() {
- const gceHcCheckIntervalSeconds = int64(8)
- // This test is for clusters on GCE.
- // (It restarts kube-controller-manager, which we don't support on GKE)
- e2eskipper.SkipUnlessProviderIs("gce")
- e2eskipper.SkipUnlessSSHKeyPresent()
-
- clusterID, err := gce.GetClusterID(cs)
- if err != nil {
- framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
- }
- gceCloud, err := gce.GetGCECloud()
- if err != nil {
- framework.Failf("framework.GetGCECloud() = _, %v; want nil", err)
- }
-
- namespace := f.Namespace.Name
- serviceName := "lb-hc-int"
- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-
- ginkgo.By("create load balancer service")
- // Create loadbalancer service with source range from node[0] and podAccept
- svc, err := jig.CreateTCPService(func(svc *v1.Service) {
- svc.Spec.Type = v1.ServiceTypeLoadBalancer
- })
- framework.ExpectNoError(err)
-
- defer func() {
- ginkgo.By("Clean up loadbalancer service")
- e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
- }()
-
- svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
- framework.ExpectNoError(err)
-
- hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
- hc, err := gceCloud.GetHTTPHealthCheck(hcName)
- if err != nil {
- framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
- }
- framework.ExpectEqual(hc.CheckIntervalSec, gceHcCheckIntervalSeconds)
-
- ginkgo.By("modify the health check interval")
- hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
- if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
- framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
- }
-
- ginkgo.By("restart kube-controller-manager")
- if err := e2ekubesystem.RestartControllerManager(); err != nil {
- framework.Failf("e2ekubesystem.RestartControllerManager() = %v; want nil", err)
- }
- if err := e2ekubesystem.WaitForControllerManagerUp(); err != nil {
- framework.Failf("e2ekubesystem.WaitForControllerManagerUp() = %v; want nil", err)
- }
-
- ginkgo.By("health check should be reconciled")
- pollInterval := framework.Poll * 10
- loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
- if pollErr := wait.PollImmediate(pollInterval, loadBalancerPropagationTimeout, func() (bool, error) {
- hc, err := gceCloud.GetHTTPHealthCheck(hcName)
- if err != nil {
- framework.Logf("ginkgo.Failed to get HttpHealthCheck(%q): %v", hcName, err)
- return false, err
- }
- framework.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
- return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
- }); pollErr != nil {
- framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
- }
- })
-
/*
Release: v1.19
Testname: Service, ClusterIP type, session affinity to ClientIP
@@ -2880,350 +2312,6 @@ var _ = SIGDescribe("Services", func() {
})
})

-var _ = SIGDescribe("ESIPP [Slow]", func() {
- f := framework.NewDefaultFramework("esipp")
- var loadBalancerCreateTimeout time.Duration
-
- var cs clientset.Interface
- serviceLBNames := []string{}
-
- ginkgo.BeforeEach(func() {
- // requires cloud load-balancer support - this feature currently supported only on GCE/GKE
- e2eskipper.SkipUnlessProviderIs("gce", "gke")
-
- cs = f.ClientSet
- loadBalancerCreateTimeout = e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
- })
-
- ginkgo.AfterEach(func() {
- if ginkgo.CurrentGinkgoTestDescription().Failed {
- DescribeSvc(f.Namespace.Name)
- }
- for _, lb := range serviceLBNames {
- framework.Logf("cleaning load balancer resource for %s", lb)
- e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
- }
- //reset serviceLBNames
- serviceLBNames = []string{}
- })
-
- ginkgo.It("should work for type=LoadBalancer", func() {
- namespace := f.Namespace.Name
- serviceName := "external-local-lb"
- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-
- svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
- framework.ExpectNoError(err)
- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
- healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
- if healthCheckNodePort == 0 {
- framework.Failf("Service HealthCheck NodePort was not allocated")
- }
- defer func() {
- err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
- framework.ExpectNoError(err)
-
- // Make sure we didn't leak the health check node port.
- threshold := 2
- nodes, err := jig.GetEndpointNodes()
- framework.ExpectNoError(err)
- for _, ips := range nodes {
- err := TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold)
- framework.ExpectNoError(err)
- }
- err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
- framework.ExpectNoError(err)
- }()
-
- svcTCPPort := int(svc.Spec.Ports[0].Port)
- ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
-
- ginkgo.By("reading clientIP using the TCP service's service port via its external VIP")
- content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
- clientIP := content.String()
- framework.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
-
- ginkgo.By("checking if Source IP is preserved")
- if strings.HasPrefix(clientIP, "10.") {
- framework.Failf("Source IP was NOT preserved")
- }
- })
-
- ginkgo.It("should work for type=NodePort", func() {
- namespace := f.Namespace.Name
- serviceName := "external-local-nodeport"
- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-
- svc, err := jig.CreateOnlyLocalNodePortService(true)
- framework.ExpectNoError(err)
- defer func() {
- err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
- framework.ExpectNoError(err)
- }()
-
- tcpNodePort := int(svc.Spec.Ports[0].NodePort)
- endpointsNodeMap, err := jig.GetEndpointNodes()
- framework.ExpectNoError(err)
- path := "/clientip"
-
- for nodeName, nodeIPs := range endpointsNodeMap {
- nodeIP := nodeIPs[0]
- ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path))
- content := GetHTTPContent(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, path)
- clientIP := content.String()
- framework.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
- if strings.HasPrefix(clientIP, "10.") {
- framework.Failf("Source IP was NOT preserved")
- }
- }
- })
-
- ginkgo.It("should only target nodes with endpoints", func() {
- namespace := f.Namespace.Name
- serviceName := "external-local-nodes"
- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
- nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
- framework.ExpectNoError(err)
-
- svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, false,
- func(svc *v1.Service) {
- // Change service port to avoid collision with opened hostPorts
- // in other tests that run in parallel.
- if len(svc.Spec.Ports) != 0 {
- svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(svc.Spec.Ports[0].Port))
- svc.Spec.Ports[0].Port = 8081
- }
-
- })
- framework.ExpectNoError(err)
- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
- defer func() {
- err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
- framework.ExpectNoError(err)
- err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
- framework.ExpectNoError(err)
- }()
-
- healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
- if healthCheckNodePort == 0 {
- framework.Failf("Service HealthCheck NodePort was not allocated")
- }
-
- ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
-
- ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
- svcTCPPort := int(svc.Spec.Ports[0].Port)
-
- threshold := 2
- path := "/healthz"
- for i := 0; i < len(nodes.Items); i++ {
- endpointNodeName := nodes.Items[i].Name
-
- ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
- _, err = jig.Run(func(rc *v1.ReplicationController) {
- rc.Name = serviceName
- if endpointNodeName != "" {
- rc.Spec.Template.Spec.NodeName = endpointNodeName
- }
- })
- framework.ExpectNoError(err)
-
- ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName))
- err = jig.WaitForEndpointOnNode(endpointNodeName)
- framework.ExpectNoError(err)
-
- // HealthCheck should pass only on the node where num(endpoints) > 0
- // All other nodes should fail the healthcheck on the service healthCheckNodePort
- for n, publicIP := range ips {
- // Make sure the loadbalancer picked up the health check change.
- // Confirm traffic can reach backend through LB before checking healthcheck nodeport.
- e2eservice.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout)
- expectedSuccess := nodes.Items[n].Name == endpointNodeName
- port := strconv.Itoa(healthCheckNodePort)
- ipPort := net.JoinHostPort(publicIP, port)
- framework.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
- err := TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, e2eservice.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)
- framework.ExpectNoError(err)
- }
- framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
- }
- })
-
- ginkgo.It("should work from pods", func() {
- var err error
- namespace := f.Namespace.Name
- serviceName := "external-local-pods"
- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-
- svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
- framework.ExpectNoError(err)
- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
- defer func() {
- err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
- framework.ExpectNoError(err)
- err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
- framework.ExpectNoError(err)
- }()
-
- ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
- port := strconv.Itoa(int(svc.Spec.Ports[0].Port))
- ipPort := net.JoinHostPort(ingressIP, port)
- path := fmt.Sprintf("%s/clientip", ipPort)
-
- ginkgo.By("Creating pause pod deployment to make sure, pausePods are in desired state")
- deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1)
- framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
-
- defer func() {
- framework.Logf("Deleting deployment")
- err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
- framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
- }()
-
- deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
- framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
- labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
- framework.ExpectNoError(err, "Error in setting LabelSelector as selector from deployment")
-
- pausePods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()})
- framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments")
-
- pausePod := pausePods.Items[0]
- framework.Logf("Waiting up to %v curl %v", e2eservice.KubeProxyLagTimeout, path)
- cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %v`, path)
-
- var srcIP string
- loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
- ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName))
- if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) {
- stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
- if err != nil {
- framework.Logf("got err: %v, retry until timeout", err)
- return false, nil
- }
- srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0])
- return srcIP == pausePod.Status.PodIP, nil
- }); pollErr != nil {
- framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", pausePod.Name, pausePod.Status.PodIP, srcIP)
- }
- })
-
- // TODO: Get rid of [DisabledForLargeClusters] tag when issue #90047 is fixed.
- ginkgo.It("should handle updates to ExternalTrafficPolicy field [DisabledForLargeClusters]", func() {
- namespace := f.Namespace.Name
- serviceName := "external-local-update"
- jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-
- nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
- framework.ExpectNoError(err)
- if len(nodes.Items) < 2 {
- framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
- }
-
- svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
- framework.ExpectNoError(err)
- serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
- defer func() {
- err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
- framework.ExpectNoError(err)
- err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
- framework.ExpectNoError(err)
- }()
-
- // save the health check node port because it disappears when ESIPP is turned off.
- healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
-
- ginkgo.By("turning ESIPP off")
- svc, err = jig.UpdateService(func(svc *v1.Service) {
- svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
- })
- framework.ExpectNoError(err)
- if svc.Spec.HealthCheckNodePort > 0 {
- framework.Failf("Service HealthCheck NodePort still present")
- }
-
- endpointNodeMap, err := jig.GetEndpointNodes()
- framework.ExpectNoError(err)
- noEndpointNodeMap := map[string][]string{}
- for _, n := range nodes.Items {
- if _, ok := endpointNodeMap[n.Name]; ok {
- continue
- }
- noEndpointNodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP)
- }
-
- svcTCPPort := int(svc.Spec.Ports[0].Port)
- svcNodePort := int(svc.Spec.Ports[0].NodePort)
- ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
- path := "/clientip"
-
- ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap))
- for nodeName, nodeIPs := range noEndpointNodeMap {
- ginkgo.By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path))
- GetHTTPContent(nodeIPs[0], svcNodePort, e2eservice.KubeProxyLagTimeout, path)
- }
-
- for nodeName, nodeIPs := range endpointNodeMap {
- ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0]))
- var body bytes.Buffer
- pollfn := func() (bool, error) {
- result := e2enetwork.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil)
- if result.Code == 0 {
- return true, nil
- }
- body.Reset()
- body.Write(result.Body)
| 1195 | - return false, nil |
| 1196 | - } |
| 1197 | - if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollfn); pollErr != nil { |
| 1198 | - framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s", |
| 1199 | - nodeName, healthCheckNodePort, body.String()) |
| 1200 | - } |
| 1201 | - } |
| 1202 | - |
| 1203 | - // Poll till kube-proxy re-adds the MASQUERADE rule on the node. |
| 1204 | - ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP)) |
| 1205 | - var clientIP string |
| 1206 | - pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { |
| 1207 | - content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip") |
| 1208 | - clientIP = content.String() |
| 1209 | - if strings.HasPrefix(clientIP, "10.") { |
| 1210 | - return true, nil |
| 1211 | - } |
| 1212 | - return false, nil |
| 1213 | - }) |
| 1214 | - if pollErr != nil { |
| 1215 | - framework.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP) |
| 1216 | - } |
| 1217 | - |
| 1218 | - // TODO: We need to attempt to create another service with the previously |
| 1219 | - // allocated healthcheck nodePort. If the health check nodePort has been |
| 1220 | - // freed, the new service creation will succeed, upon which we cleanup. |
| 1221 | - // If the health check nodePort has NOT been freed, the new service |
| 1222 | - // creation will fail. |
| 1223 | - |
| 1224 | - ginkgo.By("setting ExternalTraffic field back to OnlyLocal") |
| 1225 | - svc, err = jig.UpdateService(func(svc *v1.Service) { |
| 1226 | - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal |
| 1227 | - // Request the same healthCheckNodePort as before, to test the user-requested allocation path |
| 1228 | - svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort) |
| 1229 | - }) |
| 1230 | - framework.ExpectNoError(err) |
| 1231 | - pollErr = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { |
| 1232 | - content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path) |
| 1233 | - clientIP = content.String() |
| 1234 | - ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP)) |
| 1235 | - if !strings.HasPrefix(clientIP, "10.") { |
| 1236 | - return true, nil |
| 1237 | - } |
| 1238 | - return false, nil |
| 1239 | - }) |
| 1240 | - if pollErr != nil { |
| 1241 | - framework.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP) |
| 1242 | - } |
| 1243 | - }) |
| 1244 | -}) |
| 1245 | - |
| 1246 | func execSourceipTest(pausePod v1.Pod, serviceAddress string) (string, string) { |
| 1247 | var err error |
| 1248 | var stdout string |
| 1249 | diff --git a/test/e2e/network/service_providers.go b/test/e2e/network/service_providers.go |
| 1250 | new file mode 100644 |
| 1251 | index 00000000000..b7eae6feb2c |
| 1252 | --- /dev/null |
| 1253 | +++ b/test/e2e/network/service_providers.go |
| 1254 | @@ -0,0 +1,980 @@ |
| 1255 | +// +build !providerless |
| 1256 | + |
| 1257 | +/* |
| 1258 | +Copyright 2020 The Kubernetes Authors. |
| 1259 | + |
| 1260 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 1261 | +you may not use this file except in compliance with the License. |
| 1262 | +You may obtain a copy of the License at |
| 1263 | + |
| 1264 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 1265 | + |
| 1266 | +Unless required by applicable law or agreed to in writing, software |
| 1267 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 1268 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 1269 | +See the License for the specific language governing permissions and |
| 1270 | +limitations under the License. |
| 1271 | +*/ |
| 1272 | + |
| 1273 | +package network |
| 1274 | + |
| 1275 | +import ( |
| 1276 | + "bytes" |
| 1277 | + "context" |
| 1278 | + "fmt" |
| 1279 | + "net" |
| 1280 | + "strconv" |
| 1281 | + "strings" |
| 1282 | + "time" |
| 1283 | + |
| 1284 | + compute "google.golang.org/api/compute/v1" |
| 1285 | + v1 "k8s.io/api/core/v1" |
| 1286 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 1287 | + "k8s.io/apimachinery/pkg/util/intstr" |
| 1288 | + "k8s.io/apimachinery/pkg/util/wait" |
| 1289 | + clientset "k8s.io/client-go/kubernetes" |
| 1290 | + cloudprovider "k8s.io/cloud-provider" |
| 1291 | + "k8s.io/kubernetes/test/e2e/framework" |
| 1292 | + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" |
| 1293 | + e2ekubesystem "k8s.io/kubernetes/test/e2e/framework/kubesystem" |
| 1294 | + e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" |
| 1295 | + e2enode "k8s.io/kubernetes/test/e2e/framework/node" |
| 1296 | + "k8s.io/kubernetes/test/e2e/framework/providers/gce" |
| 1297 | + e2erc "k8s.io/kubernetes/test/e2e/framework/rc" |
| 1298 | + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" |
| 1299 | + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" |
| 1300 | + gcecloud "k8s.io/legacy-cloud-providers/gce" |
| 1301 | + |
| 1302 | + "github.com/onsi/ginkgo" |
| 1303 | + "github.com/onsi/gomega" |
| 1304 | +) |
| 1305 | + |
| 1306 | +var _ = SIGDescribe("Services with Cloud LoadBalancers", func() { |
| 1307 | + |
| 1308 | + f := framework.NewDefaultFramework("services") |
| 1309 | + |
| 1310 | + var cs clientset.Interface |
| 1311 | + serviceLBNames := []string{} |
| 1312 | + |
| 1313 | + ginkgo.BeforeEach(func() { |
| 1314 | + cs = f.ClientSet |
| 1315 | + }) |
| 1316 | + |
| 1317 | + ginkgo.AfterEach(func() { |
| 1318 | + if ginkgo.CurrentGinkgoTestDescription().Failed { |
| 1319 | + DescribeSvc(f.Namespace.Name) |
| 1320 | + } |
| 1321 | + for _, lb := range serviceLBNames { |
| 1322 | + framework.Logf("cleaning load balancer resource for %s", lb) |
| 1323 | + e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) |
| 1324 | + } |
| 1325 | + // reset serviceLBNames |
| 1326 | + serviceLBNames = []string{} |
| 1327 | + }) |
| 1328 | + |
| 1329 | + // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed |
| 1330 | + ginkgo.It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() { |
| 1331 | + // requires cloud load-balancer support |
| 1332 | + e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") |
| 1333 | + |
| 1334 | + loadBalancerSupportsUDP := !framework.ProviderIs("aws") |
| 1335 | + |
| 1336 | + loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault |
| 1337 | + if framework.ProviderIs("aws") { |
| 1338 | + loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS |
| 1339 | + } |
| 1340 | + loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) |
| 1341 | + |
| 1342 | + // This test is more monolithic than we'd like because LB turnup can be |
| 1343 | + // very slow, so we lumped all the tests into one LB lifecycle. |
| 1344 | + |
| 1345 | + serviceName := "mutability-test" |
| 1346 | + ns1 := f.Namespace.Name // LB1 in ns1 on TCP |
| 1347 | + framework.Logf("namespace for TCP test: %s", ns1) |
| 1348 | + |
| 1349 | + ginkgo.By("creating a second namespace") |
| 1350 | + namespacePtr, err := f.CreateNamespace("services", nil) |
| 1351 | + framework.ExpectNoError(err, "failed to create namespace") |
| 1352 | + ns2 := namespacePtr.Name // LB2 in ns2 on UDP |
| 1353 | + framework.Logf("namespace for UDP test: %s", ns2) |
| 1354 | + |
| 1355 | + nodeIP, err := e2enode.PickIP(cs) // for later |
| 1356 | + framework.ExpectNoError(err) |
| 1357 | + |
| 1358 | + // Test TCP and UDP Services. Services with the same name in different |
| 1359 | + // namespaces should get different node ports and load balancers. |
| 1360 | + |
| 1361 | + ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1) |
| 1362 | + tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName) |
| 1363 | + tcpService, err := tcpJig.CreateTCPService(nil) |
| 1364 | + framework.ExpectNoError(err) |
| 1365 | + |
| 1366 | + ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2) |
| 1367 | + udpJig := e2eservice.NewTestJig(cs, ns2, serviceName) |
| 1368 | + udpService, err := udpJig.CreateUDPService(nil) |
| 1369 | + framework.ExpectNoError(err) |
| 1370 | + |
| 1371 | + ginkgo.By("verifying that TCP and UDP use the same port") |
| 1372 | + if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port { |
| 1373 | + framework.Failf("expected to use the same port for TCP and UDP") |
| 1374 | + } |
| 1375 | + svcPort := int(tcpService.Spec.Ports[0].Port) |
| 1376 | + framework.Logf("service port (TCP and UDP): %d", svcPort) |
| 1377 | + |
| 1378 | + ginkgo.By("creating a pod to be part of the TCP service " + serviceName) |
| 1379 | + _, err = tcpJig.Run(nil) |
| 1380 | + framework.ExpectNoError(err) |
| 1381 | + |
| 1382 | + ginkgo.By("creating a pod to be part of the UDP service " + serviceName) |
| 1383 | + _, err = udpJig.Run(nil) |
| 1384 | + framework.ExpectNoError(err) |
| 1385 | + |
| 1386 | + // Change the services to NodePort. |
| 1387 | + |
| 1388 | + ginkgo.By("changing the TCP service to type=NodePort") |
| 1389 | + tcpService, err = tcpJig.UpdateService(func(s *v1.Service) { |
| 1390 | + s.Spec.Type = v1.ServiceTypeNodePort |
| 1391 | + }) |
| 1392 | + framework.ExpectNoError(err) |
| 1393 | + tcpNodePort := int(tcpService.Spec.Ports[0].NodePort) |
| 1394 | + framework.Logf("TCP node port: %d", tcpNodePort) |
| 1395 | + |
| 1396 | + ginkgo.By("changing the UDP service to type=NodePort") |
| 1397 | + udpService, err = udpJig.UpdateService(func(s *v1.Service) { |
| 1398 | + s.Spec.Type = v1.ServiceTypeNodePort |
| 1399 | + }) |
| 1400 | + framework.ExpectNoError(err) |
| 1401 | + udpNodePort := int(udpService.Spec.Ports[0].NodePort) |
| 1402 | + framework.Logf("UDP node port: %d", udpNodePort) |
| 1403 | + |
| 1404 | + ginkgo.By("hitting the TCP service's NodePort") |
| 1405 | + e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1406 | + |
| 1407 | + ginkgo.By("hitting the UDP service's NodePort") |
| 1408 | + testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1409 | + |
| 1410 | + // Change the services to LoadBalancer. |
| 1411 | + |
| 1412 | + // Here we test that LoadBalancers can receive static IP addresses. This isn't |
| 1413 | + // necessary, but is an additional feature this monolithic test checks. |
| 1414 | + requestedIP := "" |
| 1415 | + staticIPName := "" |
| 1416 | + if framework.ProviderIs("gce", "gke") { |
| 1417 | + ginkgo.By("creating a static load balancer IP") |
| 1418 | + staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID) |
| 1419 | + gceCloud, err := gce.GetGCECloud() |
| 1420 | + framework.ExpectNoError(err, "failed to get GCE cloud provider") |
| 1421 | + |
| 1422 | + err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region()) |
| 1423 | + defer func() { |
| 1424 | + if staticIPName != "" { |
| 1425 | + // Release GCE static IP - this is not kube-managed and will not be automatically released. |
| 1426 | + if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil { |
| 1427 | + framework.Logf("failed to release static IP %s: %v", staticIPName, err) |
| 1428 | + } |
| 1429 | + } |
| 1430 | + }() |
| 1431 | + framework.ExpectNoError(err, "failed to create region address: %s", staticIPName) |
| 1432 | + reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region()) |
| 1433 | + framework.ExpectNoError(err, "failed to get region address: %s", staticIPName) |
| 1434 | + |
| 1435 | + requestedIP = reservedAddr.Address |
| 1436 | + framework.Logf("Allocated static load balancer IP: %s", requestedIP) |
| 1437 | + } |
| 1438 | + |
| 1439 | + ginkgo.By("changing the TCP service to type=LoadBalancer") |
| 1440 | + tcpService, err = tcpJig.UpdateService(func(s *v1.Service) { |
| 1441 | + s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable |
| 1442 | + s.Spec.Type = v1.ServiceTypeLoadBalancer |
| 1443 | + }) |
| 1444 | + framework.ExpectNoError(err) |
| 1445 | + |
| 1446 | + if loadBalancerSupportsUDP { |
| 1447 | + ginkgo.By("changing the UDP service to type=LoadBalancer") |
| 1448 | + udpService, err = udpJig.UpdateService(func(s *v1.Service) { |
| 1449 | + s.Spec.Type = v1.ServiceTypeLoadBalancer |
| 1450 | + }) |
| 1451 | + framework.ExpectNoError(err) |
| 1452 | + } |
| 1453 | + serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService)) |
| 1454 | + if loadBalancerSupportsUDP { |
| 1455 | + serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService)) |
| 1456 | + } |
| 1457 | + |
| 1458 | + ginkgo.By("waiting for the TCP service to have a load balancer") |
| 1459 | + // Wait for the load balancer to be created asynchronously |
| 1460 | + tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout) |
| 1461 | + framework.ExpectNoError(err) |
| 1462 | + if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { |
| 1463 | + framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) |
| 1464 | + } |
| 1465 | + if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { |
| 1466 | + framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) |
| 1467 | + } |
| 1468 | + tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) |
| 1469 | + framework.Logf("TCP load balancer: %s", tcpIngressIP) |
| 1470 | + |
| 1471 | + if framework.ProviderIs("gce", "gke") { |
| 1472 | + // Do this as early as possible, which overrides the `defer` above. |
| 1473 | + // This is mostly out of fear of leaking the IP in a timeout case |
| 1474 | + // (as of this writing we're not 100% sure where the leaks are |
| 1475 | + // coming from, so this is first-aid rather than surgery). |
| 1476 | + ginkgo.By("demoting the static IP to ephemeral") |
| 1477 | + if staticIPName != "" { |
| 1478 | + gceCloud, err := gce.GetGCECloud() |
| 1479 | + framework.ExpectNoError(err, "failed to get GCE cloud provider") |
| 1480 | + // Deleting it after it is attached "demotes" it to an |
| 1481 | + // ephemeral IP, which can be auto-released. |
| 1482 | + if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil { |
| 1483 | + framework.Failf("failed to release static IP %s: %v", staticIPName, err) |
| 1484 | + } |
| 1485 | + staticIPName = "" |
| 1486 | + } |
| 1487 | + } |
| 1488 | + |
| 1489 | + var udpIngressIP string |
| 1490 | + if loadBalancerSupportsUDP { |
| 1491 | + ginkgo.By("waiting for the UDP service to have a load balancer") |
| 1492 | + // 2nd one should be faster since they ran in parallel. |
| 1493 | + udpService, err = udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout) |
| 1494 | + framework.ExpectNoError(err) |
| 1495 | + if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { |
| 1496 | + framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) |
| 1497 | + } |
| 1498 | + udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) |
| 1499 | + framework.Logf("UDP load balancer: %s", udpIngressIP) |
| 1500 | + |
| 1501 | + ginkgo.By("verifying that TCP and UDP use different load balancers") |
| 1502 | + if tcpIngressIP == udpIngressIP { |
| 1503 | + framework.Failf("Load balancers are not different: %s", e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) |
| 1504 | + } |
| 1505 | + } |
| 1506 | + |
| 1507 | + ginkgo.By("hitting the TCP service's NodePort") |
| 1508 | + e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1509 | + |
| 1510 | + ginkgo.By("hitting the UDP service's NodePort") |
| 1511 | + testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1512 | + |
| 1513 | + ginkgo.By("hitting the TCP service's LoadBalancer") |
| 1514 | + e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) |
| 1515 | + |
| 1516 | + if loadBalancerSupportsUDP { |
| 1517 | + ginkgo.By("hitting the UDP service's LoadBalancer") |
| 1518 | + testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) |
| 1519 | + } |
| 1520 | + |
| 1521 | + // Change the services' node ports. |
| 1522 | + |
| 1523 | + ginkgo.By("changing the TCP service's NodePort") |
| 1524 | + tcpService, err = tcpJig.ChangeServiceNodePort(tcpNodePort) |
| 1525 | + framework.ExpectNoError(err) |
| 1526 | + tcpNodePortOld := tcpNodePort |
| 1527 | + tcpNodePort = int(tcpService.Spec.Ports[0].NodePort) |
| 1528 | + if tcpNodePort == tcpNodePortOld { |
| 1529 | + framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) |
| 1530 | + } |
| 1531 | + if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { |
| 1532 | + framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) |
| 1533 | + } |
| 1534 | + framework.Logf("TCP node port: %d", tcpNodePort) |
| 1535 | + |
| 1536 | + ginkgo.By("changing the UDP service's NodePort") |
| 1537 | + udpService, err = udpJig.ChangeServiceNodePort(udpNodePort) |
| 1538 | + framework.ExpectNoError(err) |
| 1539 | + udpNodePortOld := udpNodePort |
| 1540 | + udpNodePort = int(udpService.Spec.Ports[0].NodePort) |
| 1541 | + if udpNodePort == udpNodePortOld { |
| 1542 | + framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) |
| 1543 | + } |
| 1544 | + if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { |
| 1545 | + framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) |
| 1546 | + } |
| 1547 | + framework.Logf("UDP node port: %d", udpNodePort) |
| 1548 | + |
| 1549 | + ginkgo.By("hitting the TCP service's new NodePort") |
| 1550 | + e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1551 | + |
| 1552 | + ginkgo.By("hitting the UDP service's new NodePort") |
| 1553 | + testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1554 | + |
| 1555 | + ginkgo.By("checking the old TCP NodePort is closed") |
| 1556 | + testNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout) |
| 1557 | + |
| 1558 | + ginkgo.By("checking the old UDP NodePort is closed") |
| 1559 | + testNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout) |
| 1560 | + |
| 1561 | + ginkgo.By("hitting the TCP service's LoadBalancer") |
| 1562 | + e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) |
| 1563 | + |
| 1564 | + if loadBalancerSupportsUDP { |
| 1565 | + ginkgo.By("hitting the UDP service's LoadBalancer") |
| 1566 | + testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) |
| 1567 | + } |
| 1568 | + |
| 1569 | + // Change the services' main ports. |
| 1570 | + |
| 1571 | + ginkgo.By("changing the TCP service's port") |
| 1572 | + tcpService, err = tcpJig.UpdateService(func(s *v1.Service) { |
| 1573 | + s.Spec.Ports[0].Port++ |
| 1574 | + }) |
| 1575 | + framework.ExpectNoError(err) |
| 1576 | + svcPortOld := svcPort |
| 1577 | + svcPort = int(tcpService.Spec.Ports[0].Port) |
| 1578 | + if svcPort == svcPortOld { |
| 1579 | + framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) |
| 1580 | + } |
| 1581 | + if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { |
| 1582 | + framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) |
| 1583 | + } |
| 1584 | + if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { |
| 1585 | + framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) |
| 1586 | + } |
| 1587 | + |
| 1588 | + ginkgo.By("changing the UDP service's port") |
| 1589 | + udpService, err = udpJig.UpdateService(func(s *v1.Service) { |
| 1590 | + s.Spec.Ports[0].Port++ |
| 1591 | + }) |
| 1592 | + framework.ExpectNoError(err) |
| 1593 | + if int(udpService.Spec.Ports[0].Port) != svcPort { |
| 1594 | + framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port) |
| 1595 | + } |
| 1596 | + if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { |
| 1597 | + framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) |
| 1598 | + } |
| 1599 | + if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { |
| 1600 | + framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) |
| 1601 | + } |
| 1602 | + |
| 1603 | + framework.Logf("service port (TCP and UDP): %d", svcPort) |
| 1604 | + |
| 1605 | + ginkgo.By("hitting the TCP service's NodePort") |
| 1606 | + e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1607 | + |
| 1608 | + ginkgo.By("hitting the UDP service's NodePort") |
| 1609 | + testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1610 | + |
| 1611 | + ginkgo.By("hitting the TCP service's LoadBalancer") |
| 1612 | + e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) |
| 1613 | + |
| 1614 | + if loadBalancerSupportsUDP { |
| 1615 | + ginkgo.By("hitting the UDP service's LoadBalancer") |
| 1616 | + testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) |
| 1617 | + } |
| 1618 | + |
| 1619 | + ginkgo.By("Scaling the pods to 0") |
| 1620 | + err = tcpJig.Scale(0) |
| 1621 | + framework.ExpectNoError(err) |
| 1622 | + err = udpJig.Scale(0) |
| 1623 | + framework.ExpectNoError(err) |
| 1624 | + |
| 1625 | + ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort") |
| 1626 | + testRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1627 | + |
| 1628 | + ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort") |
| 1629 | + testRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1630 | + |
| 1631 | + ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer") |
| 1632 | + testRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) |
| 1633 | + |
| 1634 | + if loadBalancerSupportsUDP { |
| 1635 | + ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer") |
| 1636 | + testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) |
| 1637 | + } |
| 1638 | + |
| 1639 | + ginkgo.By("Scaling the pods to 1") |
| 1640 | + err = tcpJig.Scale(1) |
| 1641 | + framework.ExpectNoError(err) |
| 1642 | + err = udpJig.Scale(1) |
| 1643 | + framework.ExpectNoError(err) |
| 1644 | + |
| 1645 | + ginkgo.By("hitting the TCP service's NodePort") |
| 1646 | + e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1647 | + |
| 1648 | + ginkgo.By("hitting the UDP service's NodePort") |
| 1649 | + testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1650 | + |
| 1651 | + ginkgo.By("hitting the TCP service's LoadBalancer") |
| 1652 | + e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) |
| 1653 | + |
| 1654 | + if loadBalancerSupportsUDP { |
| 1655 | + ginkgo.By("hitting the UDP service's LoadBalancer") |
| 1656 | + testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) |
| 1657 | + } |
| 1658 | + |
| 1659 | + // Change the services back to ClusterIP. |
| 1660 | + |
| 1661 | + ginkgo.By("changing TCP service back to type=ClusterIP") |
| 1662 | + _, err = tcpJig.UpdateService(func(s *v1.Service) { |
| 1663 | + s.Spec.Type = v1.ServiceTypeClusterIP |
| 1664 | + s.Spec.Ports[0].NodePort = 0 |
| 1665 | + }) |
| 1666 | + framework.ExpectNoError(err) |
| 1667 | + // Wait for the load balancer to be destroyed asynchronously |
| 1668 | + _, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout) |
| 1669 | + framework.ExpectNoError(err) |
| 1670 | + |
| 1671 | + ginkgo.By("changing UDP service back to type=ClusterIP") |
| 1672 | + _, err = udpJig.UpdateService(func(s *v1.Service) { |
| 1673 | + s.Spec.Type = v1.ServiceTypeClusterIP |
| 1674 | + s.Spec.Ports[0].NodePort = 0 |
| 1675 | + }) |
| 1676 | + framework.ExpectNoError(err) |
| 1677 | + if loadBalancerSupportsUDP { |
| 1678 | + // Wait for the load balancer to be destroyed asynchronously |
| 1679 | + _, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout) |
| 1680 | + framework.ExpectNoError(err) |
| 1681 | + } |
| 1682 | + |
| 1683 | + ginkgo.By("checking the TCP NodePort is closed") |
| 1684 | + testNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1685 | + |
| 1686 | + ginkgo.By("checking the UDP NodePort is closed") |
| 1687 | + testNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) |
| 1688 | + |
| 1689 | + ginkgo.By("checking the TCP LoadBalancer is closed") |
| 1690 | + testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) |
| 1691 | + |
| 1692 | + if loadBalancerSupportsUDP { |
| 1693 | + ginkgo.By("checking the UDP LoadBalancer is closed") |
| 1694 | + testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) |
| 1695 | + } |
| 1696 | + }) |
| 1697 | + |
| 1698 | + ginkgo.It("should be able to create an internal type load balancer [Slow]", func() { |
| 1699 | + e2eskipper.SkipUnlessProviderIs("azure", "gke", "gce") |
| 1700 | + |
| 1701 | + createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) |
| 1702 | + pollInterval := framework.Poll * 10 |
| 1703 | + |
| 1704 | + namespace := f.Namespace.Name |
| 1705 | + serviceName := "lb-internal" |
| 1706 | + jig := e2eservice.NewTestJig(cs, namespace, serviceName) |
| 1707 | + |
| 1708 | + ginkgo.By("creating pod to be part of service " + serviceName) |
| 1709 | + _, err := jig.Run(nil) |
| 1710 | + framework.ExpectNoError(err) |
| 1711 | + |
| 1712 | + enableILB, disableILB := enableAndDisableInternalLB() |
| 1713 | + |
| 1714 | + isInternalEndpoint := func(lbIngress *v1.LoadBalancerIngress) bool { |
| 1715 | + ingressEndpoint := e2eservice.GetIngressPoint(lbIngress) |
| 1716 | + // Needs update for providers using hostname as endpoint. |
| 1717 | + return strings.HasPrefix(ingressEndpoint, "10.") |
| 1718 | + } |
| 1719 | + |
| 1720 | + ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled") |
| 1721 | + svc, err := jig.CreateTCPService(func(svc *v1.Service) { |
| 1722 | + svc.Spec.Type = v1.ServiceTypeLoadBalancer |
| 1723 | + enableILB(svc) |
| 1724 | + }) |
| 1725 | + framework.ExpectNoError(err) |
| 1726 | + |
| 1727 | + defer func() { |
| 1728 | + ginkgo.By("Clean up loadbalancer service") |
| 1729 | + e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) |
| 1730 | + }() |
| 1731 | + |
| 1732 | + svc, err = jig.WaitForLoadBalancer(createTimeout) |
| 1733 | + framework.ExpectNoError(err) |
| 1734 | + lbIngress := &svc.Status.LoadBalancer.Ingress[0] |
| 1735 | + svcPort := int(svc.Spec.Ports[0].Port) |
| 1736 | + // should have an internal IP. |
| 1737 | + framework.ExpectEqual(isInternalEndpoint(lbIngress), true) |
| 1738 | + |
| 1739 | + // ILBs are not accessible from the test orchestrator, so it's necessary to use |
| 1740 | + // a pod to test the service. |
| 1741 | + ginkgo.By("hitting the internal load balancer from pod") |
| 1742 | + framework.Logf("creating pod with host network") |
| 1743 | + hostExec := launchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec") |
| 1744 | + |
| 1745 | + framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName) |
| 1746 | + tcpIngressIP := e2eservice.GetIngressPoint(lbIngress) |
| 1747 | + if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { |
| 1748 | + cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort) |
| 1749 | + stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) |
| 1750 | + if err != nil { |
| 1751 | + framework.Logf("error curling; stdout: %v. err: %v", stdout, err) |
| 1752 | + return false, nil |
| 1753 | + } |
| 1754 | + |
| 1755 | + if !strings.Contains(stdout, "hello") { |
| 1756 | + framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout) |
| 1757 | + return false, nil |
| 1758 | + } |
| 1759 | + |
| 1760 | + framework.Logf("Successful curl; stdout: %v", stdout) |
| 1761 | + return true, nil |
| 1762 | + }); pollErr != nil { |
| 1763 | + framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr) |
| 1764 | + } |
| 1765 | + |
| 1766 | + ginkgo.By("switching to external type LoadBalancer") |
| 1767 | + svc, err = jig.UpdateService(func(svc *v1.Service) { |
| 1768 | + disableILB(svc) |
| 1769 | + }) |
| 1770 | + framework.ExpectNoError(err) |
| 1771 | + framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName) |
| 1772 | + if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { |
| 1773 | + svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) |
| 1774 | + if err != nil { |
| 1775 | + return false, err |
| 1776 | + } |
| 1777 | + lbIngress = &svc.Status.LoadBalancer.Ingress[0] |
| 1778 | + return !isInternalEndpoint(lbIngress), nil |
| 1779 | + }); pollErr != nil { |
| 1780 | + framework.Failf("Loadbalancer IP not changed to external.") |
| 1781 | + } |
| 1782 | + // should have an external IP. |
| 1783 | + gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse()) |
| 1784 | + |
| 1785 | + ginkgo.By("hitting the external load balancer") |
| 1786 | + framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName) |
| 1787 | + tcpIngressIP = e2eservice.GetIngressPoint(lbIngress) |
| 1788 | + e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault) |
| 1789 | + |
| 1790 | + // GCE cannot test a specific IP because the test may not own it. This cloud-specific condition |
| 1791 | + // will be removed when GCP supports similar functionality. |
| 1792 | + if framework.ProviderIs("azure") { |
| 1793 | + ginkgo.By("switching back to interal type LoadBalancer, with static IP specified.") |
| 1794 | + internalStaticIP := "10.240.11.11" |
| 1795 | + svc, err = jig.UpdateService(func(svc *v1.Service) { |
| 1796 | + svc.Spec.LoadBalancerIP = internalStaticIP |
| 1797 | + enableILB(svc) |
| 1798 | + }) |
| 1799 | + framework.ExpectNoError(err) |
| 1800 | + framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName) |
| 1801 | + if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { |
| 1802 | + svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) |
| 1803 | + if err != nil { |
| 1804 | + return false, err |
| 1805 | + } |
| 1806 | + lbIngress = &svc.Status.LoadBalancer.Ingress[0] |
| 1807 | + return isInternalEndpoint(lbIngress), nil |
| 1808 | + }); pollErr != nil { |
| 1809 | + framework.Failf("Loadbalancer IP not changed to internal.") |
| 1810 | + } |
| 1811 | + // should have the given static internal IP. |
| 1812 | + framework.ExpectEqual(e2eservice.GetIngressPoint(lbIngress), internalStaticIP) |
| 1813 | + } |
| 1814 | + }) |
| 1815 | + |
| 1816 | + // This test creates a load balancer and makes sure its health check interval |
| 1817 | + // equals gceHcCheckIntervalSeconds. The interval is then changed to something |
| 1818 | + // else, to verify that it gets reconciled back. |
| 1819 | + ginkgo.It("should reconcile LB health check interval [Slow][Serial]", func() { |
| 1820 | + const gceHcCheckIntervalSeconds = int64(8) |
| 1821 | + // This test is for clusters on GCE. |
| 1822 | + // (It restarts kube-controller-manager, which we don't support on GKE) |
| 1823 | + e2eskipper.SkipUnlessProviderIs("gce") |
| 1824 | + e2eskipper.SkipUnlessSSHKeyPresent() |
| 1825 | + |
| 1826 | + clusterID, err := gce.GetClusterID(cs) |
| 1827 | + if err != nil { |
| 1828 | + framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err) |
| 1829 | + } |
| 1830 | + gceCloud, err := gce.GetGCECloud() |
| 1831 | + if err != nil { |
| 1832 | + framework.Failf("framework.GetGCECloud() = _, %v; want nil", err) |
| 1833 | + } |
| 1834 | + |
| 1835 | + namespace := f.Namespace.Name |
| 1836 | + serviceName := "lb-hc-int" |
| 1837 | + jig := e2eservice.NewTestJig(cs, namespace, serviceName) |
| 1838 | + |
| 1839 | + ginkgo.By("create load balancer service") |
| 1840 | + // Create a TCP service of type LoadBalancer. |
| 1841 | + svc, err := jig.CreateTCPService(func(svc *v1.Service) { |
| 1842 | + svc.Spec.Type = v1.ServiceTypeLoadBalancer |
| 1843 | + }) |
| 1844 | + framework.ExpectNoError(err) |
| 1845 | + |
| 1846 | + defer func() { |
| 1847 | + ginkgo.By("Clean up loadbalancer service") |
| 1848 | + e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name) |
| 1849 | + }() |
| 1850 | + |
| 1851 | + svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) |
| 1852 | + framework.ExpectNoError(err) |
| 1853 | + |
| 1854 | + hcName := gcecloud.MakeNodesHealthCheckName(clusterID) |
| 1855 | + hc, err := gceCloud.GetHTTPHealthCheck(hcName) |
| 1856 | + if err != nil { |
| 1857 | + framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err) |
| 1858 | + } |
| 1859 | + framework.ExpectEqual(hc.CheckIntervalSec, gceHcCheckIntervalSeconds) |
| 1860 | + |
| 1861 | + ginkgo.By("modify the health check interval") |
| 1862 | + hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1 |
| 1863 | + if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil { |
| 1864 | + framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err) |
| 1865 | + } |
| 1866 | + |
| 1867 | + ginkgo.By("restart kube-controller-manager") |
| 1868 | + if err := e2ekubesystem.RestartControllerManager(); err != nil { |
| 1869 | + framework.Failf("e2ekubesystem.RestartControllerManager() = %v; want nil", err) |
| 1870 | + } |
| 1871 | + if err := e2ekubesystem.WaitForControllerManagerUp(); err != nil { |
| 1872 | + framework.Failf("e2ekubesystem.WaitForControllerManagerUp() = %v; want nil", err) |
| 1873 | + } |
| 1874 | + |
| 1875 | + ginkgo.By("health check should be reconciled") |
| 1876 | + pollInterval := framework.Poll * 10 |
| 1877 | + loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs) |
| 1878 | + if pollErr := wait.PollImmediate(pollInterval, loadBalancerPropagationTimeout, func() (bool, error) { |
| 1879 | + hc, err := gceCloud.GetHTTPHealthCheck(hcName) |
| 1880 | + if err != nil { |
| 1881 | + framework.Logf("ginkgo.Failed to get HttpHealthCheck(%q): %v", hcName, err) |
| 1882 | + return false, err |
| 1883 | + } |
| 1884 | + framework.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec) |
| 1885 | + return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil |
| 1886 | + }); pollErr != nil { |
| 1887 | + framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds) |
| 1888 | + } |
| 1889 | + }) |
| 1890 | + |
| 1891 | + var _ = SIGDescribe("ESIPP [Slow]", func() { |
| 1892 | + f := framework.NewDefaultFramework("esipp") |
| 1893 | + var loadBalancerCreateTimeout time.Duration |
| 1894 | + |
| 1895 | + var cs clientset.Interface |
| 1896 | + serviceLBNames := []string{} |
| 1897 | + |
| 1898 | + ginkgo.BeforeEach(func() { |
| 1899 | + // requires cloud load-balancer support - this feature is currently supported only on GCE/GKE |
| 1900 | + e2eskipper.SkipUnlessProviderIs("gce", "gke") |
| 1901 | + |
| 1902 | + cs = f.ClientSet |
| 1903 | + loadBalancerCreateTimeout = e2eservice.GetServiceLoadBalancerCreationTimeout(cs) |
| 1904 | + }) |
| 1905 | + |
| 1906 | + ginkgo.AfterEach(func() { |
| 1907 | + if ginkgo.CurrentGinkgoTestDescription().Failed { |
| 1908 | + DescribeSvc(f.Namespace.Name) |
| 1909 | + } |
| 1910 | + for _, lb := range serviceLBNames { |
| 1911 | + framework.Logf("cleaning load balancer resource for %s", lb) |
| 1912 | + e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) |
| 1913 | + } |
| 1914 | + // reset serviceLBNames |
| 1915 | + serviceLBNames = []string{} |
| 1916 | + }) |
| 1917 | + |
| 1918 | + ginkgo.It("should work for type=LoadBalancer", func() { |
| 1919 | + namespace := f.Namespace.Name |
| 1920 | + serviceName := "external-local-lb" |
| 1921 | + jig := e2eservice.NewTestJig(cs, namespace, serviceName) |
| 1922 | + |
| 1923 | + svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil) |
| 1924 | + framework.ExpectNoError(err) |
| 1925 | + serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) |
| 1926 | + healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) |
| 1927 | + if healthCheckNodePort == 0 { |
| 1928 | + framework.Failf("Service HealthCheck NodePort was not allocated") |
| 1929 | + } |
| 1930 | + defer func() { |
| 1931 | + err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) |
| 1932 | + framework.ExpectNoError(err) |
| 1933 | + |
| 1934 | + // Make sure we didn't leak the health check node port. |
| 1935 | + threshold := 2 |
| 1936 | + nodes, err := jig.GetEndpointNodes() |
| 1937 | + framework.ExpectNoError(err) |
| 1938 | + for _, ips := range nodes { |
| 1939 | + err := TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold) |
| 1940 | + framework.ExpectNoError(err) |
| 1941 | + } |
| 1942 | + err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) |
| 1943 | + framework.ExpectNoError(err) |
| 1944 | + }() |
| 1945 | + |
| 1946 | + svcTCPPort := int(svc.Spec.Ports[0].Port) |
| 1947 | + ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) |
| 1948 | + |
| 1949 | + ginkgo.By("reading clientIP using the TCP service's service port via its external VIP") |
| 1950 | + content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip") |
| 1951 | + clientIP := content.String() |
| 1952 | + framework.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP) |
| 1953 | + |
| 1954 | + ginkgo.By("checking if Source IP is preserved") |
| 1955 | + if strings.HasPrefix(clientIP, "10.") { |
| 1956 | + framework.Failf("Source IP was NOT preserved") |
| 1957 | + } |
| 1958 | + }) |
| 1959 | + |
| 1960 | + ginkgo.It("should work for type=NodePort", func() { |
| 1961 | + namespace := f.Namespace.Name |
| 1962 | + serviceName := "external-local-nodeport" |
| 1963 | + jig := e2eservice.NewTestJig(cs, namespace, serviceName) |
| 1964 | + |
| 1965 | + svc, err := jig.CreateOnlyLocalNodePortService(true) |
| 1966 | + framework.ExpectNoError(err) |
| 1967 | + defer func() { |
| 1968 | + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) |
| 1969 | + framework.ExpectNoError(err) |
| 1970 | + }() |
| 1971 | + |
| 1972 | + tcpNodePort := int(svc.Spec.Ports[0].NodePort) |
| 1973 | + endpointsNodeMap, err := jig.GetEndpointNodes() |
| 1974 | + framework.ExpectNoError(err) |
| 1975 | + path := "/clientip" |
| 1976 | + |
| 1977 | + for nodeName, nodeIPs := range endpointsNodeMap { |
| 1978 | + nodeIP := nodeIPs[0] |
| 1979 | + ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path)) |
| 1980 | + content := GetHTTPContent(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, path) |
| 1981 | + clientIP := content.String() |
| 1982 | + framework.Logf("ClientIP detected by target pod using NodePort is %s", clientIP) |
| 1983 | + if strings.HasPrefix(clientIP, "10.") { |
| 1984 | + framework.Failf("Source IP was NOT preserved") |
| 1985 | + } |
| 1986 | + } |
| 1987 | + }) |
| 1988 | + |
| 1989 | + ginkgo.It("should only target nodes with endpoints", func() { |
| 1990 | + namespace := f.Namespace.Name |
| 1991 | + serviceName := "external-local-nodes" |
| 1992 | + jig := e2eservice.NewTestJig(cs, namespace, serviceName) |
| 1993 | + nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests) |
| 1994 | + framework.ExpectNoError(err) |
| 1995 | + |
| 1996 | + svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, false, |
| 1997 | + func(svc *v1.Service) { |
| 1998 | + // Change service port to avoid collision with opened hostPorts |
| 1999 | + // in other tests that run in parallel. |
| 2000 | + if len(svc.Spec.Ports) != 0 { |
| 2001 | + svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(svc.Spec.Ports[0].Port)) |
| 2002 | + svc.Spec.Ports[0].Port = 8081 |
| 2003 | + } |
| 2004 | + |
| 2005 | + }) |
| 2006 | + framework.ExpectNoError(err) |
| 2007 | + serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) |
| 2008 | + defer func() { |
| 2009 | + err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) |
| 2010 | + framework.ExpectNoError(err) |
| 2011 | + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) |
| 2012 | + framework.ExpectNoError(err) |
| 2013 | + }() |
| 2014 | + |
| 2015 | + healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) |
| 2016 | + if healthCheckNodePort == 0 { |
| 2017 | + framework.Failf("Service HealthCheck NodePort was not allocated") |
| 2018 | + } |
| 2019 | + |
| 2020 | + ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP) |
| 2021 | + |
| 2022 | + ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) |
| 2023 | + svcTCPPort := int(svc.Spec.Ports[0].Port) |
| 2024 | + |
| 2025 | + threshold := 2 |
| 2026 | + path := "/healthz" |
| 2027 | + for i := 0; i < len(nodes.Items); i++ { |
| 2028 | + endpointNodeName := nodes.Items[i].Name |
| 2029 | + |
| 2030 | + ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName) |
| 2031 | + _, err = jig.Run(func(rc *v1.ReplicationController) { |
| 2032 | + rc.Name = serviceName |
| 2033 | + if endpointNodeName != "" { |
| 2034 | + rc.Spec.Template.Spec.NodeName = endpointNodeName |
| 2035 | + } |
| 2036 | + }) |
| 2037 | + framework.ExpectNoError(err) |
| 2038 | + |
| 2039 | + ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName)) |
| 2040 | + err = jig.WaitForEndpointOnNode(endpointNodeName) |
| 2041 | + framework.ExpectNoError(err) |
| 2042 | + |
| 2043 | + // HealthCheck should pass only on the node where num(endpoints) > 0 |
| 2044 | + // All other nodes should fail the healthcheck on the service healthCheckNodePort |
| 2045 | + for n, publicIP := range ips { |
| 2046 | + // Make sure the loadbalancer picked up the health check change. |
| 2047 | + // Confirm traffic can reach backend through LB before checking healthcheck nodeport. |
| 2048 | + e2eservice.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout) |
| 2049 | + expectedSuccess := nodes.Items[n].Name == endpointNodeName |
| 2050 | + port := strconv.Itoa(healthCheckNodePort) |
| 2051 | + ipPort := net.JoinHostPort(publicIP, port) |
| 2052 | + framework.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess) |
| 2053 | + err := TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, e2eservice.KubeProxyEndpointLagTimeout, expectedSuccess, threshold) |
| 2054 | + framework.ExpectNoError(err) |
| 2055 | + } |
| 2056 | + framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName)) |
| 2057 | + } |
| 2058 | + }) |
| 2059 | + |
| 2060 | + ginkgo.It("should work from pods", func() { |
| 2061 | + var err error |
| 2062 | + namespace := f.Namespace.Name |
| 2063 | + serviceName := "external-local-pods" |
| 2064 | + jig := e2eservice.NewTestJig(cs, namespace, serviceName) |
| 2065 | + |
| 2066 | + svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil) |
| 2067 | + framework.ExpectNoError(err) |
| 2068 | + serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) |
| 2069 | + defer func() { |
| 2070 | + err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) |
| 2071 | + framework.ExpectNoError(err) |
| 2072 | + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) |
| 2073 | + framework.ExpectNoError(err) |
| 2074 | + }() |
| 2075 | + |
| 2076 | + ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) |
| 2077 | + port := strconv.Itoa(int(svc.Spec.Ports[0].Port)) |
| 2078 | + ipPort := net.JoinHostPort(ingressIP, port) |
| 2079 | + path := fmt.Sprintf("%s/clientip", ipPort) |
| 2080 | + |
| 2081 | + ginkgo.By("Creating pause pod deployment to make sure, pausePods are in desired state") |
| 2082 | + deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1) |
| 2083 | + framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") |
| 2084 | + |
| 2085 | + defer func() { |
| 2086 | + framework.Logf("Deleting deployment") |
| 2087 | + err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) |
| 2088 | + framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) |
| 2089 | + }() |
| 2090 | + |
| 2091 | + deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) |
| 2092 | + framework.ExpectNoError(err, "Error in retrieving pause pod deployment") |
| 2093 | + labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) |
| 2094 | + framework.ExpectNoError(err, "Error in setting LabelSelector as selector from deployment") |
| 2095 | + |
| 2096 | + pausePods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) |
| 2097 | + framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments") |
| 2098 | + |
| 2099 | + pausePod := pausePods.Items[0] |
| 2100 | + framework.Logf("Waiting up to %v curl %v", e2eservice.KubeProxyLagTimeout, path) |
| 2101 | + cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %v`, path) |
| 2102 | + |
| 2103 | + var srcIP string |
| 2104 | + loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs) |
| 2105 | + ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName)) |
| 2106 | + if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) { |
| 2107 | + stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd) |
| 2108 | + if err != nil { |
| 2109 | + framework.Logf("got err: %v, retry until timeout", err) |
| 2110 | + return false, nil |
| 2111 | + } |
| 2112 | + srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0]) |
| 2113 | + return srcIP == pausePod.Status.PodIP, nil |
| 2114 | + }); pollErr != nil { |
| 2115 | + framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", pausePod.Name, pausePod.Status.PodIP, srcIP) |
| 2116 | + } |
| 2117 | + }) |
| 2118 | + |
| 2119 | + // TODO: Get rid of [DisabledForLargeClusters] tag when issue #90047 is fixed. |
| 2120 | + ginkgo.It("should handle updates to ExternalTrafficPolicy field [DisabledForLargeClusters]", func() { |
| 2121 | + namespace := f.Namespace.Name |
| 2122 | + serviceName := "external-local-update" |
| 2123 | + jig := e2eservice.NewTestJig(cs, namespace, serviceName) |
| 2124 | + |
| 2125 | + nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests) |
| 2126 | + framework.ExpectNoError(err) |
| 2127 | + if len(nodes.Items) < 2 { |
| 2128 | + framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint") |
| 2129 | + } |
| 2130 | + |
| 2131 | + svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil) |
| 2132 | + framework.ExpectNoError(err) |
| 2133 | + serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) |
| 2134 | + defer func() { |
| 2135 | + err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) |
| 2136 | + framework.ExpectNoError(err) |
| 2137 | + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) |
| 2138 | + framework.ExpectNoError(err) |
| 2139 | + }() |
| 2140 | + |
| 2141 | + // save the health check node port because it disappears when ESIPP is turned off. |
| 2142 | + healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) |
| 2143 | + |
| 2144 | + ginkgo.By("turning ESIPP off") |
| 2145 | + svc, err = jig.UpdateService(func(svc *v1.Service) { |
| 2146 | + svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster |
| 2147 | + }) |
| 2148 | + framework.ExpectNoError(err) |
| 2149 | + if svc.Spec.HealthCheckNodePort > 0 { |
| 2150 | + framework.Failf("Service HealthCheck NodePort still present") |
| 2151 | + } |
| 2152 | + |
| 2153 | + endpointNodeMap, err := jig.GetEndpointNodes() |
| 2154 | + framework.ExpectNoError(err) |
| 2155 | + noEndpointNodeMap := map[string][]string{} |
| 2156 | + for _, n := range nodes.Items { |
| 2157 | + if _, ok := endpointNodeMap[n.Name]; ok { |
| 2158 | + continue |
| 2159 | + } |
| 2160 | + noEndpointNodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP) |
| 2161 | + } |
| 2162 | + |
| 2163 | + svcTCPPort := int(svc.Spec.Ports[0].Port) |
| 2164 | + svcNodePort := int(svc.Spec.Ports[0].NodePort) |
| 2165 | + ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) |
| 2166 | + path := "/clientip" |
| 2167 | + |
| 2168 | + ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap)) |
| 2169 | + for nodeName, nodeIPs := range noEndpointNodeMap { |
| 2170 | + ginkgo.By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path)) |
| 2171 | + GetHTTPContent(nodeIPs[0], svcNodePort, e2eservice.KubeProxyLagTimeout, path) |
| 2172 | + } |
| 2173 | + |
| 2174 | + for nodeName, nodeIPs := range endpointNodeMap { |
| 2175 | + ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0])) |
| 2176 | + var body bytes.Buffer |
| 2177 | + pollfn := func() (bool, error) { |
| 2178 | + result := e2enetwork.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil) |
| 2179 | + if result.Code == 0 { |
| 2180 | + return true, nil |
| 2181 | + } |
| 2182 | + body.Reset() |
| 2183 | + body.Write(result.Body) |
| 2184 | + return false, nil |
| 2185 | + } |
| 2186 | + if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollfn); pollErr != nil { |
| 2187 | + framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s", |
| 2188 | + nodeName, healthCheckNodePort, body.String()) |
| 2189 | + } |
| 2190 | + } |
| 2191 | + |
| 2192 | + // Poll till kube-proxy re-adds the MASQUERADE rule on the node. |
| 2193 | + ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP)) |
| 2194 | + var clientIP string |
| 2195 | + pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { |
| 2196 | + content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip") |
| 2197 | + clientIP = content.String() |
| 2198 | + if strings.HasPrefix(clientIP, "10.") { |
| 2199 | + return true, nil |
| 2200 | + } |
| 2201 | + return false, nil |
| 2202 | + }) |
| 2203 | + if pollErr != nil { |
| 2204 | + framework.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP) |
| 2205 | + } |
| 2206 | + |
| 2207 | + // TODO: We need to attempt to create another service with the previously |
| 2208 | + // allocated healthcheck nodePort. If the health check nodePort has been |
| 2209 | + // freed, the new service creation will succeed, upon which we cleanup. |
| 2210 | + // If the health check nodePort has NOT been freed, the new service |
| 2211 | + // creation will fail. |
| 2212 | + |
| 2213 | + ginkgo.By("setting ExternalTraffic field back to OnlyLocal") |
| 2214 | + svc, err = jig.UpdateService(func(svc *v1.Service) { |
| 2215 | + svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal |
| 2216 | + // Request the same healthCheckNodePort as before, to test the user-requested allocation path |
| 2217 | + svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort) |
| 2218 | + }) |
| 2219 | + framework.ExpectNoError(err) |
| 2220 | + pollErr = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { |
| 2221 | + content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path) |
| 2222 | + clientIP = content.String() |
| 2223 | + ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP)) |
| 2224 | + if !strings.HasPrefix(clientIP, "10.") { |
| 2225 | + return true, nil |
| 2226 | + } |
| 2227 | + return false, nil |
| 2228 | + }) |
| 2229 | + if pollErr != nil { |
| 2230 | + framework.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP) |
| 2231 | + } |
| 2232 | + }) |
| 2233 | + }) |
| 2234 | +}) |
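| | |
| | Note: the TODO above (verifying that the health check node port is freed) could be exercised with a probe Service that requests the same port. A minimal sketch, assuming a client-go clientset cs and the usual imports (context, v1 "k8s.io/api/core/v1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", clientset "k8s.io/client-go/kubernetes"); the helper name and Service shape are illustrative, not part of this patch: |
| | |
| | func healthCheckNodePortFreed(ctx context.Context, cs clientset.Interface, ns string, port int32) bool { |
| | 	probe := &v1.Service{ |
| | 		ObjectMeta: metav1.ObjectMeta{GenerateName: "hcnp-probe-"}, |
| | 		Spec: v1.ServiceSpec{ |
| | 			Type:                  v1.ServiceTypeLoadBalancer, |
| | 			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal, |
| | 			HealthCheckNodePort:   port, |
| | 			Selector:              map[string]string{"app": "hcnp-probe"}, |
| | 			Ports:                 []v1.ServicePort{{Port: 80}}, |
| | 		}, |
| | 	} |
| | 	created, err := cs.CoreV1().Services(ns).Create(ctx, probe, metav1.CreateOptions{}) |
| | 	if err != nil { |
| | 		// Creation fails while the old health check node port is still allocated. |
| | 		return false |
| | 	} |
| | 	// The port was free; remove the probe Service again. |
| | 	_ = cs.CoreV1().Services(ns).Delete(ctx, created.Name, metav1.DeleteOptions{}) |
| | 	return true |
| | } |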
| 2235 | diff --git a/test/e2e/node/recreate_node.go b/test/e2e/node/recreate_node.go |
| 2236 | index da3fc974485..b403fa7f737 100644 |
| 2237 | --- a/test/e2e/node/recreate_node.go |
| 2238 | +++ b/test/e2e/node/recreate_node.go |
| 2239 | @@ -1,3 +1,5 @@ |
| 2240 | +// +build !providerless |
| 2241 | + |
| 2242 | /* |
| 2243 | Copyright 2019 The Kubernetes Authors. |
| 2244 | |
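| | Note: "// +build !providerless" is a standard Go build constraint; each file tagged this way is compiled only when the providerless build tag is unset, which is the entire mechanism of this patch. A minimal sketch of the pattern (file contents illustrative): |
| | |
| | // Skipped entirely when building or testing with the tag, e.g.: |
| | //   go test -tags providerless ./test/e2e/... |
| | // The constraint must be followed by a blank line before the package clause. |
| | |
| | // +build !providerless |
| | |
| | package node |
| | |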
| 2245 | diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go |
| 2246 | index 334a6f5b9f6..59fbdee014b 100644 |
| 2247 | --- a/test/e2e/scheduling/nvidia-gpus.go |
| 2248 | +++ b/test/e2e/scheduling/nvidia-gpus.go |
| 2249 | @@ -1,3 +1,5 @@ |
| 2250 | +// +build !providerless |
| 2251 | + |
| 2252 | /* |
| 2253 | Copyright 2017 The Kubernetes Authors. |
| 2254 | |
| 2255 | diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go |
| 2256 | index 513ed07543f..78c0f081990 100644 |
| 2257 | --- a/test/e2e/scheduling/ubernetes_lite_volumes.go |
| 2258 | +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go |
| 2259 | @@ -1,3 +1,5 @@ |
| 2260 | +// +build !providerless |
| 2261 | + |
| 2262 | /* |
| 2263 | Copyright 2017 The Kubernetes Authors. |
| 2264 | |
| 2265 | diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go |
| 2266 | index d5183f28081..de25e2cf007 100644 |
| 2267 | --- a/test/e2e/storage/drivers/in_tree.go |
| 2268 | +++ b/test/e2e/storage/drivers/in_tree.go |
| 2269 | @@ -38,10 +38,8 @@ package drivers |
| 2270 | import ( |
| 2271 | "context" |
| 2272 | "fmt" |
| 2273 | - "os/exec" |
| 2274 | "strconv" |
| 2275 | "strings" |
| 2276 | - "time" |
| 2277 | |
| 2278 | "github.com/onsi/ginkgo" |
| 2279 | v1 "k8s.io/api/core/v1" |
| 2280 | @@ -57,13 +55,11 @@ import ( |
| 2281 | e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" |
| 2282 | e2enode "k8s.io/kubernetes/test/e2e/framework/node" |
| 2283 | e2epod "k8s.io/kubernetes/test/e2e/framework/pod" |
| 2284 | - e2epv "k8s.io/kubernetes/test/e2e/framework/pv" |
| 2285 | e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" |
| 2286 | e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" |
| 2287 | "k8s.io/kubernetes/test/e2e/storage/testpatterns" |
| 2288 | "k8s.io/kubernetes/test/e2e/storage/testsuites" |
| 2289 | "k8s.io/kubernetes/test/e2e/storage/utils" |
| 2290 | - vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere" |
| 2291 | imageutils "k8s.io/kubernetes/test/utils/image" |
| 2292 | ) |
| 2293 | |
| 2294 | @@ -1032,734 +1028,6 @@ func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes |
| 2295 | }, func() {} |
| 2296 | } |
| 2297 | |
| 2298 | -// Cinder |
| 2299 | -// This driver assumes that OpenStack client tools are installed |
| 2300 | -// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone) |
| 2301 | -// and that the usual OpenStack authentication env. variables are set |
| 2302 | -// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least). |
| 2303 | -type cinderDriver struct { |
| 2304 | - driverInfo testsuites.DriverInfo |
| 2305 | -} |
| 2306 | - |
| 2307 | -type cinderVolume struct { |
| 2308 | - volumeName string |
| 2309 | - volumeID string |
| 2310 | -} |
| 2311 | - |
| 2312 | -var _ testsuites.TestDriver = &cinderDriver{} |
| 2313 | -var _ testsuites.PreprovisionedVolumeTestDriver = &cinderDriver{} |
| 2314 | -var _ testsuites.InlineVolumeTestDriver = &cinderDriver{} |
| 2315 | -var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{} |
| 2316 | -var _ testsuites.DynamicPVTestDriver = &cinderDriver{} |
| 2317 | - |
| 2318 | -// InitCinderDriver returns cinderDriver that implements TestDriver interface |
| 2319 | -func InitCinderDriver() testsuites.TestDriver { |
| 2320 | - return &cinderDriver{ |
| 2321 | - driverInfo: testsuites.DriverInfo{ |
| 2322 | - Name: "cinder", |
| 2323 | - InTreePluginName: "kubernetes.io/cinder", |
| 2324 | - MaxFileSize: testpatterns.FileSizeMedium, |
| 2325 | - SupportedSizeRange: e2evolume.SizeRange{ |
| 2326 | - Min: "5Gi", |
| 2327 | - }, |
| 2328 | - SupportedFsType: sets.NewString( |
| 2329 | - "", // Default fsType |
| 2330 | - "ext3", |
| 2331 | - ), |
| 2332 | - TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 2333 | - Capabilities: map[testsuites.Capability]bool{ |
| 2334 | - testsuites.CapPersistence: true, |
| 2335 | - testsuites.CapFsGroup: true, |
| 2336 | - testsuites.CapExec: true, |
| 2337 | - testsuites.CapBlock: true, |
| 2338 | - // Cinder supports volume limits, but the test creates large |
| 2339 | - // number of volumes and times out test suites. |
| 2340 | - testsuites.CapVolumeLimits: false, |
| 2341 | - testsuites.CapTopology: true, |
| 2342 | - }, |
| 2343 | - }, |
| 2344 | - } |
| 2345 | -} |
| 2346 | - |
| 2347 | -func (c *cinderDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 2348 | - return &c.driverInfo |
| 2349 | -} |
| 2350 | - |
| 2351 | -func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 2352 | - e2eskipper.SkipUnlessProviderIs("openstack") |
| 2353 | -} |
| 2354 | - |
| 2355 | -func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 2356 | - cv, ok := e2evolume.(*cinderVolume) |
| 2357 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") |
| 2358 | - |
| 2359 | - volSource := v1.VolumeSource{ |
| 2360 | - Cinder: &v1.CinderVolumeSource{ |
| 2361 | - VolumeID: cv.volumeID, |
| 2362 | - ReadOnly: readOnly, |
| 2363 | - }, |
| 2364 | - } |
| 2365 | - if fsType != "" { |
| 2366 | - volSource.Cinder.FSType = fsType |
| 2367 | - } |
| 2368 | - return &volSource |
| 2369 | -} |
| 2370 | - |
| 2371 | -func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 2372 | - cv, ok := e2evolume.(*cinderVolume) |
| 2373 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") |
| 2374 | - |
| 2375 | - pvSource := v1.PersistentVolumeSource{ |
| 2376 | - Cinder: &v1.CinderPersistentVolumeSource{ |
| 2377 | - VolumeID: cv.volumeID, |
| 2378 | - ReadOnly: readOnly, |
| 2379 | - }, |
| 2380 | - } |
| 2381 | - if fsType != "" { |
| 2382 | - pvSource.Cinder.FSType = fsType |
| 2383 | - } |
| 2384 | - return &pvSource, nil |
| 2385 | -} |
| 2386 | - |
| 2387 | -func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 2388 | - provisioner := "kubernetes.io/cinder" |
| 2389 | - parameters := map[string]string{} |
| 2390 | - if fsType != "" { |
| 2391 | - parameters["fsType"] = fsType |
| 2392 | - } |
| 2393 | - ns := config.Framework.Namespace.Name |
| 2394 | - suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name) |
| 2395 | - |
| 2396 | - return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) |
| 2397 | -} |
| 2398 | - |
| 2399 | -func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 2400 | - return &testsuites.PerTestConfig{ |
| 2401 | - Driver: c, |
| 2402 | - Prefix: "cinder", |
| 2403 | - Framework: f, |
| 2404 | - }, func() {} |
| 2405 | -} |
| 2406 | - |
| 2407 | -func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 2408 | - f := config.Framework |
| 2409 | - ns := f.Namespace |
| 2410 | - |
| 2411 | - // We assume that namespace.Name is a random string |
| 2412 | - volumeName := ns.Name |
| 2413 | - ginkgo.By("creating a test Cinder volume") |
| 2414 | - output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput() |
| 2415 | - outputString := string(output[:]) |
| 2416 | - framework.Logf("cinder output:\n%s", outputString) |
| 2417 | - framework.ExpectNoError(err) |
| 2418 | - |
| 2419 | - // Parse 'id'' from stdout. Expected format: |
| 2420 | - // | attachments | [] | |
| 2421 | - // | availability_zone | nova | |
| 2422 | - // ... |
| 2423 | - // | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 | |
| 2424 | - volumeID := "" |
| 2425 | - for _, line := range strings.Split(outputString, "\n") { |
| 2426 | - fields := strings.Fields(line) |
| 2427 | - if len(fields) != 5 { |
| 2428 | - continue |
| 2429 | - } |
| 2430 | - if fields[1] != "id" { |
| 2431 | - continue |
| 2432 | - } |
| 2433 | - volumeID = fields[3] |
| 2434 | - break |
| 2435 | - } |
| 2436 | - framework.Logf("Volume ID: %s", volumeID) |
| 2437 | - framework.ExpectNotEqual(volumeID, "") |
| 2438 | - return &cinderVolume{ |
| 2439 | - volumeName: volumeName, |
| 2440 | - volumeID: volumeID, |
| 2441 | - } |
| 2442 | -} |
| 2443 | - |
| 2444 | -func (v *cinderVolume) DeleteVolume() { |
| 2445 | - name := v.volumeName |
| 2446 | - |
| 2447 | - // Try to delete the volume for several seconds - it takes |
| 2448 | - // a while for the plugin to detach it. |
| 2449 | - var output []byte |
| 2450 | - var err error |
| 2451 | - timeout := time.Second * 120 |
| 2452 | - |
| 2453 | - framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name) |
| 2454 | - for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { |
| 2455 | - output, err = exec.Command("cinder", "delete", name).CombinedOutput() |
| 2456 | - if err == nil { |
| 2457 | - framework.Logf("Cinder volume %s deleted", name) |
| 2458 | - return |
| 2459 | - } |
| 2460 | - framework.Logf("Failed to delete volume %s: %v", name, err) |
| 2461 | - } |
| 2462 | - framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:])) |
| 2463 | -} |
| 2464 | - |
| 2465 | -// GCE |
| 2466 | -type gcePdDriver struct { |
| 2467 | - driverInfo testsuites.DriverInfo |
| 2468 | -} |
| 2469 | - |
| 2470 | -type gcePdVolume struct { |
| 2471 | - volumeName string |
| 2472 | -} |
| 2473 | - |
| 2474 | -var _ testsuites.TestDriver = &gcePdDriver{} |
| 2475 | -var _ testsuites.PreprovisionedVolumeTestDriver = &gcePdDriver{} |
| 2476 | -var _ testsuites.InlineVolumeTestDriver = &gcePdDriver{} |
| 2477 | -var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{} |
| 2478 | -var _ testsuites.DynamicPVTestDriver = &gcePdDriver{} |
| 2479 | - |
| 2480 | -// InitGcePdDriver returns gcePdDriver that implements TestDriver interface |
| 2481 | -func InitGcePdDriver() testsuites.TestDriver { |
| 2482 | - // In current test structure, it first initialize the driver and then set up |
| 2483 | - // the new framework, so we cannot get the correct OS here. So here set to |
| 2484 | - // support all fs types including both linux and windows. We have code to check Node OS later |
| 2485 | - // during test. |
| 2486 | - supportedTypes := sets.NewString( |
| 2487 | - "", // Default fsType |
| 2488 | - "ext2", |
| 2489 | - "ext3", |
| 2490 | - "ext4", |
| 2491 | - "xfs", |
| 2492 | - "ntfs", |
| 2493 | - ) |
| 2494 | - return &gcePdDriver{ |
| 2495 | - driverInfo: testsuites.DriverInfo{ |
| 2496 | - Name: "gcepd", |
| 2497 | - InTreePluginName: "kubernetes.io/gce-pd", |
| 2498 | - MaxFileSize: testpatterns.FileSizeMedium, |
| 2499 | - SupportedSizeRange: e2evolume.SizeRange{ |
| 2500 | - Min: "5Gi", |
| 2501 | - }, |
| 2502 | - SupportedFsType: supportedTypes, |
| 2503 | - SupportedMountOption: sets.NewString("debug", "nouid32"), |
| 2504 | - TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 2505 | - Capabilities: map[testsuites.Capability]bool{ |
| 2506 | - testsuites.CapPersistence: true, |
| 2507 | - testsuites.CapFsGroup: true, |
| 2508 | - testsuites.CapBlock: true, |
| 2509 | - testsuites.CapExec: true, |
| 2510 | - testsuites.CapMultiPODs: true, |
| 2511 | - testsuites.CapControllerExpansion: true, |
| 2512 | - testsuites.CapNodeExpansion: true, |
| 2513 | - // GCE supports volume limits, but the test creates large |
| 2514 | - // number of volumes and times out test suites. |
| 2515 | - testsuites.CapVolumeLimits: false, |
| 2516 | - testsuites.CapTopology: true, |
| 2517 | - }, |
| 2518 | - }, |
| 2519 | - } |
| 2520 | -} |
| 2521 | - |
| 2522 | -func (g *gcePdDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 2523 | - return &g.driverInfo |
| 2524 | -} |
| 2525 | - |
| 2526 | -func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 2527 | - e2eskipper.SkipUnlessProviderIs("gce", "gke") |
| 2528 | - if pattern.FeatureTag == "[sig-windows]" { |
| 2529 | - e2eskipper.SkipUnlessNodeOSDistroIs("windows") |
| 2530 | - } |
| 2531 | -} |
| 2532 | - |
| 2533 | -func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 2534 | - gv, ok := e2evolume.(*gcePdVolume) |
| 2535 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") |
| 2536 | - volSource := v1.VolumeSource{ |
| 2537 | - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ |
| 2538 | - PDName: gv.volumeName, |
| 2539 | - ReadOnly: readOnly, |
| 2540 | - }, |
| 2541 | - } |
| 2542 | - if fsType != "" { |
| 2543 | - volSource.GCEPersistentDisk.FSType = fsType |
| 2544 | - } |
| 2545 | - return &volSource |
| 2546 | -} |
| 2547 | - |
| 2548 | -func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 2549 | - gv, ok := e2evolume.(*gcePdVolume) |
| 2550 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") |
| 2551 | - pvSource := v1.PersistentVolumeSource{ |
| 2552 | - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ |
| 2553 | - PDName: gv.volumeName, |
| 2554 | - ReadOnly: readOnly, |
| 2555 | - }, |
| 2556 | - } |
| 2557 | - if fsType != "" { |
| 2558 | - pvSource.GCEPersistentDisk.FSType = fsType |
| 2559 | - } |
| 2560 | - return &pvSource, nil |
| 2561 | -} |
| 2562 | - |
| 2563 | -func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 2564 | - provisioner := "kubernetes.io/gce-pd" |
| 2565 | - parameters := map[string]string{} |
| 2566 | - if fsType != "" { |
| 2567 | - parameters["fsType"] = fsType |
| 2568 | - } |
| 2569 | - ns := config.Framework.Namespace.Name |
| 2570 | - suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name) |
| 2571 | - delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer |
| 2572 | - |
| 2573 | - return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) |
| 2574 | -} |
| 2575 | - |
| 2576 | -func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 2577 | - config := &testsuites.PerTestConfig{ |
| 2578 | - Driver: g, |
| 2579 | - Prefix: "gcepd", |
| 2580 | - Framework: f, |
| 2581 | - } |
| 2582 | - |
| 2583 | - if framework.NodeOSDistroIs("windows") { |
| 2584 | - config.ClientNodeSelection = e2epod.NodeSelection{ |
| 2585 | - Selector: map[string]string{ |
| 2586 | - "kubernetes.io/os": "windows", |
| 2587 | - }, |
| 2588 | - } |
| 2589 | - } |
| 2590 | - return config, func() {} |
| 2591 | - |
| 2592 | -} |
| 2593 | - |
| 2594 | -func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 2595 | - zone := getInlineVolumeZone(config.Framework) |
| 2596 | - if volType == testpatterns.InlineVolume { |
| 2597 | - // PD will be created in framework.TestContext.CloudConfig.Zone zone, |
| 2598 | - // so pods should be also scheduled there. |
| 2599 | - config.ClientNodeSelection = e2epod.NodeSelection{ |
| 2600 | - Selector: map[string]string{ |
| 2601 | - v1.LabelZoneFailureDomain: zone, |
| 2602 | - }, |
| 2603 | - } |
| 2604 | - } |
| 2605 | - ginkgo.By("creating a test gce pd volume") |
| 2606 | - vname, err := e2epv.CreatePDWithRetryAndZone(zone) |
| 2607 | - framework.ExpectNoError(err) |
| 2608 | - return &gcePdVolume{ |
| 2609 | - volumeName: vname, |
| 2610 | - } |
| 2611 | -} |
| 2612 | - |
| 2613 | -func (v *gcePdVolume) DeleteVolume() { |
| 2614 | - e2epv.DeletePDWithRetry(v.volumeName) |
| 2615 | -} |
| 2616 | - |
| 2617 | -// vSphere |
| 2618 | -type vSphereDriver struct { |
| 2619 | - driverInfo testsuites.DriverInfo |
| 2620 | -} |
| 2621 | - |
| 2622 | -type vSphereVolume struct { |
| 2623 | - volumePath string |
| 2624 | - nodeInfo *vspheretest.NodeInfo |
| 2625 | -} |
| 2626 | - |
| 2627 | -var _ testsuites.TestDriver = &vSphereDriver{} |
| 2628 | -var _ testsuites.PreprovisionedVolumeTestDriver = &vSphereDriver{} |
| 2629 | -var _ testsuites.InlineVolumeTestDriver = &vSphereDriver{} |
| 2630 | -var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{} |
| 2631 | -var _ testsuites.DynamicPVTestDriver = &vSphereDriver{} |
| 2632 | - |
| 2633 | -// InitVSphereDriver returns vSphereDriver that implements TestDriver interface |
| 2634 | -func InitVSphereDriver() testsuites.TestDriver { |
| 2635 | - return &vSphereDriver{ |
| 2636 | - driverInfo: testsuites.DriverInfo{ |
| 2637 | - Name: "vsphere", |
| 2638 | - InTreePluginName: "kubernetes.io/vsphere-volume", |
| 2639 | - MaxFileSize: testpatterns.FileSizeMedium, |
| 2640 | - SupportedSizeRange: e2evolume.SizeRange{ |
| 2641 | - Min: "5Gi", |
| 2642 | - }, |
| 2643 | - SupportedFsType: sets.NewString( |
| 2644 | - "", // Default fsType |
| 2645 | - "ext4", |
| 2646 | - ), |
| 2647 | - TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 2648 | - Capabilities: map[testsuites.Capability]bool{ |
| 2649 | - testsuites.CapPersistence: true, |
| 2650 | - testsuites.CapFsGroup: true, |
| 2651 | - testsuites.CapExec: true, |
| 2652 | - testsuites.CapMultiPODs: true, |
| 2653 | - testsuites.CapTopology: true, |
| 2654 | - }, |
| 2655 | - }, |
| 2656 | - } |
| 2657 | -} |
| 2658 | -func (v *vSphereDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 2659 | - return &v.driverInfo |
| 2660 | -} |
| 2661 | - |
| 2662 | -func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 2663 | - e2eskipper.SkipUnlessProviderIs("vsphere") |
| 2664 | -} |
| 2665 | - |
| 2666 | -func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 2667 | - vsv, ok := e2evolume.(*vSphereVolume) |
| 2668 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") |
| 2669 | - |
| 2670 | - // vSphere driver doesn't seem to support readOnly volume |
| 2671 | - // TODO: check if it is correct |
| 2672 | - if readOnly { |
| 2673 | - return nil |
| 2674 | - } |
| 2675 | - volSource := v1.VolumeSource{ |
| 2676 | - VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ |
| 2677 | - VolumePath: vsv.volumePath, |
| 2678 | - }, |
| 2679 | - } |
| 2680 | - if fsType != "" { |
| 2681 | - volSource.VsphereVolume.FSType = fsType |
| 2682 | - } |
| 2683 | - return &volSource |
| 2684 | -} |
| 2685 | - |
| 2686 | -func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 2687 | - vsv, ok := e2evolume.(*vSphereVolume) |
| 2688 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") |
| 2689 | - |
| 2690 | - // vSphere driver doesn't seem to support readOnly volume |
| 2691 | - // TODO: check if it is correct |
| 2692 | - if readOnly { |
| 2693 | - return nil, nil |
| 2694 | - } |
| 2695 | - pvSource := v1.PersistentVolumeSource{ |
| 2696 | - VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ |
| 2697 | - VolumePath: vsv.volumePath, |
| 2698 | - }, |
| 2699 | - } |
| 2700 | - if fsType != "" { |
| 2701 | - pvSource.VsphereVolume.FSType = fsType |
| 2702 | - } |
| 2703 | - return &pvSource, nil |
| 2704 | -} |
| 2705 | - |
| 2706 | -func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 2707 | - provisioner := "kubernetes.io/vsphere-volume" |
| 2708 | - parameters := map[string]string{} |
| 2709 | - if fsType != "" { |
| 2710 | - parameters["fsType"] = fsType |
| 2711 | - } |
| 2712 | - ns := config.Framework.Namespace.Name |
| 2713 | - suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name) |
| 2714 | - |
| 2715 | - return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) |
| 2716 | -} |
| 2717 | - |
| 2718 | -func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 2719 | - return &testsuites.PerTestConfig{ |
| 2720 | - Driver: v, |
| 2721 | - Prefix: "vsphere", |
| 2722 | - Framework: f, |
| 2723 | - }, func() {} |
| 2724 | -} |
| 2725 | - |
| 2726 | -func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 2727 | - f := config.Framework |
| 2728 | - vspheretest.Bootstrap(f) |
| 2729 | - nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo() |
| 2730 | - volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef) |
| 2731 | - framework.ExpectNoError(err) |
| 2732 | - return &vSphereVolume{ |
| 2733 | - volumePath: volumePath, |
| 2734 | - nodeInfo: nodeInfo, |
| 2735 | - } |
| 2736 | -} |
| 2737 | - |
| 2738 | -func (v *vSphereVolume) DeleteVolume() { |
| 2739 | - v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef) |
| 2740 | -} |
| 2741 | - |
| 2742 | -// Azure Disk |
| 2743 | -type azureDiskDriver struct { |
| 2744 | - driverInfo testsuites.DriverInfo |
| 2745 | -} |
| 2746 | - |
| 2747 | -type azureDiskVolume struct { |
| 2748 | - volumeName string |
| 2749 | -} |
| 2750 | - |
| 2751 | -var _ testsuites.TestDriver = &azureDiskDriver{} |
| 2752 | -var _ testsuites.PreprovisionedVolumeTestDriver = &azureDiskDriver{} |
| 2753 | -var _ testsuites.InlineVolumeTestDriver = &azureDiskDriver{} |
| 2754 | -var _ testsuites.PreprovisionedPVTestDriver = &azureDiskDriver{} |
| 2755 | -var _ testsuites.DynamicPVTestDriver = &azureDiskDriver{} |
| 2756 | - |
| 2757 | -// InitAzureDiskDriver returns azureDiskDriver that implements TestDriver interface |
| 2758 | -func InitAzureDiskDriver() testsuites.TestDriver { |
| 2759 | - return &azureDiskDriver{ |
| 2760 | - driverInfo: testsuites.DriverInfo{ |
| 2761 | - Name: "azure-disk", |
| 2762 | - InTreePluginName: "kubernetes.io/azure-disk", |
| 2763 | - MaxFileSize: testpatterns.FileSizeMedium, |
| 2764 | - SupportedSizeRange: e2evolume.SizeRange{ |
| 2765 | - Min: "5Gi", |
| 2766 | - }, |
| 2767 | - SupportedFsType: sets.NewString( |
| 2768 | - "", // Default fsType |
| 2769 | - "ext3", |
| 2770 | - "ext4", |
| 2771 | - "xfs", |
| 2772 | - ), |
| 2773 | - TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 2774 | - Capabilities: map[testsuites.Capability]bool{ |
| 2775 | - testsuites.CapPersistence: true, |
| 2776 | - testsuites.CapFsGroup: true, |
| 2777 | - testsuites.CapBlock: true, |
| 2778 | - testsuites.CapExec: true, |
| 2779 | - testsuites.CapMultiPODs: true, |
| 2780 | - // Azure supports volume limits, but the test creates large |
| 2781 | - // number of volumes and times out test suites. |
| 2782 | - testsuites.CapVolumeLimits: false, |
| 2783 | - testsuites.CapTopology: true, |
| 2784 | - }, |
| 2785 | - }, |
| 2786 | - } |
| 2787 | -} |
| 2788 | - |
| 2789 | -func (a *azureDiskDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 2790 | - return &a.driverInfo |
| 2791 | -} |
| 2792 | - |
| 2793 | -func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 2794 | - e2eskipper.SkipUnlessProviderIs("azure") |
| 2795 | -} |
| 2796 | - |
| 2797 | -func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 2798 | - av, ok := e2evolume.(*azureDiskVolume) |
| 2799 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") |
| 2800 | - diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] |
| 2801 | - |
| 2802 | - kind := v1.AzureManagedDisk |
| 2803 | - volSource := v1.VolumeSource{ |
| 2804 | - AzureDisk: &v1.AzureDiskVolumeSource{ |
| 2805 | - DiskName: diskName, |
| 2806 | - DataDiskURI: av.volumeName, |
| 2807 | - Kind: &kind, |
| 2808 | - ReadOnly: &readOnly, |
| 2809 | - }, |
| 2810 | - } |
| 2811 | - if fsType != "" { |
| 2812 | - volSource.AzureDisk.FSType = &fsType |
| 2813 | - } |
| 2814 | - return &volSource |
| 2815 | -} |
| 2816 | - |
| 2817 | -func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 2818 | - av, ok := e2evolume.(*azureDiskVolume) |
| 2819 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") |
| 2820 | - |
| 2821 | - diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] |
| 2822 | - |
| 2823 | - kind := v1.AzureManagedDisk |
| 2824 | - pvSource := v1.PersistentVolumeSource{ |
| 2825 | - AzureDisk: &v1.AzureDiskVolumeSource{ |
| 2826 | - DiskName: diskName, |
| 2827 | - DataDiskURI: av.volumeName, |
| 2828 | - Kind: &kind, |
| 2829 | - ReadOnly: &readOnly, |
| 2830 | - }, |
| 2831 | - } |
| 2832 | - if fsType != "" { |
| 2833 | - pvSource.AzureDisk.FSType = &fsType |
| 2834 | - } |
| 2835 | - return &pvSource, nil |
| 2836 | -} |
| 2837 | - |
| 2838 | -func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 2839 | - provisioner := "kubernetes.io/azure-disk" |
| 2840 | - parameters := map[string]string{} |
| 2841 | - if fsType != "" { |
| 2842 | - parameters["fsType"] = fsType |
| 2843 | - } |
| 2844 | - ns := config.Framework.Namespace.Name |
| 2845 | - suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name) |
| 2846 | - delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer |
| 2847 | - |
| 2848 | - return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) |
| 2849 | -} |
| 2850 | - |
| 2851 | -func (a *azureDiskDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 2852 | - return &testsuites.PerTestConfig{ |
| 2853 | - Driver: a, |
| 2854 | - Prefix: "azure", |
| 2855 | - Framework: f, |
| 2856 | - }, func() {} |
| 2857 | -} |
| 2858 | - |
| 2859 | -func (a *azureDiskDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 2860 | - ginkgo.By("creating a test azure disk volume") |
| 2861 | - zone := getInlineVolumeZone(config.Framework) |
| 2862 | - if volType == testpatterns.InlineVolume { |
| 2863 | - // PD will be created in framework.TestContext.CloudConfig.Zone zone, |
| 2864 | - // so pods should be also scheduled there. |
| 2865 | - config.ClientNodeSelection = e2epod.NodeSelection{ |
| 2866 | - Selector: map[string]string{ |
| 2867 | - v1.LabelZoneFailureDomain: zone, |
| 2868 | - }, |
| 2869 | - } |
| 2870 | - } |
| 2871 | - volumeName, err := e2epv.CreatePDWithRetryAndZone(zone) |
| 2872 | - framework.ExpectNoError(err) |
| 2873 | - return &azureDiskVolume{ |
| 2874 | - volumeName: volumeName, |
| 2875 | - } |
| 2876 | -} |
| 2877 | - |
| 2878 | -func (v *azureDiskVolume) DeleteVolume() { |
| 2879 | - e2epv.DeletePDWithRetry(v.volumeName) |
| 2880 | -} |
| 2881 | - |
| 2882 | -// AWS |
| 2883 | -type awsDriver struct { |
| 2884 | - driverInfo testsuites.DriverInfo |
| 2885 | -} |
| 2886 | - |
| 2887 | -type awsVolume struct { |
| 2888 | - volumeName string |
| 2889 | -} |
| 2890 | - |
| 2891 | -var _ testsuites.TestDriver = &awsDriver{} |
| 2892 | - |
| 2893 | -var _ testsuites.PreprovisionedVolumeTestDriver = &awsDriver{} |
| 2894 | -var _ testsuites.InlineVolumeTestDriver = &awsDriver{} |
| 2895 | -var _ testsuites.PreprovisionedPVTestDriver = &awsDriver{} |
| 2896 | -var _ testsuites.DynamicPVTestDriver = &awsDriver{} |
| 2897 | - |
| 2898 | -// InitAwsDriver returns awsDriver that implements TestDriver interface |
| 2899 | -func InitAwsDriver() testsuites.TestDriver { |
| 2900 | - return &awsDriver{ |
| 2901 | - driverInfo: testsuites.DriverInfo{ |
| 2902 | - Name: "aws", |
| 2903 | - InTreePluginName: "kubernetes.io/aws-ebs", |
| 2904 | - MaxFileSize: testpatterns.FileSizeMedium, |
| 2905 | - SupportedSizeRange: e2evolume.SizeRange{ |
| 2906 | - Min: "5Gi", |
| 2907 | - }, |
| 2908 | - SupportedFsType: sets.NewString( |
| 2909 | - "", // Default fsType |
| 2910 | - "ext2", |
| 2911 | - "ext3", |
| 2912 | - "ext4", |
| 2913 | - "xfs", |
| 2914 | - "ntfs", |
| 2915 | - ), |
| 2916 | - SupportedMountOption: sets.NewString("debug", "nouid32"), |
| 2917 | - TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 2918 | - Capabilities: map[testsuites.Capability]bool{ |
| 2919 | - testsuites.CapPersistence: true, |
| 2920 | - testsuites.CapFsGroup: true, |
| 2921 | - testsuites.CapBlock: true, |
| 2922 | - testsuites.CapExec: true, |
| 2923 | - testsuites.CapMultiPODs: true, |
| 2924 | - testsuites.CapControllerExpansion: true, |
| 2925 | - testsuites.CapNodeExpansion: true, |
| 2926 | - // AWS supports volume limits, but the test creates large |
| 2927 | - // number of volumes and times out test suites. |
| 2928 | - testsuites.CapVolumeLimits: false, |
| 2929 | - testsuites.CapTopology: true, |
| 2930 | - }, |
| 2931 | - }, |
| 2932 | - } |
| 2933 | -} |
| 2934 | - |
| 2935 | -func (a *awsDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 2936 | - return &a.driverInfo |
| 2937 | -} |
| 2938 | - |
| 2939 | -func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 2940 | - e2eskipper.SkipUnlessProviderIs("aws") |
| 2941 | -} |
| 2942 | - |
| 2943 | -func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 2944 | - av, ok := e2evolume.(*awsVolume) |
| 2945 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") |
| 2946 | - volSource := v1.VolumeSource{ |
| 2947 | - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ |
| 2948 | - VolumeID: av.volumeName, |
| 2949 | - ReadOnly: readOnly, |
| 2950 | - }, |
| 2951 | - } |
| 2952 | - if fsType != "" { |
| 2953 | - volSource.AWSElasticBlockStore.FSType = fsType |
| 2954 | - } |
| 2955 | - return &volSource |
| 2956 | -} |
| 2957 | - |
| 2958 | -func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 2959 | - av, ok := e2evolume.(*awsVolume) |
| 2960 | - framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") |
| 2961 | - pvSource := v1.PersistentVolumeSource{ |
| 2962 | - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ |
| 2963 | - VolumeID: av.volumeName, |
| 2964 | - ReadOnly: readOnly, |
| 2965 | - }, |
| 2966 | - } |
| 2967 | - if fsType != "" { |
| 2968 | - pvSource.AWSElasticBlockStore.FSType = fsType |
| 2969 | - } |
| 2970 | - return &pvSource, nil |
| 2971 | -} |
| 2972 | - |
| 2973 | -func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 2974 | - provisioner := "kubernetes.io/aws-ebs" |
| 2975 | - parameters := map[string]string{} |
| 2976 | - if fsType != "" { |
| 2977 | - parameters["fsType"] = fsType |
| 2978 | - } |
| 2979 | - ns := config.Framework.Namespace.Name |
| 2980 | - suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name) |
| 2981 | - delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer |
| 2982 | - |
| 2983 | - return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) |
| 2984 | -} |
| 2985 | - |
| 2986 | -func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 2987 | - config := &testsuites.PerTestConfig{ |
| 2988 | - Driver: a, |
| 2989 | - Prefix: "aws", |
| 2990 | - Framework: f, |
| 2991 | - } |
| 2992 | - |
| 2993 | - if framework.NodeOSDistroIs("windows") { |
| 2994 | - config.ClientNodeSelection = e2epod.NodeSelection{ |
| 2995 | - Selector: map[string]string{ |
| 2996 | - "kubernetes.io/os": "windows", |
| 2997 | - }, |
| 2998 | - } |
| 2999 | - } |
| 3000 | - return config, func() {} |
| 3001 | -} |
| 3002 | - |
| 3003 | -func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 3004 | - zone := getInlineVolumeZone(config.Framework) |
| 3005 | - if volType == testpatterns.InlineVolume { |
| 3006 | - // PD will be created in framework.TestContext.CloudConfig.Zone zone, |
| 3007 | - // so pods should be also scheduled there. |
| 3008 | - config.ClientNodeSelection = e2epod.NodeSelection{ |
| 3009 | - Selector: map[string]string{ |
| 3010 | - v1.LabelZoneFailureDomain: zone, |
| 3011 | - }, |
| 3012 | - } |
| 3013 | - } |
| 3014 | - ginkgo.By("creating a test aws volume") |
| 3015 | - vname, err := e2epv.CreatePDWithRetryAndZone(zone) |
| 3016 | - framework.ExpectNoError(err) |
| 3017 | - return &awsVolume{ |
| 3018 | - volumeName: vname, |
| 3019 | - } |
| 3020 | -} |
| 3021 | - |
| 3022 | -func (v *awsVolume) DeleteVolume() { |
| 3023 | - e2epv.DeletePDWithRetry(v.volumeName) |
| 3024 | -} |
| 3025 | - |
| 3026 | // local |
| 3027 | type localDriver struct { |
| 3028 | driverInfo testsuites.DriverInfo |
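| | |
| | Note: the provider-specific drivers removed above are re-added verbatim in the tag-gated file below. For orientation, a sketch of how the always-compiled and provider-only driver lists can be kept apart (variable names are assumptions; the real wiring lives in in_tree_volumes.go and in_tree_volumes_providers.go from the diffstat): |
| | |
| | // in_tree_volumes.go: compiled in every configuration. |
| | var testDrivers = []func() testsuites.TestDriver{ |
| | 	drivers.InitNFSDriver, |
| | 	drivers.InitHostPathDriver, |
| | 	drivers.InitEmptydirDriver, |
| | } |
| | |
| | // in_tree_volumes_providers.go, guarded by "// +build !providerless": |
| | // the cloud drivers exist only when providers are compiled in. |
| | var providerTestDrivers = []func() testsuites.TestDriver{ |
| | 	drivers.InitCinderDriver, |
| | 	drivers.InitGcePdDriver, |
| | 	drivers.InitVSphereDriver, |
| | 	drivers.InitAzureDiskDriver, |
| | 	drivers.InitAwsDriver, |
| | } |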
| 3029 | diff --git a/test/e2e/storage/drivers/in_tree_providers.go b/test/e2e/storage/drivers/in_tree_providers.go |
| 3030 | new file mode 100644 |
| 3031 | index 00000000000..c7f5dd3052e |
| 3032 | --- /dev/null |
| 3033 | +++ b/test/e2e/storage/drivers/in_tree_providers.go |
| 3034 | @@ -0,0 +1,751 @@ |
| 3035 | +// +build !providerless |
| 3036 | + |
| 3037 | +package drivers |
| 3038 | + |
| 3039 | +import ( |
| 3040 | + "fmt" |
| 3041 | + "os/exec" |
| 3042 | + "strings" |
| 3043 | + "time" |
| 3044 | + |
| 3045 | + "github.com/onsi/ginkgo" |
| 3046 | + v1 "k8s.io/api/core/v1" |
| 3047 | + storagev1 "k8s.io/api/storage/v1" |
| 3048 | + "k8s.io/apimachinery/pkg/util/sets" |
| 3049 | + "k8s.io/kubernetes/test/e2e/framework" |
| 3050 | + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" |
| 3051 | + e2epv "k8s.io/kubernetes/test/e2e/framework/pv" |
| 3052 | + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" |
| 3053 | + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" |
| 3054 | + "k8s.io/kubernetes/test/e2e/storage/testpatterns" |
| 3055 | + "k8s.io/kubernetes/test/e2e/storage/testsuites" |
| 3056 | + vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere" |
| 3057 | +) |
| 3058 | + |
| 3059 | +// Cinder |
| 3060 | +// This driver assumes that OpenStack client tools are installed |
| 3061 | +// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone) |
| 3062 | +// and that the usual OpenStack authentication environment variables are set |
| 3063 | +// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least). |
| 3064 | +type cinderDriver struct { |
| 3065 | + driverInfo testsuites.DriverInfo |
| 3066 | +} |
| 3067 | + |
| 3068 | +type cinderVolume struct { |
| 3069 | + volumeName string |
| 3070 | + volumeID string |
| 3071 | +} |
| 3072 | + |
| 3073 | +var _ testsuites.TestDriver = &cinderDriver{} |
| 3074 | +var _ testsuites.PreprovisionedVolumeTestDriver = &cinderDriver{} |
| 3075 | +var _ testsuites.InlineVolumeTestDriver = &cinderDriver{} |
| 3076 | +var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{} |
| 3077 | +var _ testsuites.DynamicPVTestDriver = &cinderDriver{} |
| 3078 | + |
| 3079 | +// InitCinderDriver returns cinderDriver that implements TestDriver interface |
| 3080 | +func InitCinderDriver() testsuites.TestDriver { |
| 3081 | + return &cinderDriver{ |
| 3082 | + driverInfo: testsuites.DriverInfo{ |
| 3083 | + Name: "cinder", |
| 3084 | + InTreePluginName: "kubernetes.io/cinder", |
| 3085 | + MaxFileSize: testpatterns.FileSizeMedium, |
| 3086 | + SupportedSizeRange: e2evolume.SizeRange{ |
| 3087 | + Min: "5Gi", |
| 3088 | + }, |
| 3089 | + SupportedFsType: sets.NewString( |
| 3090 | + "", // Default fsType |
| 3091 | + "ext3", |
| 3092 | + ), |
| 3093 | + TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 3094 | + Capabilities: map[testsuites.Capability]bool{ |
| 3095 | + testsuites.CapPersistence: true, |
| 3096 | + testsuites.CapFsGroup: true, |
| 3097 | + testsuites.CapExec: true, |
| 3098 | + testsuites.CapBlock: true, |
| 3099 | + // Cinder supports volume limits, but the test creates large |
| 3100 | + // number of volumes and times out test suites. |
| 3101 | + testsuites.CapVolumeLimits: false, |
| 3102 | + testsuites.CapTopology: true, |
| 3103 | + }, |
| 3104 | + }, |
| 3105 | + } |
| 3106 | +} |
| 3107 | + |
| 3108 | +func (c *cinderDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 3109 | + return &c.driverInfo |
| 3110 | +} |
| 3111 | + |
| 3112 | +func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 3113 | + e2eskipper.SkipUnlessProviderIs("openstack") |
| 3114 | +} |
| 3115 | + |
| 3116 | +func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 3117 | + cv, ok := e2evolume.(*cinderVolume) |
| 3118 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") |
| 3119 | + |
| 3120 | + volSource := v1.VolumeSource{ |
| 3121 | + Cinder: &v1.CinderVolumeSource{ |
| 3122 | + VolumeID: cv.volumeID, |
| 3123 | + ReadOnly: readOnly, |
| 3124 | + }, |
| 3125 | + } |
| 3126 | + if fsType != "" { |
| 3127 | + volSource.Cinder.FSType = fsType |
| 3128 | + } |
| 3129 | + return &volSource |
| 3130 | +} |
| 3131 | + |
| 3132 | +func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 3133 | + cv, ok := e2evolume.(*cinderVolume) |
| 3134 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") |
| 3135 | + |
| 3136 | + pvSource := v1.PersistentVolumeSource{ |
| 3137 | + Cinder: &v1.CinderPersistentVolumeSource{ |
| 3138 | + VolumeID: cv.volumeID, |
| 3139 | + ReadOnly: readOnly, |
| 3140 | + }, |
| 3141 | + } |
| 3142 | + if fsType != "" { |
| 3143 | + pvSource.Cinder.FSType = fsType |
| 3144 | + } |
| 3145 | + return &pvSource, nil |
| 3146 | +} |
| 3147 | + |
| 3148 | +func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 3149 | + provisioner := "kubernetes.io/cinder" |
| 3150 | + parameters := map[string]string{} |
| 3151 | + if fsType != "" { |
| 3152 | + parameters["fsType"] = fsType |
| 3153 | + } |
| 3154 | + ns := config.Framework.Namespace.Name |
| 3155 | + suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name) |
| 3156 | + |
| 3157 | + return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) |
| 3158 | +} |
| 3159 | + |
| 3160 | +func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 3161 | + return &testsuites.PerTestConfig{ |
| 3162 | + Driver: c, |
| 3163 | + Prefix: "cinder", |
| 3164 | + Framework: f, |
| 3165 | + }, func() {} |
| 3166 | +} |
| 3167 | + |
| 3168 | +func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 3169 | + f := config.Framework |
| 3170 | + ns := f.Namespace |
| 3171 | + |
| 3172 | + // We assume that namespace.Name is a random string |
| 3173 | + volumeName := ns.Name |
| 3174 | + ginkgo.By("creating a test Cinder volume") |
| 3175 | + output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput() |
| 3176 | + outputString := string(output[:]) |
| 3177 | + framework.Logf("cinder output:\n%s", outputString) |
| 3178 | + framework.ExpectNoError(err) |
| 3179 | + |
| 3180 | +	// Parse 'id' from stdout. Expected format: |
| 3181 | + // | attachments | [] | |
| 3182 | + // | availability_zone | nova | |
| 3183 | + // ... |
| 3184 | + // | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 | |
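| | +	// A well-formed row splits into exactly 5 whitespace-separated fields: |
| | +	// "|", key, "|", value, "|" - hence the checks on fields[1] and fields[3] below. |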
| 3185 | + volumeID := "" |
| 3186 | + for _, line := range strings.Split(outputString, "\n") { |
| 3187 | + fields := strings.Fields(line) |
| 3188 | + if len(fields) != 5 { |
| 3189 | + continue |
| 3190 | + } |
| 3191 | + if fields[1] != "id" { |
| 3192 | + continue |
| 3193 | + } |
| 3194 | + volumeID = fields[3] |
| 3195 | + break |
| 3196 | + } |
| 3197 | + framework.Logf("Volume ID: %s", volumeID) |
| 3198 | + framework.ExpectNotEqual(volumeID, "") |
| 3199 | + return &cinderVolume{ |
| 3200 | + volumeName: volumeName, |
| 3201 | + volumeID: volumeID, |
| 3202 | + } |
| 3203 | +} |
| 3204 | + |
| 3205 | +func (v *cinderVolume) DeleteVolume() { |
| 3206 | + name := v.volumeName |
| 3207 | + |
| 3208 | + // Try to delete the volume for several seconds - it takes |
| 3209 | + // a while for the plugin to detach it. |
| 3210 | + var output []byte |
| 3211 | + var err error |
| 3212 | + timeout := time.Second * 120 |
| 3213 | + |
| 3214 | + framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name) |
| 3215 | + for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { |
| 3216 | + output, err = exec.Command("cinder", "delete", name).CombinedOutput() |
| 3217 | + if err == nil { |
| 3218 | + framework.Logf("Cinder volume %s deleted", name) |
| 3219 | + return |
| 3220 | + } |
| 3221 | + framework.Logf("Failed to delete volume %s: %v", name, err) |
| 3222 | + } |
| 3223 | + framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:])) |
| 3224 | +} |
| 3225 | + |
| 3226 | +// GCE |
| 3227 | +type gcePdDriver struct { |
| 3228 | + driverInfo testsuites.DriverInfo |
| 3229 | +} |
| 3230 | + |
| 3231 | +type gcePdVolume struct { |
| 3232 | + volumeName string |
| 3233 | +} |
| 3234 | + |
| 3235 | +var _ testsuites.TestDriver = &gcePdDriver{} |
| 3236 | +var _ testsuites.PreprovisionedVolumeTestDriver = &gcePdDriver{} |
| 3237 | +var _ testsuites.InlineVolumeTestDriver = &gcePdDriver{} |
| 3238 | +var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{} |
| 3239 | +var _ testsuites.DynamicPVTestDriver = &gcePdDriver{} |
| 3240 | + |
| 3241 | +// InitGcePdDriver returns gcePdDriver that implements TestDriver interface |
| 3242 | +func InitGcePdDriver() testsuites.TestDriver { |
| 3243 | +	// In the current test structure, the driver is initialized before the new |
| 3244 | +	// framework is set up, so we cannot determine the correct node OS here. |
| 3245 | +	// Therefore all fs types for both Linux and Windows are listed as supported; |
| 3246 | +	// the node OS is checked later, during the test itself. |
| 3247 | + supportedTypes := sets.NewString( |
| 3248 | + "", // Default fsType |
| 3249 | + "ext2", |
| 3250 | + "ext3", |
| 3251 | + "ext4", |
| 3252 | + "xfs", |
| 3253 | + "ntfs", |
| 3254 | + ) |
| 3255 | + return &gcePdDriver{ |
| 3256 | + driverInfo: testsuites.DriverInfo{ |
| 3257 | + Name: "gcepd", |
| 3258 | + InTreePluginName: "kubernetes.io/gce-pd", |
| 3259 | + MaxFileSize: testpatterns.FileSizeMedium, |
| 3260 | + SupportedSizeRange: e2evolume.SizeRange{ |
| 3261 | + Min: "5Gi", |
| 3262 | + }, |
| 3263 | + SupportedFsType: supportedTypes, |
| 3264 | + SupportedMountOption: sets.NewString("debug", "nouid32"), |
| 3265 | + TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 3266 | + Capabilities: map[testsuites.Capability]bool{ |
| 3267 | + testsuites.CapPersistence: true, |
| 3268 | + testsuites.CapFsGroup: true, |
| 3269 | + testsuites.CapBlock: true, |
| 3270 | + testsuites.CapExec: true, |
| 3271 | + testsuites.CapMultiPODs: true, |
| 3272 | + testsuites.CapControllerExpansion: true, |
| 3273 | + testsuites.CapNodeExpansion: true, |
| 3274 | + // GCE supports volume limits, but the test creates large |
| 3275 | + // number of volumes and times out test suites. |
| 3276 | + testsuites.CapVolumeLimits: false, |
| 3277 | + testsuites.CapTopology: true, |
| 3278 | + }, |
| 3279 | + }, |
| 3280 | + } |
| 3281 | +} |
| 3282 | + |
| 3283 | +func (g *gcePdDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 3284 | + return &g.driverInfo |
| 3285 | +} |
| 3286 | + |
| 3287 | +func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 3288 | + e2eskipper.SkipUnlessProviderIs("gce", "gke") |
| 3289 | + if pattern.FeatureTag == "[sig-windows]" { |
| 3290 | + e2eskipper.SkipUnlessNodeOSDistroIs("windows") |
| 3291 | + } |
| 3292 | +} |
| 3293 | + |
| 3294 | +func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 3295 | + gv, ok := e2evolume.(*gcePdVolume) |
| 3296 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") |
| 3297 | + volSource := v1.VolumeSource{ |
| 3298 | + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ |
| 3299 | + PDName: gv.volumeName, |
| 3300 | + ReadOnly: readOnly, |
| 3301 | + }, |
| 3302 | + } |
| 3303 | + if fsType != "" { |
| 3304 | + volSource.GCEPersistentDisk.FSType = fsType |
| 3305 | + } |
| 3306 | + return &volSource |
| 3307 | +} |
| 3308 | + |
| 3309 | +func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 3310 | + gv, ok := e2evolume.(*gcePdVolume) |
| 3311 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") |
| 3312 | + pvSource := v1.PersistentVolumeSource{ |
| 3313 | + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ |
| 3314 | + PDName: gv.volumeName, |
| 3315 | + ReadOnly: readOnly, |
| 3316 | + }, |
| 3317 | + } |
| 3318 | + if fsType != "" { |
| 3319 | + pvSource.GCEPersistentDisk.FSType = fsType |
| 3320 | + } |
| 3321 | + return &pvSource, nil |
| 3322 | +} |
| 3323 | + |
| 3324 | +func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 3325 | + provisioner := "kubernetes.io/gce-pd" |
| 3326 | + parameters := map[string]string{} |
| 3327 | + if fsType != "" { |
| 3328 | + parameters["fsType"] = fsType |
| 3329 | + } |
| 3330 | + ns := config.Framework.Namespace.Name |
| 3331 | + suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name) |
| 3332 | + delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer |
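| | +	// WaitForFirstConsumer delays provisioning until a pod using the claim is |
| | +	// scheduled, so the PD is created in that pod's topology domain (zone). |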
| 3333 | + |
| 3334 | + return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) |
| 3335 | +} |
| 3336 | + |
| 3337 | +func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 3338 | + config := &testsuites.PerTestConfig{ |
| 3339 | + Driver: g, |
| 3340 | + Prefix: "gcepd", |
| 3341 | + Framework: f, |
| 3342 | + } |
| 3343 | + |
| 3344 | + if framework.NodeOSDistroIs("windows") { |
| 3345 | + config.ClientNodeSelection = e2epod.NodeSelection{ |
| 3346 | + Selector: map[string]string{ |
| 3347 | + "kubernetes.io/os": "windows", |
| 3348 | + }, |
| 3349 | + } |
| 3350 | + } |
| 3351 | +	return config, func() {} |
| 3353 | +} |
| 3354 | + |
| 3355 | +func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 3356 | + zone := getInlineVolumeZone(config.Framework) |
| 3357 | + if volType == testpatterns.InlineVolume { |
| 3358 | + // PD will be created in framework.TestContext.CloudConfig.Zone zone, |
| 3359 | +		// so pods should also be scheduled there. |
| 3360 | + config.ClientNodeSelection = e2epod.NodeSelection{ |
| 3361 | + Selector: map[string]string{ |
| 3362 | + v1.LabelZoneFailureDomain: zone, |
| 3363 | + }, |
| 3364 | + } |
| 3365 | + } |
| 3366 | + ginkgo.By("creating a test gce pd volume") |
| 3367 | + vname, err := e2epv.CreatePDWithRetryAndZone(zone) |
| 3368 | + framework.ExpectNoError(err) |
| 3369 | + return &gcePdVolume{ |
| 3370 | + volumeName: vname, |
| 3371 | + } |
| 3372 | +} |
| 3373 | + |
| 3374 | +func (v *gcePdVolume) DeleteVolume() { |
| 3375 | + e2epv.DeletePDWithRetry(v.volumeName) |
| 3376 | +} |
| 3377 | + |
| 3378 | +// vSphere |
| 3379 | +type vSphereDriver struct { |
| 3380 | + driverInfo testsuites.DriverInfo |
| 3381 | +} |
| 3382 | + |
| 3383 | +type vSphereVolume struct { |
| 3384 | + volumePath string |
| 3385 | + nodeInfo *vspheretest.NodeInfo |
| 3386 | +} |
| 3387 | + |
| 3388 | +var _ testsuites.TestDriver = &vSphereDriver{} |
| 3389 | +var _ testsuites.PreprovisionedVolumeTestDriver = &vSphereDriver{} |
| 3390 | +var _ testsuites.InlineVolumeTestDriver = &vSphereDriver{} |
| 3391 | +var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{} |
| 3392 | +var _ testsuites.DynamicPVTestDriver = &vSphereDriver{} |
| 3393 | + |
| 3394 | +// InitVSphereDriver returns vSphereDriver that implements TestDriver interface |
| 3395 | +func InitVSphereDriver() testsuites.TestDriver { |
| 3396 | + return &vSphereDriver{ |
| 3397 | + driverInfo: testsuites.DriverInfo{ |
| 3398 | + Name: "vsphere", |
| 3399 | + InTreePluginName: "kubernetes.io/vsphere-volume", |
| 3400 | + MaxFileSize: testpatterns.FileSizeMedium, |
| 3401 | + SupportedSizeRange: e2evolume.SizeRange{ |
| 3402 | + Min: "5Gi", |
| 3403 | + }, |
| 3404 | + SupportedFsType: sets.NewString( |
| 3405 | + "", // Default fsType |
| 3406 | + "ext4", |
| 3407 | + ), |
| 3408 | + TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 3409 | + Capabilities: map[testsuites.Capability]bool{ |
| 3410 | + testsuites.CapPersistence: true, |
| 3411 | + testsuites.CapFsGroup: true, |
| 3412 | + testsuites.CapExec: true, |
| 3413 | + testsuites.CapMultiPODs: true, |
| 3414 | + testsuites.CapTopology: true, |
| 3415 | + }, |
| 3416 | + }, |
| 3417 | + } |
| 3418 | +} |
| | + |
| 3419 | +func (v *vSphereDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 3420 | + return &v.driverInfo |
| 3421 | +} |
| 3422 | + |
| 3423 | +func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 3424 | + e2eskipper.SkipUnlessProviderIs("vsphere") |
| 3425 | +} |
| 3426 | + |
| 3427 | +func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 3428 | + vsv, ok := e2evolume.(*vSphereVolume) |
| 3429 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") |
| 3430 | + |
| 3431 | +	// The vSphere driver does not appear to support readOnly volumes. |
| 3432 | +	// TODO: verify whether this is correct. |
| 3433 | + if readOnly { |
| 3434 | + return nil |
| 3435 | + } |
| 3436 | + volSource := v1.VolumeSource{ |
| 3437 | + VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ |
| 3438 | + VolumePath: vsv.volumePath, |
| 3439 | + }, |
| 3440 | + } |
| 3441 | + if fsType != "" { |
| 3442 | + volSource.VsphereVolume.FSType = fsType |
| 3443 | + } |
| 3444 | + return &volSource |
| 3445 | +} |
| 3446 | + |
| 3447 | +func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 3448 | + vsv, ok := e2evolume.(*vSphereVolume) |
| 3449 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") |
| 3450 | + |
| 3451 | +	// The vSphere driver does not appear to support readOnly volumes. |
| 3452 | +	// TODO: verify whether this is correct. |
| 3453 | + if readOnly { |
| 3454 | + return nil, nil |
| 3455 | + } |
| 3456 | + pvSource := v1.PersistentVolumeSource{ |
| 3457 | + VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ |
| 3458 | + VolumePath: vsv.volumePath, |
| 3459 | + }, |
| 3460 | + } |
| 3461 | + if fsType != "" { |
| 3462 | + pvSource.VsphereVolume.FSType = fsType |
| 3463 | + } |
| 3464 | + return &pvSource, nil |
| 3465 | +} |
| 3466 | + |
| 3467 | +func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 3468 | + provisioner := "kubernetes.io/vsphere-volume" |
| 3469 | + parameters := map[string]string{} |
| 3470 | + if fsType != "" { |
| 3471 | + parameters["fsType"] = fsType |
| 3472 | + } |
| 3473 | + ns := config.Framework.Namespace.Name |
| 3474 | + suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name) |
| 3475 | + |
| 3476 | + return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) |
| 3477 | +} |
| 3478 | + |
| 3479 | +func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 3480 | + return &testsuites.PerTestConfig{ |
| 3481 | + Driver: v, |
| 3482 | + Prefix: "vsphere", |
| 3483 | + Framework: f, |
| 3484 | + }, func() {} |
| 3485 | +} |
| 3486 | + |
| 3487 | +func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 3488 | + f := config.Framework |
| 3489 | + vspheretest.Bootstrap(f) |
| 3490 | + nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo() |
| 3491 | + volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef) |
| 3492 | + framework.ExpectNoError(err) |
| 3493 | + return &vSphereVolume{ |
| 3494 | + volumePath: volumePath, |
| 3495 | + nodeInfo: nodeInfo, |
| 3496 | + } |
| 3497 | +} |
| 3498 | + |
| 3499 | +func (v *vSphereVolume) DeleteVolume() { |
| 3500 | + v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef) |
| 3501 | +} |
| 3502 | + |
| 3503 | +// Azure Disk |
| 3504 | +type azureDiskDriver struct { |
| 3505 | + driverInfo testsuites.DriverInfo |
| 3506 | +} |
| 3507 | + |
| 3508 | +type azureDiskVolume struct { |
| 3509 | + volumeName string |
| 3510 | +} |
| 3511 | + |
| 3512 | +var _ testsuites.TestDriver = &azureDiskDriver{} |
| 3513 | +var _ testsuites.PreprovisionedVolumeTestDriver = &azureDiskDriver{} |
| 3514 | +var _ testsuites.InlineVolumeTestDriver = &azureDiskDriver{} |
| 3515 | +var _ testsuites.PreprovisionedPVTestDriver = &azureDiskDriver{} |
| 3516 | +var _ testsuites.DynamicPVTestDriver = &azureDiskDriver{} |
| 3517 | + |
| 3518 | +// InitAzureDiskDriver returns azureDiskDriver that implements TestDriver interface |
| 3519 | +func InitAzureDiskDriver() testsuites.TestDriver { |
| 3520 | + return &azureDiskDriver{ |
| 3521 | + driverInfo: testsuites.DriverInfo{ |
| 3522 | + Name: "azure-disk", |
| 3523 | + InTreePluginName: "kubernetes.io/azure-disk", |
| 3524 | + MaxFileSize: testpatterns.FileSizeMedium, |
| 3525 | + SupportedSizeRange: e2evolume.SizeRange{ |
| 3526 | + Min: "5Gi", |
| 3527 | + }, |
| 3528 | + SupportedFsType: sets.NewString( |
| 3529 | + "", // Default fsType |
| 3530 | + "ext3", |
| 3531 | + "ext4", |
| 3532 | + "xfs", |
| 3533 | + ), |
| 3534 | + TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 3535 | + Capabilities: map[testsuites.Capability]bool{ |
| 3536 | + testsuites.CapPersistence: true, |
| 3537 | + testsuites.CapFsGroup: true, |
| 3538 | + testsuites.CapBlock: true, |
| 3539 | + testsuites.CapExec: true, |
| 3540 | + testsuites.CapMultiPODs: true, |
| 3541 | + // Azure supports volume limits, but the test creates large |
| 3542 | + // number of volumes and times out test suites. |
| 3543 | + testsuites.CapVolumeLimits: false, |
| 3544 | + testsuites.CapTopology: true, |
| 3545 | + }, |
| 3546 | + }, |
| 3547 | + } |
| 3548 | +} |
| 3549 | + |
| 3550 | +func (a *azureDiskDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 3551 | + return &a.driverInfo |
| 3552 | +} |
| 3553 | + |
| 3554 | +func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 3555 | + e2eskipper.SkipUnlessProviderIs("azure") |
| 3556 | +} |
| 3557 | + |
| 3558 | +func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 3559 | + av, ok := e2evolume.(*azureDiskVolume) |
| 3560 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") |
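| | + // The volume name is a URI; the disk name is its last path segment.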
| 3561 | + diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] |
| 3562 | + |
| 3563 | + kind := v1.AzureManagedDisk |
| 3564 | + volSource := v1.VolumeSource{ |
| 3565 | + AzureDisk: &v1.AzureDiskVolumeSource{ |
| 3566 | + DiskName: diskName, |
| 3567 | + DataDiskURI: av.volumeName, |
| 3568 | + Kind: &kind, |
| 3569 | + ReadOnly: &readOnly, |
| 3570 | + }, |
| 3571 | + } |
| 3572 | + if fsType != "" { |
| 3573 | + volSource.AzureDisk.FSType = &fsType |
| 3574 | + } |
| 3575 | + return &volSource |
| 3576 | +} |
| 3577 | + |
| 3578 | +func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 3579 | + av, ok := e2evolume.(*azureDiskVolume) |
| 3580 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") |
| 3581 | + |
| 3582 | + diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] |
| 3583 | + |
| 3584 | + kind := v1.AzureManagedDisk |
| 3585 | + pvSource := v1.PersistentVolumeSource{ |
| 3586 | + AzureDisk: &v1.AzureDiskVolumeSource{ |
| 3587 | + DiskName: diskName, |
| 3588 | + DataDiskURI: av.volumeName, |
| 3589 | + Kind: &kind, |
| 3590 | + ReadOnly: &readOnly, |
| 3591 | + }, |
| 3592 | + } |
| 3593 | + if fsType != "" { |
| 3594 | + pvSource.AzureDisk.FSType = &fsType |
| 3595 | + } |
| 3596 | + return &pvSource, nil |
| 3597 | +} |
| 3598 | + |
| 3599 | +func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 3600 | + provisioner := "kubernetes.io/azure-disk" |
| 3601 | + parameters := map[string]string{} |
| 3602 | + if fsType != "" { |
| 3603 | + parameters["fsType"] = fsType |
| 3604 | + } |
| 3605 | + ns := config.Framework.Namespace.Name |
| 3606 | + suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name) |
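| | + // WaitForFirstConsumer delays provisioning until a pod using the claim is scheduled, so the disk is created in that pod's zone.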
| 3607 | + delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer |
| 3608 | + |
| 3609 | + return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) |
| 3610 | +} |
| 3611 | + |
| 3612 | +func (a *azureDiskDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 3613 | + return &testsuites.PerTestConfig{ |
| 3614 | + Driver: a, |
| 3615 | + Prefix: "azure", |
| 3616 | + Framework: f, |
| 3617 | + }, func() {} |
| 3618 | +} |
| 3619 | + |
| 3620 | +func (a *azureDiskDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 3621 | + ginkgo.By("creating a test azure disk volume") |
| 3622 | + zone := getInlineVolumeZone(config.Framework) |
| 3623 | + if volType == testpatterns.InlineVolume { |
| 3624 | + // The PD will be created in the zone given by framework.TestContext.CloudConfig.Zone,
| 3625 | + // so pods should also be scheduled there.
| 3626 | + config.ClientNodeSelection = e2epod.NodeSelection{ |
| 3627 | + Selector: map[string]string{ |
| 3628 | + v1.LabelZoneFailureDomain: zone, |
| 3629 | + }, |
| 3630 | + } |
| 3631 | + } |
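| | + // CreatePDWithRetryAndZone dispatches to the configured cloud provider (Azure here) despite the "PD" name.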
| 3632 | + volumeName, err := e2epv.CreatePDWithRetryAndZone(zone) |
| 3633 | + framework.ExpectNoError(err) |
| 3634 | + return &azureDiskVolume{ |
| 3635 | + volumeName: volumeName, |
| 3636 | + } |
| 3637 | +} |
| 3638 | + |
| 3639 | +func (v *azureDiskVolume) DeleteVolume() { |
| 3640 | + e2epv.DeletePDWithRetry(v.volumeName) |
| 3641 | +} |
| 3642 | + |
| 3643 | +// AWS |
| 3644 | +type awsDriver struct { |
| 3645 | + driverInfo testsuites.DriverInfo |
| 3646 | +} |
| 3647 | + |
| 3648 | +type awsVolume struct { |
| 3649 | + volumeName string |
| 3650 | +} |
| 3651 | + |
| 3652 | +var _ testsuites.TestDriver = &awsDriver{} |
| 3653 | + |
| 3654 | +var _ testsuites.PreprovisionedVolumeTestDriver = &awsDriver{} |
| 3655 | +var _ testsuites.InlineVolumeTestDriver = &awsDriver{} |
| 3656 | +var _ testsuites.PreprovisionedPVTestDriver = &awsDriver{} |
| 3657 | +var _ testsuites.DynamicPVTestDriver = &awsDriver{} |
| 3658 | + |
| 3659 | +// InitAwsDriver returns an awsDriver that implements the TestDriver interface
| 3660 | +func InitAwsDriver() testsuites.TestDriver { |
| 3661 | + return &awsDriver{ |
| 3662 | + driverInfo: testsuites.DriverInfo{ |
| 3663 | + Name: "aws", |
| 3664 | + InTreePluginName: "kubernetes.io/aws-ebs", |
| 3665 | + MaxFileSize: testpatterns.FileSizeMedium, |
| 3666 | + SupportedSizeRange: e2evolume.SizeRange{ |
| 3667 | + Min: "5Gi", |
| 3668 | + }, |
| 3669 | + SupportedFsType: sets.NewString( |
| 3670 | + "", // Default fsType |
| 3671 | + "ext2", |
| 3672 | + "ext3", |
| 3673 | + "ext4", |
| 3674 | + "xfs", |
| 3675 | + "ntfs", |
| 3676 | + ), |
| 3677 | + SupportedMountOption: sets.NewString("debug", "nouid32"), |
| 3678 | + TopologyKeys: []string{v1.LabelZoneFailureDomain}, |
| 3679 | + Capabilities: map[testsuites.Capability]bool{ |
| 3680 | + testsuites.CapPersistence: true, |
| 3681 | + testsuites.CapFsGroup: true, |
| 3682 | + testsuites.CapBlock: true, |
| 3683 | + testsuites.CapExec: true, |
| 3684 | + testsuites.CapMultiPODs: true, |
| 3685 | + testsuites.CapControllerExpansion: true, |
| 3686 | + testsuites.CapNodeExpansion: true, |
| 3687 | + // AWS supports volume limits, but the test creates a large
| 3688 | + // number of volumes and times out the test suites.
| 3689 | + testsuites.CapVolumeLimits: false, |
| 3690 | + testsuites.CapTopology: true, |
| 3691 | + }, |
| 3692 | + }, |
| 3693 | + } |
| 3694 | +} |
| 3695 | + |
| 3696 | +func (a *awsDriver) GetDriverInfo() *testsuites.DriverInfo { |
| 3697 | + return &a.driverInfo |
| 3698 | +} |
| 3699 | + |
| 3700 | +func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { |
| 3701 | + e2eskipper.SkipUnlessProviderIs("aws") |
| 3702 | +} |
| 3703 | + |
| 3704 | +func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { |
| 3705 | + av, ok := e2evolume.(*awsVolume) |
| 3706 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") |
| 3707 | + volSource := v1.VolumeSource{ |
| 3708 | + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ |
| 3709 | + VolumeID: av.volumeName, |
| 3710 | + ReadOnly: readOnly, |
| 3711 | + }, |
| 3712 | + } |
| 3713 | + if fsType != "" { |
| 3714 | + volSource.AWSElasticBlockStore.FSType = fsType |
| 3715 | + } |
| 3716 | + return &volSource |
| 3717 | +} |
| 3718 | + |
| 3719 | +func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { |
| 3720 | + av, ok := e2evolume.(*awsVolume) |
| 3721 | + framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") |
| 3722 | + pvSource := v1.PersistentVolumeSource{ |
| 3723 | + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ |
| 3724 | + VolumeID: av.volumeName, |
| 3725 | + ReadOnly: readOnly, |
| 3726 | + }, |
| 3727 | + } |
| 3728 | + if fsType != "" { |
| 3729 | + pvSource.AWSElasticBlockStore.FSType = fsType |
| 3730 | + } |
| 3731 | + return &pvSource, nil |
| 3732 | +} |
| 3733 | + |
| 3734 | +func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { |
| 3735 | + provisioner := "kubernetes.io/aws-ebs" |
| 3736 | + parameters := map[string]string{} |
| 3737 | + if fsType != "" { |
| 3738 | + parameters["fsType"] = fsType |
| 3739 | + } |
| 3740 | + ns := config.Framework.Namespace.Name |
| 3741 | + suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name) |
| 3742 | + delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer |
| 3743 | + |
| 3744 | + return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) |
| 3745 | +} |
| 3746 | + |
| 3747 | +func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { |
| 3748 | + config := &testsuites.PerTestConfig{ |
| 3749 | + Driver: a, |
| 3750 | + Prefix: "aws", |
| 3751 | + Framework: f, |
| 3752 | + } |
| 3753 | + |
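| | + // When the nodes run Windows, pin test pods to Windows nodes via the kubernetes.io/os label.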
| 3754 | + if framework.NodeOSDistroIs("windows") { |
| 3755 | + config.ClientNodeSelection = e2epod.NodeSelection{ |
| 3756 | + Selector: map[string]string{ |
| 3757 | + "kubernetes.io/os": "windows", |
| 3758 | + }, |
| 3759 | + } |
| 3760 | + } |
| 3761 | + return config, func() {} |
| 3762 | +} |
| 3763 | + |
| 3764 | +func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { |
| 3765 | + zone := getInlineVolumeZone(config.Framework) |
| 3766 | + if volType == testpatterns.InlineVolume { |
| 3767 | + // The PD will be created in the zone given by framework.TestContext.CloudConfig.Zone,
| 3768 | + // so pods should also be scheduled there.
| 3769 | + config.ClientNodeSelection = e2epod.NodeSelection{ |
| 3770 | + Selector: map[string]string{ |
| 3771 | + v1.LabelZoneFailureDomain: zone, |
| 3772 | + }, |
| 3773 | + } |
| 3774 | + } |
| 3775 | + ginkgo.By("creating a test aws volume") |
| 3776 | + vname, err := e2epv.CreatePDWithRetryAndZone(zone) |
| 3777 | + framework.ExpectNoError(err) |
| 3778 | + return &awsVolume{ |
| 3779 | + volumeName: vname, |
| 3780 | + } |
| 3781 | +} |
| 3782 | + |
| 3783 | +func (v *awsVolume) DeleteVolume() { |
| 3784 | + e2epv.DeletePDWithRetry(v.volumeName) |
| 3785 | +} |
| 3786 | diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go |
| 3787 | index 19372062407..8322db743cd 100644 |
| 3788 | --- a/test/e2e/storage/in_tree_volumes.go |
| 3789 | +++ b/test/e2e/storage/in_tree_volumes.go |
| 3790 | @@ -33,11 +33,6 @@ var testDrivers = []func() testsuites.TestDriver{ |
| 3791 | drivers.InitHostPathDriver, |
| 3792 | drivers.InitHostPathSymlinkDriver, |
| 3793 | drivers.InitEmptydirDriver, |
| 3794 | - drivers.InitCinderDriver, |
| 3795 | - drivers.InitGcePdDriver, |
| 3796 | - drivers.InitVSphereDriver, |
| 3797 | - drivers.InitAzureDiskDriver, |
| 3798 | - drivers.InitAwsDriver, |
| 3799 | drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectory), |
| 3800 | drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryLink), |
| 3801 | drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryBindMounted), |
| 3802 | diff --git a/test/e2e/storage/in_tree_volumes_providers.go b/test/e2e/storage/in_tree_volumes_providers.go |
| 3803 | new file mode 100644 |
| 3804 | index 00000000000..d6a5dbca191 |
| 3805 | --- /dev/null |
| 3806 | +++ b/test/e2e/storage/in_tree_volumes_providers.go |
| 3807 | @@ -0,0 +1,46 @@ |
| 3808 | +// +build !providerless |
| 3809 | + |
| 3810 | +/* |
| 3811 | +Copyright 2020 The Kubernetes Authors. |
| 3812 | + |
| 3813 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 3814 | +you may not use this file except in compliance with the License. |
| 3815 | +You may obtain a copy of the License at |
| 3816 | + |
| 3817 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 3818 | + |
| 3819 | +Unless required by applicable law or agreed to in writing, software |
| 3820 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 3821 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 3822 | +See the License for the specific language governing permissions and |
| 3823 | +limitations under the License. |
| 3824 | +*/ |
| 3825 | + |
| 3826 | +package storage |
| 3827 | + |
| 3828 | +import ( |
| 3829 | + "github.com/onsi/ginkgo" |
| 3830 | + "k8s.io/kubernetes/test/e2e/storage/drivers" |
| 3831 | + "k8s.io/kubernetes/test/e2e/storage/testsuites" |
| 3832 | + "k8s.io/kubernetes/test/e2e/storage/utils" |
| 3833 | +) |
| 3834 | + |
| 3835 | +// List of test drivers to be executed in the loop below
| 3836 | +var testDriversProviders = []func() testsuites.TestDriver{ |
| 3837 | + drivers.InitCinderDriver, |
| 3838 | + drivers.InitGcePdDriver, |
| 3839 | + drivers.InitVSphereDriver, |
| 3840 | + drivers.InitAzureDiskDriver, |
| 3841 | + drivers.InitAwsDriver, |
| 3842 | +} |
| 3843 | + |
| 3844 | +// This executes the test suites for in-tree cloud provider volumes.
| 3845 | +var _ = utils.SIGDescribe("In-tree Volumes for Cloud Providers", func() { |
| 3846 | + for _, initDriver := range testDriversProviders { |
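| | + // Instantiate the driver outside the Context closure so each iteration captures its own driver.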
| 3847 | + curDriver := initDriver() |
| 3848 | + |
| 3849 | + ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { |
| 3850 | + testsuites.DefineTestSuite(curDriver, testsuites.BaseSuites) |
| 3851 | + }) |
| 3852 | + } |
| 3853 | +}) |
| 3854 | diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go |
| 3855 | index 5afebb5e903..b197eee99a6 100644 |
| 3856 | --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go |
| 3857 | +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go |
| 3858 | @@ -91,7 +91,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { |
| 3859 | |
| 3860 | ginkgo.BeforeEach(func() { |
| 3861 | // To protect the NFS volume pod from the kubelet restart, we isolate it on its own node. |
| 3862 | - e2eskipper.SkipUnlessNodeCountIsAtLeast(minNodes) |
| 3863 | + e2eskipper.SkipUnlessNodeCountIsAtLeast(2) |
| 3864 | e2eskipper.SkipIfProviderIs("local") |
| 3865 | |
| 3866 | c = f.ClientSet |
| 3867 | diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go |
| 3868 | index b6d720406aa..86fa0cae488 100644 |
| 3869 | --- a/test/e2e/storage/pd.go |
| 3870 | +++ b/test/e2e/storage/pd.go |
| 3871 | @@ -1,3 +1,5 @@ |
| 3872 | +// +build !providerless |
| 3873 | + |
| 3874 | /* |
| 3875 | Copyright 2015 The Kubernetes Authors. |
| 3876 | |
| 3877 | diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go |
| 3878 | index 8b0343e4b58..76a80042811 100644 |
| 3879 | --- a/test/e2e/storage/persistent_volumes-gce.go |
| 3880 | +++ b/test/e2e/storage/persistent_volumes-gce.go |
| 3881 | @@ -1,3 +1,5 @@ |
| 3882 | +// +build !providerless |
| 3883 | + |
| 3884 | /* |
| 3885 | Copyright 2017 The Kubernetes Authors. |
| 3886 | |
| 3887 | @@ -18,6 +20,7 @@ package storage |
| 3888 | |
| 3889 | import ( |
| 3890 | "context" |
| 3891 | + |
| 3892 | "github.com/onsi/ginkgo" |
| 3893 | v1 "k8s.io/api/core/v1" |
| 3894 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 3895 | diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go |
| 3896 | index 7763afaf6b1..a042dcc9d4a 100644 |
| 3897 | --- a/test/e2e/storage/regional_pd.go |
| 3898 | +++ b/test/e2e/storage/regional_pd.go |
| 3899 | @@ -1,3 +1,5 @@ |
| 3900 | +// +build !providerless |
| 3901 | + |
| 3902 | /* |
| 3903 | Copyright 2016 The Kubernetes Authors. |
| 3904 | |
| 3905 | @@ -18,6 +20,7 @@ package storage |
| 3906 | |
| 3907 | import ( |
| 3908 | "context" |
| 3909 | + |
| 3910 | "github.com/onsi/ginkgo" |
| 3911 | "github.com/onsi/gomega" |
| 3912 | |
| 3913 | diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go |
| 3914 | index a8b494eb3ac..c070a81283c 100644 |
| 3915 | --- a/test/e2e/storage/volume_provisioning.go |
| 3916 | +++ b/test/e2e/storage/volume_provisioning.go |
| 3917 | @@ -24,11 +24,6 @@ import ( |
| 3918 | "time" |
| 3919 | |
| 3920 | "github.com/onsi/ginkgo" |
| 3921 | - "github.com/onsi/gomega" |
| 3922 | - |
| 3923 | - "github.com/aws/aws-sdk-go/aws" |
| 3924 | - "github.com/aws/aws-sdk-go/aws/session" |
| 3925 | - "github.com/aws/aws-sdk-go/service/ec2" |
| 3926 | |
| 3927 | v1 "k8s.io/api/core/v1" |
| 3928 | rbacv1 "k8s.io/api/rbac/v1" |
| 3929 | @@ -37,9 +32,7 @@ import ( |
| 3930 | apierrors "k8s.io/apimachinery/pkg/api/errors" |
| 3931 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 3932 | "k8s.io/apimachinery/pkg/runtime/schema" |
| 3933 | - "k8s.io/apimachinery/pkg/types" |
| 3934 | "k8s.io/apimachinery/pkg/util/rand" |
| 3935 | - "k8s.io/apimachinery/pkg/util/sets" |
| 3936 | "k8s.io/apimachinery/pkg/util/wait" |
| 3937 | "k8s.io/apiserver/pkg/authentication/serviceaccount" |
| 3938 | clientset "k8s.io/client-go/kubernetes" |
| 3939 | @@ -48,7 +41,6 @@ import ( |
| 3940 | e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" |
| 3941 | e2enode "k8s.io/kubernetes/test/e2e/framework/node" |
| 3942 | e2epod "k8s.io/kubernetes/test/e2e/framework/pod" |
| 3943 | - "k8s.io/kubernetes/test/e2e/framework/providers/gce" |
| 3944 | e2epv "k8s.io/kubernetes/test/e2e/framework/pv" |
| 3945 | e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" |
| 3946 | "k8s.io/kubernetes/test/e2e/storage/testsuites" |
| 3947 | @@ -61,80 +53,6 @@ const ( |
| 3948 | externalPluginName = "example.com/nfs" |
| 3949 | ) |
| 3950 | |
| 3951 | -// checkAWSEBS checks properties of an AWS EBS. Test framework does not |
| 3952 | -// instantiate full AWS provider, therefore we need use ec2 API directly. |
| 3953 | -func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error { |
| 3954 | - diskName := volume.Spec.AWSElasticBlockStore.VolumeID |
| 3955 | - |
| 3956 | - var client *ec2.EC2 |
| 3957 | - |
| 3958 | - tokens := strings.Split(diskName, "/") |
| 3959 | - volumeID := tokens[len(tokens)-1] |
| 3960 | - |
| 3961 | - zone := framework.TestContext.CloudConfig.Zone |
| 3962 | - |
| 3963 | - awsSession, err := session.NewSession() |
| 3964 | - if err != nil { |
| 3965 | - return fmt.Errorf("error creating session: %v", err) |
| 3966 | - } |
| 3967 | - |
| 3968 | - if len(zone) > 0 { |
| 3969 | - region := zone[:len(zone)-1] |
| 3970 | - cfg := aws.Config{Region: ®ion} |
| 3971 | - framework.Logf("using region %s", region) |
| 3972 | - client = ec2.New(awsSession, &cfg) |
| 3973 | - } else { |
| 3974 | - framework.Logf("no region configured") |
| 3975 | - client = ec2.New(awsSession) |
| 3976 | - } |
| 3977 | - |
| 3978 | - request := &ec2.DescribeVolumesInput{ |
| 3979 | - VolumeIds: []*string{&volumeID}, |
| 3980 | - } |
| 3981 | - info, err := client.DescribeVolumes(request) |
| 3982 | - if err != nil { |
| 3983 | - return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err) |
| 3984 | - } |
| 3985 | - if len(info.Volumes) == 0 { |
| 3986 | - return fmt.Errorf("no volumes found for volume %q", volumeID) |
| 3987 | - } |
| 3988 | - if len(info.Volumes) > 1 { |
| 3989 | - return fmt.Errorf("multiple volumes found for volume %q", volumeID) |
| 3990 | - } |
| 3991 | - |
| 3992 | - awsVolume := info.Volumes[0] |
| 3993 | - if awsVolume.VolumeType == nil { |
| 3994 | - return fmt.Errorf("expected volume type %q, got nil", volumeType) |
| 3995 | - } |
| 3996 | - if *awsVolume.VolumeType != volumeType { |
| 3997 | - return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType) |
| 3998 | - } |
| 3999 | - if encrypted && awsVolume.Encrypted == nil { |
| 4000 | - return fmt.Errorf("expected encrypted volume, got no encryption") |
| 4001 | - } |
| 4002 | - if encrypted && !*awsVolume.Encrypted { |
| 4003 | - return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted) |
| 4004 | - } |
| 4005 | - return nil |
| 4006 | -} |
| 4007 | - |
| 4008 | -func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error { |
| 4009 | - cloud, err := gce.GetGCECloud() |
| 4010 | - if err != nil { |
| 4011 | - return err |
| 4012 | - } |
| 4013 | - diskName := volume.Spec.GCEPersistentDisk.PDName |
| 4014 | - disk, err := cloud.GetDiskByNameUnknownZone(diskName) |
| 4015 | - if err != nil { |
| 4016 | - return err |
| 4017 | - } |
| 4018 | - |
| 4019 | - if !strings.HasSuffix(disk.Type, volumeType) { |
| 4020 | - return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType) |
| 4021 | - } |
| 4022 | - return nil |
| 4023 | -} |
| 4024 | - |
| 4025 | var _ = utils.SIGDescribe("Dynamic Provisioning", func() { |
| 4026 | f := framework.NewDefaultFramework("volume-provisioning") |
| 4027 | |
| 4028 | @@ -147,451 +65,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { |
| 4029 | ns = f.Namespace.Name |
| 4030 | }) |
| 4031 | |
| 4032 | - ginkgo.Describe("DynamicProvisioner [Slow]", func() { |
| 4033 | - ginkgo.It("should provision storage with different parameters", func() { |
| 4034 | - |
| 4035 | - // This test checks that dynamic provisioning can provision a volume |
| 4036 | - // that can be used to persist data among pods. |
| 4037 | - tests := []testsuites.StorageClassTest{ |
| 4038 | - // GCE/GKE |
| 4039 | - { |
| 4040 | - Name: "SSD PD on GCE/GKE", |
| 4041 | - CloudProviders: []string{"gce", "gke"}, |
| 4042 | - Provisioner: "kubernetes.io/gce-pd", |
| 4043 | - Parameters: map[string]string{ |
| 4044 | - "type": "pd-ssd", |
| 4045 | - "zone": getRandomClusterZone(c), |
| 4046 | - }, |
| 4047 | - ClaimSize: "1.5Gi", |
| 4048 | - ExpectedSize: "2Gi", |
| 4049 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4050 | - volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4051 | - gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4052 | - |
| 4053 | - err := checkGCEPD(volume, "pd-ssd") |
| 4054 | - framework.ExpectNoError(err, "checkGCEPD pd-ssd") |
| 4055 | - }, |
| 4056 | - }, |
| 4057 | - { |
| 4058 | - Name: "HDD PD on GCE/GKE", |
| 4059 | - CloudProviders: []string{"gce", "gke"}, |
| 4060 | - Provisioner: "kubernetes.io/gce-pd", |
| 4061 | - Parameters: map[string]string{ |
| 4062 | - "type": "pd-standard", |
| 4063 | - }, |
| 4064 | - ClaimSize: "1.5Gi", |
| 4065 | - ExpectedSize: "2Gi", |
| 4066 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4067 | - volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4068 | - gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4069 | - |
| 4070 | - err := checkGCEPD(volume, "pd-standard") |
| 4071 | - framework.ExpectNoError(err, "checkGCEPD pd-standard") |
| 4072 | - }, |
| 4073 | - }, |
| 4074 | - // AWS |
| 4075 | - { |
| 4076 | - Name: "gp2 EBS on AWS", |
| 4077 | - CloudProviders: []string{"aws"}, |
| 4078 | - Provisioner: "kubernetes.io/aws-ebs", |
| 4079 | - Parameters: map[string]string{ |
| 4080 | - "type": "gp2", |
| 4081 | - "zone": getRandomClusterZone(c), |
| 4082 | - }, |
| 4083 | - ClaimSize: "1.5Gi", |
| 4084 | - ExpectedSize: "2Gi", |
| 4085 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4086 | - volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4087 | - gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4088 | - |
| 4089 | - err := checkAWSEBS(volume, "gp2", false) |
| 4090 | - framework.ExpectNoError(err, "checkAWSEBS gp2") |
| 4091 | - }, |
| 4092 | - }, |
| 4093 | - { |
| 4094 | - Name: "io1 EBS on AWS", |
| 4095 | - CloudProviders: []string{"aws"}, |
| 4096 | - Provisioner: "kubernetes.io/aws-ebs", |
| 4097 | - Parameters: map[string]string{ |
| 4098 | - "type": "io1", |
| 4099 | - "iopsPerGB": "50", |
| 4100 | - }, |
| 4101 | - ClaimSize: "3.5Gi", |
| 4102 | - ExpectedSize: "4Gi", // 4 GiB is minimum for io1 |
| 4103 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4104 | - volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4105 | - gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4106 | - |
| 4107 | - err := checkAWSEBS(volume, "io1", false) |
| 4108 | - framework.ExpectNoError(err, "checkAWSEBS io1") |
| 4109 | - }, |
| 4110 | - }, |
| 4111 | - { |
| 4112 | - Name: "sc1 EBS on AWS", |
| 4113 | - CloudProviders: []string{"aws"}, |
| 4114 | - Provisioner: "kubernetes.io/aws-ebs", |
| 4115 | - Parameters: map[string]string{ |
| 4116 | - "type": "sc1", |
| 4117 | - }, |
| 4118 | - ClaimSize: "500Gi", // minimum for sc1 |
| 4119 | - ExpectedSize: "500Gi", |
| 4120 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4121 | - volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4122 | - gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4123 | - |
| 4124 | - err := checkAWSEBS(volume, "sc1", false) |
| 4125 | - framework.ExpectNoError(err, "checkAWSEBS sc1") |
| 4126 | - }, |
| 4127 | - }, |
| 4128 | - { |
| 4129 | - Name: "st1 EBS on AWS", |
| 4130 | - CloudProviders: []string{"aws"}, |
| 4131 | - Provisioner: "kubernetes.io/aws-ebs", |
| 4132 | - Parameters: map[string]string{ |
| 4133 | - "type": "st1", |
| 4134 | - }, |
| 4135 | - ClaimSize: "500Gi", // minimum for st1 |
| 4136 | - ExpectedSize: "500Gi", |
| 4137 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4138 | - volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4139 | - gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4140 | - |
| 4141 | - err := checkAWSEBS(volume, "st1", false) |
| 4142 | - framework.ExpectNoError(err, "checkAWSEBS st1") |
| 4143 | - }, |
| 4144 | - }, |
| 4145 | - { |
| 4146 | - Name: "encrypted EBS on AWS", |
| 4147 | - CloudProviders: []string{"aws"}, |
| 4148 | - Provisioner: "kubernetes.io/aws-ebs", |
| 4149 | - Parameters: map[string]string{ |
| 4150 | - "encrypted": "true", |
| 4151 | - }, |
| 4152 | - ClaimSize: "1Gi", |
| 4153 | - ExpectedSize: "1Gi", |
| 4154 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4155 | - volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4156 | - gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4157 | - |
| 4158 | - err := checkAWSEBS(volume, "gp2", true) |
| 4159 | - framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted") |
| 4160 | - }, |
| 4161 | - }, |
| 4162 | - // OpenStack generic tests (works on all OpenStack deployments) |
| 4163 | - { |
| 4164 | - Name: "generic Cinder volume on OpenStack", |
| 4165 | - CloudProviders: []string{"openstack"}, |
| 4166 | - Provisioner: "kubernetes.io/cinder", |
| 4167 | - Parameters: map[string]string{}, |
| 4168 | - ClaimSize: "1.5Gi", |
| 4169 | - ExpectedSize: "2Gi", |
| 4170 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4171 | - testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4172 | - }, |
| 4173 | - }, |
| 4174 | - { |
| 4175 | - Name: "Cinder volume with empty volume type and zone on OpenStack", |
| 4176 | - CloudProviders: []string{"openstack"}, |
| 4177 | - Provisioner: "kubernetes.io/cinder", |
| 4178 | - Parameters: map[string]string{ |
| 4179 | - "type": "", |
| 4180 | - "availability": "", |
| 4181 | - }, |
| 4182 | - ClaimSize: "1.5Gi", |
| 4183 | - ExpectedSize: "2Gi", |
| 4184 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4185 | - testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4186 | - }, |
| 4187 | - }, |
| 4188 | - // vSphere generic test |
| 4189 | - { |
| 4190 | - Name: "generic vSphere volume", |
| 4191 | - CloudProviders: []string{"vsphere"}, |
| 4192 | - Provisioner: "kubernetes.io/vsphere-volume", |
| 4193 | - Parameters: map[string]string{}, |
| 4194 | - ClaimSize: "1.5Gi", |
| 4195 | - ExpectedSize: "1.5Gi", |
| 4196 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4197 | - testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4198 | - }, |
| 4199 | - }, |
| 4200 | - // Azure |
| 4201 | - { |
| 4202 | - Name: "Azure disk volume with empty sku and location", |
| 4203 | - CloudProviders: []string{"azure"}, |
| 4204 | - Provisioner: "kubernetes.io/azure-disk", |
| 4205 | - Parameters: map[string]string{}, |
| 4206 | - ClaimSize: "1Gi", |
| 4207 | - ExpectedSize: "1Gi", |
| 4208 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4209 | - testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4210 | - }, |
| 4211 | - }, |
| 4212 | - } |
| 4213 | - |
| 4214 | - var betaTest *testsuites.StorageClassTest |
| 4215 | - for i, t := range tests { |
| 4216 | - // Beware of clojure, use local variables instead of those from |
| 4217 | - // outer scope |
| 4218 | - test := t |
| 4219 | - |
| 4220 | - if !framework.ProviderIs(test.CloudProviders...) { |
| 4221 | - framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders) |
| 4222 | - continue |
| 4223 | - } |
| 4224 | - |
| 4225 | - // Remember the last supported test for subsequent test of beta API |
| 4226 | - betaTest = &test |
| 4227 | - |
| 4228 | - ginkgo.By("Testing " + test.Name) |
| 4229 | - suffix := fmt.Sprintf("%d", i) |
| 4230 | - test.Client = c |
| 4231 | - test.Class = newStorageClass(test, ns, suffix) |
| 4232 | - test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4233 | - ClaimSize: test.ClaimSize, |
| 4234 | - StorageClassName: &test.Class.Name, |
| 4235 | - VolumeMode: &test.VolumeMode, |
| 4236 | - }, ns) |
| 4237 | - test.TestDynamicProvisioning() |
| 4238 | - } |
| 4239 | - |
| 4240 | - // Run the last test with storage.k8s.io/v1beta1 on pvc |
| 4241 | - if betaTest != nil { |
| 4242 | - ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning") |
| 4243 | - class := newBetaStorageClass(*betaTest, "beta") |
| 4244 | - // we need to create the class manually, testDynamicProvisioning does not accept beta class |
| 4245 | - class, err := c.StorageV1beta1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) |
| 4246 | - framework.ExpectNoError(err) |
| 4247 | - defer deleteStorageClass(c, class.Name) |
| 4248 | - |
| 4249 | - betaTest.Client = c |
| 4250 | - betaTest.Class = nil |
| 4251 | - betaTest.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4252 | - ClaimSize: betaTest.ClaimSize, |
| 4253 | - StorageClassName: &class.Name, |
| 4254 | - VolumeMode: &betaTest.VolumeMode, |
| 4255 | - }, ns) |
| 4256 | - betaTest.Claim.Spec.StorageClassName = &(class.Name) |
| 4257 | - (*betaTest).TestDynamicProvisioning() |
| 4258 | - } |
| 4259 | - }) |
| 4260 | - |
| 4261 | - ginkgo.It("should provision storage with non-default reclaim policy Retain", func() { |
| 4262 | - e2eskipper.SkipUnlessProviderIs("gce", "gke") |
| 4263 | - |
| 4264 | - test := testsuites.StorageClassTest{ |
| 4265 | - Client: c, |
| 4266 | - Name: "HDD PD on GCE/GKE", |
| 4267 | - CloudProviders: []string{"gce", "gke"}, |
| 4268 | - Provisioner: "kubernetes.io/gce-pd", |
| 4269 | - Parameters: map[string]string{ |
| 4270 | - "type": "pd-standard", |
| 4271 | - }, |
| 4272 | - ClaimSize: "1Gi", |
| 4273 | - ExpectedSize: "1Gi", |
| 4274 | - PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4275 | - volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4276 | - gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4277 | - |
| 4278 | - err := checkGCEPD(volume, "pd-standard") |
| 4279 | - framework.ExpectNoError(err, "checkGCEPD") |
| 4280 | - }, |
| 4281 | - } |
| 4282 | - test.Class = newStorageClass(test, ns, "reclaimpolicy") |
| 4283 | - retain := v1.PersistentVolumeReclaimRetain |
| 4284 | - test.Class.ReclaimPolicy = &retain |
| 4285 | - test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4286 | - ClaimSize: test.ClaimSize, |
| 4287 | - StorageClassName: &test.Class.Name, |
| 4288 | - VolumeMode: &test.VolumeMode, |
| 4289 | - }, ns) |
| 4290 | - pv := test.TestDynamicProvisioning() |
| 4291 | - |
| 4292 | - ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) |
| 4293 | - framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) |
| 4294 | - |
| 4295 | - ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name)) |
| 4296 | - framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName)) |
| 4297 | - |
| 4298 | - ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name)) |
| 4299 | - framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) |
| 4300 | - framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) |
| 4301 | - }) |
| 4302 | - |
| 4303 | - ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() { |
| 4304 | - e2eskipper.SkipUnlessProviderIs("gce", "gke") |
| 4305 | - var suffix string = "unmananged" |
| 4306 | - |
| 4307 | - ginkgo.By("Discovering an unmanaged zone") |
| 4308 | - allZones := sets.NewString() // all zones in the project |
| 4309 | - |
| 4310 | - gceCloud, err := gce.GetGCECloud() |
| 4311 | - framework.ExpectNoError(err) |
| 4312 | - |
| 4313 | - // Get all k8s managed zones (same as zones with nodes in them for test) |
| 4314 | - managedZones, err := gceCloud.GetAllZonesFromCloudProvider() |
| 4315 | - framework.ExpectNoError(err) |
| 4316 | - |
| 4317 | - // Get a list of all zones in the project |
| 4318 | - zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do() |
| 4319 | - framework.ExpectNoError(err) |
| 4320 | - for _, z := range zones.Items { |
| 4321 | - allZones.Insert(z.Name) |
| 4322 | - } |
| 4323 | - |
| 4324 | - // Get the subset of zones not managed by k8s |
| 4325 | - var unmanagedZone string |
| 4326 | - var popped bool |
| 4327 | - unmanagedZones := allZones.Difference(managedZones) |
| 4328 | - // And select one of them at random. |
| 4329 | - if unmanagedZone, popped = unmanagedZones.PopAny(); !popped { |
| 4330 | - e2eskipper.Skipf("No unmanaged zones found.") |
| 4331 | - } |
| 4332 | - |
| 4333 | - ginkgo.By("Creating a StorageClass for the unmanaged zone") |
| 4334 | - test := testsuites.StorageClassTest{ |
| 4335 | - Name: "unmanaged_zone", |
| 4336 | - Provisioner: "kubernetes.io/gce-pd", |
| 4337 | - Parameters: map[string]string{"zone": unmanagedZone}, |
| 4338 | - ClaimSize: "1Gi", |
| 4339 | - } |
| 4340 | - sc := newStorageClass(test, ns, suffix) |
| 4341 | - sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) |
| 4342 | - framework.ExpectNoError(err) |
| 4343 | - defer deleteStorageClass(c, sc.Name) |
| 4344 | - |
| 4345 | - ginkgo.By("Creating a claim and expecting it to timeout") |
| 4346 | - pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4347 | - ClaimSize: test.ClaimSize, |
| 4348 | - StorageClassName: &sc.Name, |
| 4349 | - VolumeMode: &test.VolumeMode, |
| 4350 | - }, ns) |
| 4351 | - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) |
| 4352 | - framework.ExpectNoError(err) |
| 4353 | - defer func() { |
| 4354 | - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) |
| 4355 | - }() |
| 4356 | - |
| 4357 | - // The claim should timeout phase:Pending |
| 4358 | - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) |
| 4359 | - framework.ExpectError(err) |
| 4360 | - framework.Logf(err.Error()) |
| 4361 | - }) |
| 4362 | - |
| 4363 | - ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() { |
| 4364 | - // This case tests for the regressions of a bug fixed by PR #21268 |
| 4365 | - // REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV |
| 4366 | - // not being deleted. |
| 4367 | - // NOTE: Polls until no PVs are detected, times out at 5 minutes. |
| 4368 | - |
| 4369 | - e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") |
| 4370 | - |
| 4371 | - const raceAttempts int = 100 |
| 4372 | - var residualPVs []*v1.PersistentVolume |
| 4373 | - ginkgo.By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts)) |
| 4374 | - test := testsuites.StorageClassTest{ |
| 4375 | - Name: "deletion race", |
| 4376 | - Provisioner: "", // Use a native one based on current cloud provider |
| 4377 | - ClaimSize: "1Gi", |
| 4378 | - } |
| 4379 | - |
| 4380 | - class := newStorageClass(test, ns, "race") |
| 4381 | - class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) |
| 4382 | - framework.ExpectNoError(err) |
| 4383 | - defer deleteStorageClass(c, class.Name) |
| 4384 | - |
| 4385 | - // To increase chance of detection, attempt multiple iterations |
| 4386 | - for i := 0; i < raceAttempts; i++ { |
| 4387 | - prefix := fmt.Sprintf("race-%d", i) |
| 4388 | - claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4389 | - NamePrefix: prefix, |
| 4390 | - ClaimSize: test.ClaimSize, |
| 4391 | - StorageClassName: &class.Name, |
| 4392 | - VolumeMode: &test.VolumeMode, |
| 4393 | - }, ns) |
| 4394 | - tmpClaim, err := e2epv.CreatePVC(c, ns, claim) |
| 4395 | - framework.ExpectNoError(err) |
| 4396 | - framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns)) |
| 4397 | - } |
| 4398 | - |
| 4399 | - ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name)) |
| 4400 | - residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name) |
| 4401 | - // Cleanup the test resources before breaking |
| 4402 | - defer deleteProvisionedVolumesAndDisks(c, residualPVs) |
| 4403 | - framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs)) |
| 4404 | - |
| 4405 | - framework.Logf("0 PersistentVolumes remain.") |
| 4406 | - }) |
| 4407 | - |
| 4408 | - ginkgo.It("deletion should be idempotent", func() { |
| 4409 | - // This test ensures that deletion of a volume is idempotent. |
| 4410 | - // It creates a PV with Retain policy, deletes underlying AWS / GCE |
| 4411 | - // volume and changes the reclaim policy to Delete. |
| 4412 | - // PV controller should delete the PV even though the underlying volume |
| 4413 | - // is already deleted. |
| 4414 | - e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") |
| 4415 | - ginkgo.By("creating PD") |
| 4416 | - diskName, err := e2epv.CreatePDWithRetry() |
| 4417 | - framework.ExpectNoError(err) |
| 4418 | - |
| 4419 | - ginkgo.By("creating PV") |
| 4420 | - pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{ |
| 4421 | - NamePrefix: "volume-idempotent-delete-", |
| 4422 | - // Use Retain to keep the PV, the test will change it to Delete |
| 4423 | - // when the time comes. |
| 4424 | - ReclaimPolicy: v1.PersistentVolumeReclaimRetain, |
| 4425 | - AccessModes: []v1.PersistentVolumeAccessMode{ |
| 4426 | - v1.ReadWriteOnce, |
| 4427 | - }, |
| 4428 | - Capacity: "1Gi", |
| 4429 | - // PV is bound to non-existing PVC, so it's reclaim policy is |
| 4430 | - // executed immediately |
| 4431 | - Prebind: &v1.PersistentVolumeClaim{ |
| 4432 | - ObjectMeta: metav1.ObjectMeta{ |
| 4433 | - Name: "dummy-claim-name", |
| 4434 | - Namespace: ns, |
| 4435 | - UID: types.UID("01234567890"), |
| 4436 | - }, |
| 4437 | - }, |
| 4438 | - }) |
| 4439 | - switch framework.TestContext.Provider { |
| 4440 | - case "aws": |
| 4441 | - pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{ |
| 4442 | - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ |
| 4443 | - VolumeID: diskName, |
| 4444 | - }, |
| 4445 | - } |
| 4446 | - case "gce", "gke": |
| 4447 | - pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{ |
| 4448 | - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ |
| 4449 | - PDName: diskName, |
| 4450 | - }, |
| 4451 | - } |
| 4452 | - } |
| 4453 | - pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) |
| 4454 | - framework.ExpectNoError(err) |
| 4455 | - |
| 4456 | - ginkgo.By("waiting for the PV to get Released") |
| 4457 | - err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout) |
| 4458 | - framework.ExpectNoError(err) |
| 4459 | - |
| 4460 | - ginkgo.By("deleting the PD") |
| 4461 | - err = e2epv.DeletePVSource(&pv.Spec.PersistentVolumeSource) |
| 4462 | - framework.ExpectNoError(err) |
| 4463 | - |
| 4464 | - ginkgo.By("changing the PV reclaim policy") |
| 4465 | - pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) |
| 4466 | - framework.ExpectNoError(err) |
| 4467 | - pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete |
| 4468 | - pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}) |
| 4469 | - framework.ExpectNoError(err) |
| 4470 | - |
| 4471 | - ginkgo.By("waiting for the PV to get deleted") |
| 4472 | - err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout) |
| 4473 | - framework.ExpectNoError(err) |
| 4474 | - }) |
| 4475 | - }) |
| 4476 | - |
| 4477 | ginkgo.Describe("DynamicProvisioner External", func() { |
| 4478 | ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() { |
| 4479 | // external dynamic provisioner pods need additional permissions provided by the |
| 4480 | diff --git a/test/e2e/storage/volume_provisioning_providers.go b/test/e2e/storage/volume_provisioning_providers.go |
| 4481 | new file mode 100644 |
| 4482 | index 00000000000..932c644af7a |
| 4483 | --- /dev/null |
| 4484 | +++ b/test/e2e/storage/volume_provisioning_providers.go |
| 4485 | @@ -0,0 +1,577 @@ |
| 4486 | +// +build !providerless |
| 4487 | + |
| 4488 | +/* |
| 4489 | +Copyright 2016 The Kubernetes Authors. |
| 4490 | + |
| 4491 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 4492 | +you may not use this file except in compliance with the License. |
| 4493 | +You may obtain a copy of the License at |
| 4494 | + |
| 4495 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 4496 | + |
| 4497 | +Unless required by applicable law or agreed to in writing, software |
| 4498 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 4499 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 4500 | +See the License for the specific language governing permissions and |
| 4501 | +limitations under the License. |
| 4502 | +*/ |
| 4503 | + |
| 4504 | +package storage |
| 4505 | + |
| 4506 | +import ( |
| 4507 | + "context" |
| 4508 | + "fmt" |
| 4509 | + "strings" |
| 4510 | + "time" |
| 4511 | + |
| 4512 | + "github.com/aws/aws-sdk-go/aws" |
| 4513 | + "github.com/aws/aws-sdk-go/aws/session" |
| 4514 | + "github.com/aws/aws-sdk-go/service/ec2" |
| 4515 | + "github.com/onsi/ginkgo" |
| 4516 | + "github.com/onsi/gomega" |
| 4517 | + |
| 4518 | + v1 "k8s.io/api/core/v1" |
| 4519 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 4520 | + "k8s.io/apimachinery/pkg/types" |
| 4521 | + "k8s.io/apimachinery/pkg/util/sets" |
| 4522 | + clientset "k8s.io/client-go/kubernetes" |
| 4523 | + "k8s.io/kubernetes/test/e2e/framework" |
| 4524 | + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" |
| 4525 | + "k8s.io/kubernetes/test/e2e/framework/providers/gce" |
| 4526 | + e2epv "k8s.io/kubernetes/test/e2e/framework/pv" |
| 4527 | + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" |
| 4528 | + "k8s.io/kubernetes/test/e2e/storage/testsuites" |
| 4529 | + "k8s.io/kubernetes/test/e2e/storage/utils" |
| 4530 | +) |
| 4531 | + |
| 4532 | +// checkAWSEBS checks properties of an AWS EBS volume. The test framework does not
| 4533 | +// instantiate the full AWS provider, therefore we need to use the ec2 API directly.
| 4534 | +func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error { |
| 4535 | + diskName := volume.Spec.AWSElasticBlockStore.VolumeID |
| 4536 | + |
| 4537 | + var client *ec2.EC2 |
| 4538 | + |
| 4539 | + tokens := strings.Split(diskName, "/") |
| 4540 | + volumeID := tokens[len(tokens)-1] |
| 4541 | + |
| 4542 | + zone := framework.TestContext.CloudConfig.Zone |
| 4543 | + |
| 4544 | + awsSession, err := session.NewSession() |
| 4545 | + if err != nil { |
| 4546 | + return fmt.Errorf("error creating session: %v", err) |
| 4547 | + } |
| 4548 | + |
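| | + // An AWS zone name is the region plus a one-letter suffix (e.g. us-east-1a), so strip the last character to get the region.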
| 4549 | + if len(zone) > 0 { |
| 4550 | + region := zone[:len(zone)-1] |
| 4551 | + cfg := aws.Config{Region: ®ion} |
| 4552 | + framework.Logf("using region %s", region) |
| 4553 | + client = ec2.New(awsSession, &cfg) |
| 4554 | + } else { |
| 4555 | + framework.Logf("no region configured") |
| 4556 | + client = ec2.New(awsSession) |
| 4557 | + } |
| 4558 | + |
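| | + // Query EC2 for the volume by ID; the checks below expect exactly one match.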
| 4559 | + request := &ec2.DescribeVolumesInput{ |
| 4560 | + VolumeIds: []*string{&volumeID}, |
| 4561 | + } |
| 4562 | + info, err := client.DescribeVolumes(request) |
| 4563 | + if err != nil { |
| 4564 | + return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err) |
| 4565 | + } |
| 4566 | + if len(info.Volumes) == 0 { |
| 4567 | + return fmt.Errorf("no volumes found for volume %q", volumeID) |
| 4568 | + } |
| 4569 | + if len(info.Volumes) > 1 { |
| 4570 | + return fmt.Errorf("multiple volumes found for volume %q", volumeID) |
| 4571 | + } |
| 4572 | + |
| 4573 | + awsVolume := info.Volumes[0] |
| 4574 | + if awsVolume.VolumeType == nil { |
| 4575 | + return fmt.Errorf("expected volume type %q, got nil", volumeType) |
| 4576 | + } |
| 4577 | + if *awsVolume.VolumeType != volumeType { |
| 4578 | + return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType) |
| 4579 | + } |
| 4580 | + if encrypted && awsVolume.Encrypted == nil { |
| 4581 | + return fmt.Errorf("expected encrypted volume, got no encryption") |
| 4582 | + } |
| 4583 | + if encrypted && !*awsVolume.Encrypted { |
| 4584 | + return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted) |
| 4585 | + } |
| 4586 | + return nil |
| 4587 | +} |
| 4588 | + |
| 4589 | +func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error { |
| 4590 | + cloud, err := gce.GetGCECloud() |
| 4591 | + if err != nil { |
| 4592 | + return err |
| 4593 | + } |
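| | + // The disk's zone is unknown to the test, so search all zones for it.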
| 4594 | + diskName := volume.Spec.GCEPersistentDisk.PDName |
| 4595 | + disk, err := cloud.GetDiskByNameUnknownZone(diskName) |
| 4596 | + if err != nil { |
| 4597 | + return err |
| 4598 | + } |
| 4599 | + |
| 4600 | + if !strings.HasSuffix(disk.Type, volumeType) { |
| 4601 | + return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType) |
| 4602 | + } |
| 4603 | + return nil |
| 4604 | +} |
| 4605 | + |
| 4606 | +var _ = utils.SIGDescribe("Dynamic Provisioning with cloud providers", func() { |
| 4607 | + f := framework.NewDefaultFramework("volume-provisioning") |
| 4608 | + |
| 4609 | + // filled in by BeforeEach
| 4610 | + var c clientset.Interface |
| 4611 | + var ns string |
| 4612 | + |
| 4613 | + ginkgo.BeforeEach(func() { |
| 4614 | + c = f.ClientSet |
| 4615 | + ns = f.Namespace.Name |
| 4616 | + }) |
| 4617 | + |
| 4618 | + ginkgo.Describe("DynamicProvisioner [Slow]", func() { |
| 4619 | + ginkgo.It("should provision storage with different parameters", func() { |
| 4620 | + |
| 4621 | + // This test checks that dynamic provisioning can provision a volume |
| 4622 | + // that can be used to persist data among pods. |
| 4623 | + tests := []testsuites.StorageClassTest{ |
| 4624 | + // GCE/GKE |
| 4625 | + { |
| 4626 | + Name: "SSD PD on GCE/GKE", |
| 4627 | + CloudProviders: []string{"gce", "gke"}, |
| 4628 | + Provisioner: "kubernetes.io/gce-pd", |
| 4629 | + Parameters: map[string]string{ |
| 4630 | + "type": "pd-ssd", |
| 4631 | + "zone": getRandomClusterZone(c), |
| 4632 | + }, |
| 4633 | + ClaimSize: "1.5Gi", |
| 4634 | + ExpectedSize: "2Gi", |
| 4635 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4636 | + volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4637 | + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4638 | + |
| 4639 | + err := checkGCEPD(volume, "pd-ssd") |
| 4640 | + framework.ExpectNoError(err, "checkGCEPD pd-ssd") |
| 4641 | + }, |
| 4642 | + }, |
| 4643 | + { |
| 4644 | + Name: "HDD PD on GCE/GKE", |
| 4645 | + CloudProviders: []string{"gce", "gke"}, |
| 4646 | + Provisioner: "kubernetes.io/gce-pd", |
| 4647 | + Parameters: map[string]string{ |
| 4648 | + "type": "pd-standard", |
| 4649 | + }, |
| 4650 | + ClaimSize: "1.5Gi", |
| 4651 | + ExpectedSize: "2Gi", |
| 4652 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4653 | + volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4654 | + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4655 | + |
| 4656 | + err := checkGCEPD(volume, "pd-standard") |
| 4657 | + framework.ExpectNoError(err, "checkGCEPD pd-standard") |
| 4658 | + }, |
| 4659 | + }, |
| 4660 | + // AWS |
| 4661 | + { |
| 4662 | + Name: "gp2 EBS on AWS", |
| 4663 | + CloudProviders: []string{"aws"}, |
| 4664 | + Provisioner: "kubernetes.io/aws-ebs", |
| 4665 | + Parameters: map[string]string{ |
| 4666 | + "type": "gp2", |
| 4667 | + "zone": getRandomClusterZone(c), |
| 4668 | + }, |
| 4669 | + ClaimSize: "1.5Gi", |
| 4670 | + ExpectedSize: "2Gi", |
| 4671 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4672 | + volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4673 | + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4674 | + |
| 4675 | + err := checkAWSEBS(volume, "gp2", false) |
| 4676 | + framework.ExpectNoError(err, "checkAWSEBS gp2") |
| 4677 | + }, |
| 4678 | + }, |
| 4679 | + { |
| 4680 | + Name: "io1 EBS on AWS", |
| 4681 | + CloudProviders: []string{"aws"}, |
| 4682 | + Provisioner: "kubernetes.io/aws-ebs", |
| 4683 | + Parameters: map[string]string{ |
| 4684 | + "type": "io1", |
| 4685 | + "iopsPerGB": "50", |
| 4686 | + }, |
| 4687 | + ClaimSize: "3.5Gi", |
| 4688 | + ExpectedSize: "4Gi", // 4 GiB is the minimum for io1
| 4689 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4690 | + volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4691 | + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4692 | + |
| 4693 | + err := checkAWSEBS(volume, "io1", false) |
| 4694 | + framework.ExpectNoError(err, "checkAWSEBS io1") |
| 4695 | + }, |
| 4696 | + }, |
| 4697 | + { |
| 4698 | + Name: "sc1 EBS on AWS", |
| 4699 | + CloudProviders: []string{"aws"}, |
| 4700 | + Provisioner: "kubernetes.io/aws-ebs", |
| 4701 | + Parameters: map[string]string{ |
| 4702 | + "type": "sc1", |
| 4703 | + }, |
| 4704 | + ClaimSize: "500Gi", // minimum for sc1 |
| 4705 | + ExpectedSize: "500Gi", |
| 4706 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4707 | + volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4708 | + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4709 | + |
| 4710 | + err := checkAWSEBS(volume, "sc1", false) |
| 4711 | + framework.ExpectNoError(err, "checkAWSEBS sc1") |
| 4712 | + }, |
| 4713 | + }, |
| 4714 | + { |
| 4715 | + Name: "st1 EBS on AWS", |
| 4716 | + CloudProviders: []string{"aws"}, |
| 4717 | + Provisioner: "kubernetes.io/aws-ebs", |
| 4718 | + Parameters: map[string]string{ |
| 4719 | + "type": "st1", |
| 4720 | + }, |
| 4721 | + ClaimSize: "500Gi", // minimum for st1 |
| 4722 | + ExpectedSize: "500Gi", |
| 4723 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4724 | + volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4725 | + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4726 | + |
| 4727 | + err := checkAWSEBS(volume, "st1", false) |
| 4728 | + framework.ExpectNoError(err, "checkAWSEBS st1") |
| 4729 | + }, |
| 4730 | + }, |
| 4731 | + { |
| 4732 | + Name: "encrypted EBS on AWS", |
| 4733 | + CloudProviders: []string{"aws"}, |
| 4734 | + Provisioner: "kubernetes.io/aws-ebs", |
| 4735 | + Parameters: map[string]string{ |
| 4736 | + "encrypted": "true", |
| 4737 | + }, |
| 4738 | + ClaimSize: "1Gi", |
| 4739 | + ExpectedSize: "1Gi", |
| 4740 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4741 | + volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4742 | + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4743 | + |
| 4744 | + err := checkAWSEBS(volume, "gp2", true) |
| 4745 | + framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted") |
| 4746 | + }, |
| 4747 | + }, |
| 4748 | + // OpenStack generic tests (these work on all OpenStack deployments)
| 4749 | + { |
| 4750 | + Name: "generic Cinder volume on OpenStack", |
| 4751 | + CloudProviders: []string{"openstack"}, |
| 4752 | + Provisioner: "kubernetes.io/cinder", |
| 4753 | + Parameters: map[string]string{}, |
| 4754 | + ClaimSize: "1.5Gi", |
| 4755 | + ExpectedSize: "2Gi", |
| 4756 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4757 | + testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4758 | + }, |
| 4759 | + }, |
| 4760 | + { |
| 4761 | + Name: "Cinder volume with empty volume type and zone on OpenStack", |
| 4762 | + CloudProviders: []string{"openstack"}, |
| 4763 | + Provisioner: "kubernetes.io/cinder", |
| 4764 | + Parameters: map[string]string{ |
| 4765 | + "type": "", |
| 4766 | + "availability": "", |
| 4767 | + }, |
| 4768 | + ClaimSize: "1.5Gi", |
| 4769 | + ExpectedSize: "2Gi", |
| 4770 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4771 | + testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4772 | + }, |
| 4773 | + }, |
| 4774 | + // vSphere generic test |
| 4775 | + { |
| 4776 | + Name: "generic vSphere volume", |
| 4777 | + CloudProviders: []string{"vsphere"}, |
| 4778 | + Provisioner: "kubernetes.io/vsphere-volume", |
| 4779 | + Parameters: map[string]string{}, |
| 4780 | + ClaimSize: "1.5Gi", |
| 4781 | + ExpectedSize: "1.5Gi", |
| 4782 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4783 | + testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4784 | + }, |
| 4785 | + }, |
| 4786 | + // Azure |
| 4787 | + { |
| 4788 | + Name: "Azure disk volume with empty sku and location", |
| 4789 | + CloudProviders: []string{"azure"}, |
| 4790 | + Provisioner: "kubernetes.io/azure-disk", |
| 4791 | + Parameters: map[string]string{}, |
| 4792 | + ClaimSize: "1Gi", |
| 4793 | + ExpectedSize: "1Gi", |
| 4794 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4795 | + testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4796 | + }, |
| 4797 | + }, |
| 4798 | + } |
| 4799 | + |
| 4800 | + var betaTest *testsuites.StorageClassTest |
| 4801 | + for i, t := range tests { |
| 4802 | + // Beware of closures; use local variables instead of those from
| 4803 | + // the outer scope.
| 4804 | + test := t |
| 4805 | + |
| 4806 | + if !framework.ProviderIs(test.CloudProviders...) { |
| 4807 | + framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders) |
| 4808 | + continue |
| 4809 | + } |
| 4810 | + |
| 4811 | + // Remember the last supported test for the subsequent beta API test
| 4812 | + betaTest = &test |
| 4813 | + |
| 4814 | + ginkgo.By("Testing " + test.Name) |
| 4815 | + suffix := fmt.Sprintf("%d", i) |
| 4816 | + test.Client = c |
| 4817 | + test.Class = newStorageClass(test, ns, suffix) |
| 4818 | + test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4819 | + ClaimSize: test.ClaimSize, |
| 4820 | + StorageClassName: &test.Class.Name, |
| 4821 | + VolumeMode: &test.VolumeMode, |
| 4822 | + }, ns) |
| 4823 | + test.TestDynamicProvisioning() |
| 4824 | + } |
| 4825 | + |
| 4826 | + // Run the last test with storage.k8s.io/v1beta1 on the PVC
| 4827 | + if betaTest != nil { |
| 4828 | + ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning") |
| 4829 | + class := newBetaStorageClass(*betaTest, "beta") |
| 4830 | + // We need to create the class manually; testDynamicProvisioning does not accept a beta class
| 4831 | + class, err := c.StorageV1beta1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) |
| 4832 | + framework.ExpectNoError(err) |
| 4833 | + defer deleteStorageClass(c, class.Name) |
| 4834 | + |
| 4835 | + betaTest.Client = c |
| 4836 | + betaTest.Class = nil |
| 4837 | + betaTest.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4838 | + ClaimSize: betaTest.ClaimSize, |
| 4839 | + StorageClassName: &class.Name, |
| 4840 | + VolumeMode: &betaTest.VolumeMode, |
| 4841 | + }, ns) |
| 4842 | + betaTest.Claim.Spec.StorageClassName = &(class.Name) |
| 4843 | + (*betaTest).TestDynamicProvisioning() |
| 4844 | + } |
| 4845 | + }) |
| 4846 | + |
| 4847 | + ginkgo.It("should provision storage with non-default reclaim policy Retain", func() { |
| 4848 | + e2eskipper.SkipUnlessProviderIs("gce", "gke") |
| 4849 | + |
| 4850 | + test := testsuites.StorageClassTest{ |
| 4851 | + Client: c, |
| 4852 | + Name: "HDD PD on GCE/GKE", |
| 4853 | + CloudProviders: []string{"gce", "gke"}, |
| 4854 | + Provisioner: "kubernetes.io/gce-pd", |
| 4855 | + Parameters: map[string]string{ |
| 4856 | + "type": "pd-standard", |
| 4857 | + }, |
| 4858 | + ClaimSize: "1Gi", |
| 4859 | + ExpectedSize: "1Gi", |
| 4860 | + PvCheck: func(claim *v1.PersistentVolumeClaim) { |
| 4861 | + volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{}) |
| 4862 | + gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") |
| 4863 | + |
| 4864 | + err := checkGCEPD(volume, "pd-standard") |
| 4865 | + framework.ExpectNoError(err, "checkGCEPD") |
| 4866 | + }, |
| 4867 | + } |
| 4868 | + test.Class = newStorageClass(test, ns, "reclaimpolicy") |
| 4869 | + retain := v1.PersistentVolumeReclaimRetain |
| 4870 | + test.Class.ReclaimPolicy = &retain |
| 4871 | + test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4872 | + ClaimSize: test.ClaimSize, |
| 4873 | + StorageClassName: &test.Class.Name, |
| 4874 | + VolumeMode: &test.VolumeMode, |
| 4875 | + }, ns) |
| 4876 | + pv := test.TestDynamicProvisioning() |
| 4877 | + |
| 4878 | + ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) |
| 4879 | + framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) |
| 4880 | + |
| 4881 | + ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name)) |
| 4882 | + framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName)) |
| 4883 | + |
| 4884 | + ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name)) |
| 4885 | + framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) |
| 4886 | + framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) |
| 4887 | + }) |
| 4888 | + |
| 4889 | + ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() { |
| 4890 | + e2eskipper.SkipUnlessProviderIs("gce", "gke") |
| 4891 | + suffix := "unmanaged"
| 4892 | + |
| 4893 | + ginkgo.By("Discovering an unmanaged zone") |
| 4894 | + allZones := sets.NewString() // all zones in the project |
| 4895 | + |
| 4896 | + gceCloud, err := gce.GetGCECloud() |
| 4897 | + framework.ExpectNoError(err) |
| 4898 | + |
| 4899 | + // Get all k8s-managed zones (for this test, these are the zones that contain nodes)
| 4900 | + managedZones, err := gceCloud.GetAllZonesFromCloudProvider() |
| 4901 | + framework.ExpectNoError(err) |
| 4902 | + |
| 4903 | + // Get a list of all zones in the project |
| 4904 | + zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do() |
| 4905 | + framework.ExpectNoError(err) |
| 4906 | + for _, z := range zones.Items { |
| 4907 | + allZones.Insert(z.Name) |
| 4908 | + } |
| 4909 | + |
| 4910 | + // Get the subset of zones not managed by k8s |
| 4911 | + var unmanagedZone string |
| 4912 | + var popped bool |
| 4913 | + unmanagedZones := allZones.Difference(managedZones) |
| 4914 | + // And pick an arbitrary one of them.
| 4915 | + if unmanagedZone, popped = unmanagedZones.PopAny(); !popped { |
| 4916 | + e2eskipper.Skipf("No unmanaged zones found.") |
| 4917 | + } |
| 4918 | + |
| 4919 | + ginkgo.By("Creating a StorageClass for the unmanaged zone") |
| 4920 | + test := testsuites.StorageClassTest{ |
| 4921 | + Name: "unmanaged_zone", |
| 4922 | + Provisioner: "kubernetes.io/gce-pd", |
| 4923 | + Parameters: map[string]string{"zone": unmanagedZone}, |
| 4924 | + ClaimSize: "1Gi", |
| 4925 | + } |
| 4926 | + sc := newStorageClass(test, ns, suffix) |
| 4927 | + sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) |
| 4928 | + framework.ExpectNoError(err) |
| 4929 | + defer deleteStorageClass(c, sc.Name) |
| 4930 | + |
| 4931 | + ginkgo.By("Creating a claim and expecting it to timeout") |
| 4932 | + pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4933 | + ClaimSize: test.ClaimSize, |
| 4934 | + StorageClassName: &sc.Name, |
| 4935 | + VolumeMode: &test.VolumeMode, |
| 4936 | + }, ns) |
| 4937 | + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{}) |
| 4938 | + framework.ExpectNoError(err) |
| 4939 | + defer func() { |
| 4940 | + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) |
| 4941 | + }() |
| 4942 | + |
| 4943 | + // The claim should time out while stuck in phase Pending
| 4944 | + err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) |
| 4945 | + framework.ExpectError(err) |
| 4946 | + framework.Logf("%v", err)
| 4947 | + }) |
| 4948 | + |
| 4949 | + ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() { |
| 4950 | + // This case tests for regressions of a bug fixed by PR #21268:
| 4951 | + // REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV |
| 4952 | + // not being deleted. |
| 4953 | + // NOTE: Polls until no PVs are detected, times out at 5 minutes. |
| 4954 | + |
| 4955 | + e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") |
| 4956 | + |
| 4957 | + const raceAttempts int = 100 |
| 4958 | + var residualPVs []*v1.PersistentVolume |
| 4959 | + ginkgo.By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts)) |
| 4960 | + test := testsuites.StorageClassTest{ |
| 4961 | + Name: "deletion race", |
| 4962 | + Provisioner: "", // Use the native provisioner for the current cloud provider
| 4963 | + ClaimSize: "1Gi", |
| 4964 | + } |
| 4965 | + |
| 4966 | + class := newStorageClass(test, ns, "race") |
| 4967 | + class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) |
| 4968 | + framework.ExpectNoError(err) |
| 4969 | + defer deleteStorageClass(c, class.Name) |
| 4970 | + |
| 4971 | + // To increase the chance of detection, attempt multiple iterations
| 4972 | + for i := 0; i < raceAttempts; i++ { |
| 4973 | + prefix := fmt.Sprintf("race-%d", i) |
| 4974 | + claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ |
| 4975 | + NamePrefix: prefix, |
| 4976 | + ClaimSize: test.ClaimSize, |
| 4977 | + StorageClassName: &class.Name, |
| 4978 | + VolumeMode: &test.VolumeMode, |
| 4979 | + }, ns) |
| 4980 | + tmpClaim, err := e2epv.CreatePVC(c, ns, claim) |
| 4981 | + framework.ExpectNoError(err) |
| 4982 | + framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns)) |
| 4983 | + } |
| 4984 | + |
| 4985 | + ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name)) |
| 4986 | + residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name) |
| 4987 | + // Clean up the test resources before returning
| 4988 | + defer deleteProvisionedVolumesAndDisks(c, residualPVs) |
| 4989 | + framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs)) |
| 4990 | + |
| 4991 | + framework.Logf("0 PersistentVolumes remain.") |
| 4992 | + }) |
| 4993 | + |
| 4994 | + ginkgo.It("deletion should be idempotent", func() { |
| 4995 | + // This test ensures that deletion of a volume is idempotent. |
| 4996 | + // It creates a PV with the Retain policy, deletes the underlying
| 4997 | + // AWS/GCE volume, and then changes the reclaim policy to Delete.
| 4998 | + // The PV controller should delete the PV even though the underlying
| 4999 | + // volume is already gone.
| 5000 | + e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") |
| 5001 | + ginkgo.By("creating PD") |
| 5002 | + diskName, err := e2epv.CreatePDWithRetry() |
| 5003 | + framework.ExpectNoError(err) |
| 5004 | + |
| 5005 | + ginkgo.By("creating PV") |
| 5006 | + pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{ |
| 5007 | + NamePrefix: "volume-idempotent-delete-", |
| 5008 | + // Use Retain to keep the PV; the test will change it to Delete
| 5009 | + // when the time comes.
| 5010 | + ReclaimPolicy: v1.PersistentVolumeReclaimRetain, |
| 5011 | + AccessModes: []v1.PersistentVolumeAccessMode{ |
| 5012 | + v1.ReadWriteOnce, |
| 5013 | + }, |
| 5014 | + Capacity: "1Gi", |
| 5015 | + // The PV is bound to a non-existent PVC, so its reclaim policy is
| 5016 | + // executed immediately.
| 5017 | + Prebind: &v1.PersistentVolumeClaim{ |
| 5018 | + ObjectMeta: metav1.ObjectMeta{ |
| 5019 | + Name: "dummy-claim-name", |
| 5020 | + Namespace: ns, |
| 5021 | + UID: types.UID("01234567890"), |
| 5022 | + }, |
| 5023 | + }, |
| 5024 | + }) |
| 5025 | + switch framework.TestContext.Provider { |
| 5026 | + case "aws": |
| 5027 | + pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{ |
| 5028 | + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ |
| 5029 | + VolumeID: diskName, |
| 5030 | + }, |
| 5031 | + } |
| 5032 | + case "gce", "gke": |
| 5033 | + pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{ |
| 5034 | + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ |
| 5035 | + PDName: diskName, |
| 5036 | + }, |
| 5037 | + } |
| 5038 | + } |
| 5039 | + pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) |
| 5040 | + framework.ExpectNoError(err) |
| 5041 | + |
| 5042 | + ginkgo.By("waiting for the PV to get Released") |
| 5043 | + err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout) |
| 5044 | + framework.ExpectNoError(err) |
| 5045 | + |
| 5046 | + ginkgo.By("deleting the PD") |
| 5047 | + err = e2epv.DeletePVSource(&pv.Spec.PersistentVolumeSource) |
| 5048 | + framework.ExpectNoError(err) |
| 5049 | + |
| 5050 | + ginkgo.By("changing the PV reclaim policy") |
| 5051 | + pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) |
| 5052 | + framework.ExpectNoError(err) |
| 5053 | + pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete |
| 5054 | + pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}) |
| 5055 | + framework.ExpectNoError(err) |
| 5056 | + |
| 5057 | + ginkgo.By("waiting for the PV to get deleted") |
| 5058 | + err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout) |
| 5059 | + framework.ExpectNoError(err) |
| 5060 | + }) |
| 5061 | + }) |
| 5062 | +}) |
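
The per-iteration copy in the provisioning loop above (`test := t` before `betaTest = &test`) guards against the classic Go loop-variable capture bug: prior to Go 1.22, the range variable is a single storage location reused on every iteration, so any pointer or closure that captures it ends up observing the last element. A minimal standalone sketch of the pitfall and the fix, using only the standard library:

    package main

    import "fmt"

    func main() {
    	tests := []string{"gce", "vsphere", "azure"}

    	// Without the shadowing copy, &t would point at the same reused
    	// loop variable on every iteration (pre-Go 1.22), and all three
    	// stored pointers would alias "azure".
    	var picked []*string
    	for _, t := range tests {
    		t := t // copy into a fresh per-iteration variable
    		picked = append(picked, &t)
    	}

    	for _, p := range picked {
    		fmt.Println(*p) // prints gce, vsphere, azure, as intended
    	}
    }
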
| 5063 | diff --git a/test/e2e/upgrades/nvidia-gpu.go b/test/e2e/upgrades/nvidia-gpu.go |
| 5064 | index cf3b8c0cda3..30515197ef7 100644 |
| 5065 | --- a/test/e2e/upgrades/nvidia-gpu.go |
| 5066 | +++ b/test/e2e/upgrades/nvidia-gpu.go |
| 5067 | @@ -1,3 +1,5 @@ |
| 5068 | +// +build !providerless |
| 5069 | + |
| 5070 | /* |
| 5071 | Copyright 2018 The Kubernetes Authors. |
| 5072 | |
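
For context, `// +build !providerless` is the Go build-constraint syntax of this era (Go 1.17 later added the `//go:build` form): the file is compiled only when the `providerless` tag is absent, which is how the patch makes whole provider-dependent test files vanish from a `-tags providerless` build. A minimal sketch of the pattern, with hypothetical file, package, and function names:

    // feature_providers.go — compiled only in the default build;
    // `go test -tags providerless` excludes this file entirely.
    // The blank line after the constraint is mandatory.

    // +build !providerless

    package example

    // providerBackedProbe stands in for a test helper that talks to a
    // real cloud API and would not link in a providerless build.
    func providerBackedProbe() string { return "reached GCE/AWS/Azure" }

This is also why the patch splits files such as service.go and in_tree.go into a providerless core plus a `*_providers.go` counterpart, instead of sprinkling runtime checks through them.
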
| 5073 | diff --git a/test/e2e/instrumentation/logging/imports.go b/test/e2e/instrumentation/logging/imports.go |
| 5074 | index 5dd66717db1..fc15c04bfef 100644 |
| 5075 | --- a/test/e2e/instrumentation/logging/imports.go |
| 5076 | +++ b/test/e2e/instrumentation/logging/imports.go |
| 5077 | @@ -1,3 +1,5 @@ |
| 5078 | +// +build !providerless |
| 5079 | + |
| 5080 | /* |
| 5081 | Copyright 2017 The Kubernetes Authors. |
| 5082 | |
| 5083 | -- |
| 5084 | 2.25.1 |
| 5085 | |
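Build tags are only the compile-time half of the gating visible in this patch; at runtime the suite still checks the configured provider, as in the `framework.ProviderIs` and `e2eskipper.SkipUnlessProviderIs` calls above. A hedged sketch of how the two layers combine in a single test file, assuming the import paths the Kubernetes e2e framework used at the time (the test body is illustrative, not taken from the patch):

    // +build !providerless

    package example

    import (
    	"github.com/onsi/ginkgo"

    	"k8s.io/kubernetes/test/e2e/framework"
    	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    )

    var _ = ginkgo.It("exercises a GCE-backed volume [hypothetical]", func() {
    	// Runtime guard: skip unless the suite was launched with a
    	// matching --provider flag; this complements the build tag
    	// above, which already removes the file from providerless builds.
    	e2eskipper.SkipUnlessProviderIs("gce", "gke")

    	if framework.ProviderIs("gke") {
    		framework.Logf("running against GKE; GCE-only checks still apply")
    	}
    	// ... provider-specific assertions would go here ...
    })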