treewide: switch to gomod and bump everything

This switches version resolution from fietsje to gomod and updates
all Go dependencies. It also bumps rules_go (required by gVisor) and
switches the Gazelle naming convention from go_default_xxx to the
standard Bazel convention, where the default target is named after
its package.
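
As an illustration, here is a before/after sketch of the rename for a
hypothetical package //foo/bar (not a path taken from this
repository):

  load("@io_bazel_rules_go//go:def.bzl", "go_library")

  # Old convention: Gazelle named every Go library go_default_library.
  go_library(
      name = "go_default_library",
      srcs = ["bar.go"],
      importpath = "example.com/foo/bar",
  )
  # Referenced as "//foo/bar:go_default_library".

  # New convention: the default target carries the package name.
  go_library(
      name = "bar",
      srcs = ["bar.go"],
      importpath = "example.com/foo/bar",
  )
  # Referenced simply as "//foo/bar" (short for "//foo/bar:bar").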

Since Kubernetes dropped upstream Bazel support and doesn't check in
all generated files, I manually pregenerated the OpenAPI spec. This
should be fixed properly, but given the already-huge scope of this CL
and the rebase complexity, that work is not included here.

Change-Id: Iec8ea613d06946882426c2f9fad5bda7e8aaf833
Reviewed-on: https://review.monogon.dev/c/monogon/+/639
Reviewed-by: Sergiusz Bazanski <serge@monogon.tech>
Reviewed-by: Leopold Schabel <leo@nexantic.com>
diff --git a/third_party/go/patches/cel-fix-antlr.patch b/third_party/go/patches/cel-fix-antlr.patch
new file mode 100644
index 0000000..5c5636a
--- /dev/null
+++ b/third_party/go/patches/cel-fix-antlr.patch
@@ -0,0 +1,83 @@
+From a0f8916f104566a2538a70153c016de7c5d9f304 Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Wed, 16 Mar 2022 18:54:11 +0100
+Subject: [PATCH] Use conventional import path for antlr
+
+---
+ WORKSPACE              | 9 +++++----
+ checker/BUILD.bazel    | 2 +-
+ parser/BUILD.bazel     | 4 ++--
+ parser/gen/BUILD.bazel | 2 +-
+ 4 files changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/WORKSPACE b/WORKSPACE
+index 4238e7a..b62cf15 100644
+--- a/WORKSPACE
++++ b/WORKSPACE
+@@ -99,11 +99,12 @@ go_repository(
+     version = "v0.3.2",
+ )
+ 
+-# Antlr deps to pickup golang concurrency fixes 4/30/2020
++# Antlr deps
+ go_repository(
+-    name = "com_github_antlr",
+-    commit = "621b933c7a7f01c67ae9de15103151fa0f9d6d90",
+-    importpath = "github.com/antlr/antlr4",
++    name = "com_github_antlr_antlr4_runtime_go_antlr",
++    importpath = "github.com/antlr/antlr4/runtime/Go/antlr",
++    sum = "h1:zvkJv+9Pxm1nnEMcKnShREt4qtduHKz4iw4AB4ul0Ao=",
++    version = "v0.0.0-20220209173558-ad29539cd2e9",
+ )
+ 
+ # CEL Spec deps
+diff --git a/checker/BUILD.bazel b/checker/BUILD.bazel
+index 2ef326b..bec40b6 100644
+--- a/checker/BUILD.bazel
++++ b/checker/BUILD.bazel
+@@ -54,7 +54,7 @@ go_test(
+         "//test:go_default_library",
+         "//test/proto2pb:go_default_library",
+         "//test/proto3pb:go_default_library",
+-        "@com_github_antlr//runtime/Go/antlr:go_default_library",
++        "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
+         "@org_golang_google_protobuf//proto:go_default_library",
+     ],
+ )
+diff --git a/parser/BUILD.bazel b/parser/BUILD.bazel
+index a1917b4..b76e6e4 100644
+--- a/parser/BUILD.bazel
++++ b/parser/BUILD.bazel
+@@ -23,7 +23,7 @@ go_library(
+         "//common/operators:go_default_library",
+         "//common/runes:go_default_library",
+         "//parser/gen:go_default_library",
+-        "@com_github_antlr//runtime/Go/antlr:go_default_library",
++        "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
+         "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+@@ -45,7 +45,7 @@ go_test(
+         "//common/debug:go_default_library",
+         "//parser/gen:go_default_library",
+         "//test:go_default_library",
+-        "@com_github_antlr//runtime/Go/antlr:go_default_library",
++        "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
+         "@org_golang_google_protobuf//proto:go_default_library",
+     ],
+ )
+diff --git a/parser/gen/BUILD.bazel b/parser/gen/BUILD.bazel
+index 78e0432..2271131 100644
+--- a/parser/gen/BUILD.bazel
++++ b/parser/gen/BUILD.bazel
+@@ -21,6 +21,6 @@ go_library(
+     ],
+     importpath = "github.com/google/cel-go/parser/gen",
+     deps = [
+-        "@com_github_antlr//runtime/Go/antlr:go_default_library",
++        "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
+     ],
+ )
+-- 
+2.25.1
+
diff --git a/third_party/go/patches/cel-fix-googleapis.patch b/third_party/go/patches/cel-fix-googleapis.patch
new file mode 100644
index 0000000..518cdd0
--- /dev/null
+++ b/third_party/go/patches/cel-fix-googleapis.patch
@@ -0,0 +1,256 @@
+From 86ef097e120745353232555f1d87382cac5ce2f0 Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Mon, 21 Mar 2022 16:11:14 +0100
+Subject: [PATCH] Fix googleapis import paths
+
+---
+ cel/BUILD.bazel               |  4 ++--
+ checker/BUILD.bazel           |  2 +-
+ checker/decls/BUILD.bazel     |  2 +-
+ common/BUILD.bazel            |  2 +-
+ common/containers/BUILD.bazel |  4 ++--
+ common/debug/BUILD.bazel      |  2 +-
+ common/types/BUILD.bazel      |  4 ++--
+ common/types/pb/BUILD.bazel   |  2 +-
+ common/types/ref/BUILD.bazel  |  2 +-
+ ext/BUILD.bazel               |  2 +-
+ interpreter/BUILD.bazel       |  4 ++--
+ parser/BUILD.bazel            |  2 +-
+ server/BUILD.bazel            | 12 ++++++------
+ test/BUILD.bazel              |  2 +-
+ 14 files changed, 23 insertions(+), 23 deletions(-)
+
+diff --git a/cel/BUILD.bazel b/cel/BUILD.bazel
+index aed8274..6a5b8f2 100644
+--- a/cel/BUILD.bazel
++++ b/cel/BUILD.bazel
+@@ -27,7 +27,7 @@ go_library(
+         "//interpreter:go_default_library",
+         "//interpreter/functions:go_default_library",
+         "//parser:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
+         "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+@@ -59,6 +59,6 @@ go_test(
+         "//test/proto2pb:go_default_library",
+         "//test/proto3pb:go_default_library",
+         "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+     ],
+ )
+diff --git a/checker/BUILD.bazel b/checker/BUILD.bazel
+index 0f6e469..0873707 100644
+--- a/checker/BUILD.bazel
++++ b/checker/BUILD.bazel
+@@ -28,7 +28,7 @@ go_library(
+         "//common/types/pb:go_default_library",
+         "//common/types/ref:go_default_library",
+         "//parser:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+         "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+diff --git a/checker/decls/BUILD.bazel b/checker/decls/BUILD.bazel
+index 5a24f1d..9c6bfbc 100644
+--- a/checker/decls/BUILD.bazel
++++ b/checker/decls/BUILD.bazel
+@@ -13,7 +13,7 @@ go_library(
+     ],
+     importpath = "github.com/google/cel-go/checker/decls",
+     deps = [
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+         "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+     ],
+diff --git a/common/BUILD.bazel b/common/BUILD.bazel
+index 9e4ad65..d306934 100644
+--- a/common/BUILD.bazel
++++ b/common/BUILD.bazel
+@@ -16,7 +16,7 @@ go_library(
+     importpath = "github.com/google/cel-go/common",
+     deps = [
+         "//common/runes:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_x_text//width:go_default_library",
+     ],
+ )
+diff --git a/common/containers/BUILD.bazel b/common/containers/BUILD.bazel
+index 18142d9..3c45794 100644
+--- a/common/containers/BUILD.bazel
++++ b/common/containers/BUILD.bazel
+@@ -12,7 +12,7 @@ go_library(
+     ],
+     importpath = "github.com/google/cel-go/common/containers",
+     deps = [
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+     ],
+ )
+ 
+@@ -26,6 +26,6 @@ go_test(
+         ":go_default_library",
+     ],
+     deps = [
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+     ],
+ )
+diff --git a/common/debug/BUILD.bazel b/common/debug/BUILD.bazel
+index cf5c5d2..6db7f55 100644
+--- a/common/debug/BUILD.bazel
++++ b/common/debug/BUILD.bazel
+@@ -13,6 +13,6 @@ go_library(
+     importpath = "github.com/google/cel-go/common/debug",
+     deps = [
+         "//common:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+     ],
+ )
+diff --git a/common/types/BUILD.bazel b/common/types/BUILD.bazel
+index 32789f5..e3970fd 100644
+--- a/common/types/BUILD.bazel
++++ b/common/types/BUILD.bazel
+@@ -37,7 +37,7 @@ go_library(
+         "//common/types/ref:go_default_library",
+         "//common/types/traits:go_default_library",
+         "@com_github_stoewer_go_strcase//:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+@@ -75,7 +75,7 @@ go_test(
+         "//common/types/ref:go_default_library",
+         "//test:go_default_library",
+         "//test/proto3pb:test_all_types_go_proto",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+         "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+         "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+diff --git a/common/types/pb/BUILD.bazel b/common/types/pb/BUILD.bazel
+index b0c79a7..37cd0a7 100644
+--- a/common/types/pb/BUILD.bazel
++++ b/common/types/pb/BUILD.bazel
+@@ -16,7 +16,7 @@ go_library(
+     ],
+     importpath = "github.com/google/cel-go/common/types/pb",
+     deps = [
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+         "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
+diff --git a/common/types/ref/BUILD.bazel b/common/types/ref/BUILD.bazel
+index 1d0f468..9243b47 100644
+--- a/common/types/ref/BUILD.bazel
++++ b/common/types/ref/BUILD.bazel
+@@ -13,7 +13,7 @@ go_library(
+     ],
+     importpath = "github.com/google/cel-go/common/types/ref",
+     deps = [
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+     ],
+diff --git a/ext/BUILD.bazel b/ext/BUILD.bazel
+index 2b98ad3..68a9d72 100644
+--- a/ext/BUILD.bazel
++++ b/ext/BUILD.bazel
+@@ -19,7 +19,7 @@ go_library(
+         "//common/types:go_default_library",
+         "//common/types/ref:go_default_library",
+         "//interpreter/functions:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+     ],
+ )
+ 
+diff --git a/interpreter/BUILD.bazel b/interpreter/BUILD.bazel
+index 2f4079f..6b30db8 100644
+--- a/interpreter/BUILD.bazel
++++ b/interpreter/BUILD.bazel
+@@ -30,7 +30,7 @@ go_library(
+         "//common/types/ref:go_default_library",
+         "//common/types/traits:go_default_library",
+         "//interpreter/functions:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+         "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+@@ -63,7 +63,7 @@ go_test(
+         "//test:go_default_library",
+         "//test/proto2pb:go_default_library",
+         "//test/proto3pb:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+     ],
+diff --git a/parser/BUILD.bazel b/parser/BUILD.bazel
+index b76e6e4..c098fe6 100644
+--- a/parser/BUILD.bazel
++++ b/parser/BUILD.bazel
+@@ -24,7 +24,7 @@ go_library(
+         "//common/runes:go_default_library",
+         "//parser/gen:go_default_library",
+         "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+     ],
+diff --git a/server/BUILD.bazel b/server/BUILD.bazel
+index 7174aa9..a4d9ff6 100644
+--- a/server/BUILD.bazel
++++ b/server/BUILD.bazel
+@@ -19,9 +19,9 @@ go_library(
+         "//common/types/traits:go_default_library",
+         "@com_google_cel_spec//proto/test/v1/proto2:test_all_types_go_proto",
+         "@com_google_cel_spec//proto/test/v1/proto3:test_all_types_go_proto",
+-        "@org_golang_google_genproto//googleapis/api/expr/conformance/v1alpha1:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+-        "@org_golang_google_genproto//googleapis/rpc/status:go_default_library",
++        "@go_googleapis//google/api/expr/conformance/v1alpha1:conformance_go_proto",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
++        "@go_googleapis//google/rpc/status:status_go_proto",
+         "@org_golang_google_grpc//codes:go_default_library",
+         "@org_golang_google_grpc//status:go_default_library",
+         "@org_golang_google_protobuf//proto:go_default_library",
+@@ -44,8 +44,8 @@ go_test(
+         "//common/operators:go_default_library",
+         "//test:go_default_library",
+         "@com_google_cel_spec//tools/celrpc:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/conformance/v1alpha1:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+-        "@org_golang_google_genproto//googleapis/rpc/status:go_default_library",
++        "@go_googleapis//google/api/expr/conformance/v1alpha1:conformance_go_proto",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
++        "@go_googleapis//google/rpc/status:status_go_proto",
+     ],
+ )
+diff --git a/test/BUILD.bazel b/test/BUILD.bazel
+index 0d39c70..3b8c460 100644
+--- a/test/BUILD.bazel
++++ b/test/BUILD.bazel
+@@ -20,7 +20,7 @@ go_library(
+     importpath = "github.com/google/cel-go/test",
+     deps = [
+         "//common/operators:go_default_library",
+-        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
++        "@go_googleapis//google/api/expr/v1alpha1:expr_go_proto",
+         "@org_golang_google_protobuf//proto:go_default_library",
+         "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+     ],
+-- 
+2.25.1
+
diff --git a/third_party/go/patches/containerd-netns-statedir.patch b/third_party/go/patches/containerd-netns-statedir.patch
deleted file mode 100644
index a693eb7..0000000
--- a/third_party/go/patches/containerd-netns-statedir.patch
+++ /dev/null
@@ -1,96 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From 3e7a8cebf9d40487adc7d4a22b5c628add5e7eac Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@nexantic.com>
-Date: Wed, 27 Jan 2021 13:05:30 +0100
-Subject: [PATCH] Move netns directory into StateDir
-
----
- pkg/netns/netns_unix.go   | 12 +++++-------
- pkg/server/sandbox_run.go |  3 ++-
- 2 files changed, 7 insertions(+), 8 deletions(-)
-
-diff --git a/pkg/netns/netns_unix.go b/pkg/netns/netns_unix.go
-index 7449e235..b31716cb 100644
---- a/pkg/netns/netns_unix.go
-+++ b/pkg/netns/netns_unix.go
-@@ -48,14 +48,12 @@ import (
- 	osinterface "github.com/containerd/cri/pkg/os"
- )
- 
--const nsRunDir = "/var/run/netns"
--
- // Some of the following functions are migrated from
- // https://github.com/containernetworking/plugins/blob/master/pkg/testutils/netns_linux.go
- 
- // newNS creates a new persistent (bind-mounted) network namespace and returns the
- // path to the network namespace.
--func newNS() (nsPath string, err error) {
-+func newNS(baseDir string) (nsPath string, err error) {
- 	b := make([]byte, 16)
- 	if _, err := rand.Reader.Read(b); err != nil {
- 		return "", errors.Wrap(err, "failed to generate random netns name")
-@@ -64,13 +62,13 @@ func newNS() (nsPath string, err error) {
- 	// Create the directory for mounting network namespaces
- 	// This needs to be a shared mountpoint in case it is mounted in to
- 	// other namespaces (containers)
--	if err := os.MkdirAll(nsRunDir, 0755); err != nil {
-+	if err := os.MkdirAll(baseDir, 0755); err != nil {
- 		return "", err
- 	}
- 
- 	// create an empty file at the mount point
- 	nsName := fmt.Sprintf("cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
--	nsPath = path.Join(nsRunDir, nsName)
-+	nsPath = path.Join(baseDir, nsName)
- 	mountPointFd, err := os.Create(nsPath)
- 	if err != nil {
- 		return "", err
-@@ -164,8 +162,8 @@ type NetNS struct {
- }
- 
- // NewNetNS creates a network namespace.
--func NewNetNS() (*NetNS, error) {
--	path, err := newNS()
-+func NewNetNS(baseDir string) (*NetNS, error) {
-+	path, err := newNS(baseDir)
- 	if err != nil {
- 		return nil, errors.Wrap(err, "failed to setup netns")
- 	}
-diff --git a/pkg/server/sandbox_run.go b/pkg/server/sandbox_run.go
-index dd4c51e3..32a2d6e8 100644
---- a/pkg/server/sandbox_run.go
-+++ b/pkg/server/sandbox_run.go
-@@ -19,6 +19,7 @@ package server
- import (
- 	"encoding/json"
- 	"math"
-+	"path/filepath"
- 	goruntime "runtime"
- 	"strings"
- 
-@@ -117,7 +118,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
- 		// handle. NetNSPath in sandbox metadata and NetNS is non empty only for non host network
- 		// namespaces. If the pod is in host network namespace then both are empty and should not
- 		// be used.
--		sandbox.NetNS, err = netns.NewNetNS()
-+		sandbox.NetNS, err = netns.NewNetNS(filepath.Join(c.config.StateDir, "netns"))
- 		if err != nil {
- 			return nil, errors.Wrapf(err, "failed to create network namespace for sandbox %q", id)
- 		}
--- 
-2.25.1
-
diff --git a/third_party/go/patches/containerd-no-tracing.patch b/third_party/go/patches/containerd-no-tracing.patch
new file mode 100644
index 0000000..ee6f8c8
--- /dev/null
+++ b/third_party/go/patches/containerd-no-tracing.patch
@@ -0,0 +1,43 @@
+From 26ac5a008b349b77288d29cc483ea5d6129f298c Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Wed, 16 Mar 2022 18:35:44 +0100
+Subject: [PATCH] Disable tracing for otel compatibility
+
+---
+ cmd/containerd/builtins.go     | 1 -
+ cmd/containerd/command/main.go | 2 --
+ 2 files changed, 3 deletions(-)
+
+diff --git a/cmd/containerd/builtins.go b/cmd/containerd/builtins.go
+index 8c6f1fe86..dd8b1d3d9 100644
+--- a/cmd/containerd/builtins.go
++++ b/cmd/containerd/builtins.go
+@@ -36,5 +36,4 @@ import (
+ 	_ "github.com/containerd/containerd/services/snapshots"
+ 	_ "github.com/containerd/containerd/services/tasks"
+ 	_ "github.com/containerd/containerd/services/version"
+-	_ "github.com/containerd/containerd/tracing/plugin"
+ )
+diff --git a/cmd/containerd/command/main.go b/cmd/containerd/command/main.go
+index f549e08a3..dfde3599e 100644
+--- a/cmd/containerd/command/main.go
++++ b/cmd/containerd/command/main.go
+@@ -35,7 +35,6 @@ import (
+ 	"github.com/containerd/containerd/services/server"
+ 	srvconfig "github.com/containerd/containerd/services/server/config"
+ 	"github.com/containerd/containerd/sys"
+-	"github.com/containerd/containerd/tracing"
+ 	"github.com/containerd/containerd/version"
+ 	"github.com/sirupsen/logrus"
+ 	"github.com/urfave/cli"
+@@ -370,7 +369,6 @@ func setLogFormat(config *srvconfig.Config) error {
+ }
+ 
+ func setLogHooks() {
+-	logrus.StandardLogger().AddHook(tracing.NewLogrusHook())
+ }
+ 
+ func dumpStacks(writeToFile bool) {
+-- 
+2.25.1
+
diff --git a/third_party/go/patches/coredns-remove-unused-plugins.patch b/third_party/go/patches/coredns-remove-unused-plugins.patch
index 9180d48..e952585 100644
--- a/third_party/go/patches/coredns-remove-unused-plugins.patch
+++ b/third_party/go/patches/coredns-remove-unused-plugins.patch
@@ -1,202 +1,156 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From b8a216a10775163dac6267127c35c9230ccbc5f7 Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@nexantic.com>
-Date: Mon, 27 Jul 2020 14:25:03 +0200
-Subject: [PATCH] Removed unused plugins
+From 355d38f38cf241b91a862795892bcb87c5729d2e Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Wed, 16 Mar 2022 13:39:39 +0100
+Subject: [PATCH] Remove unused plugins
 
 ---
- core/dnsserver/zdirectives.go | 26 --------------------------
- core/plugin/zplugin.go        | 26 --------------------------
- plugin.cfg                    | 26 --------------------------
- plugin/kubernetes/setup.go    |  3 ---
- 4 files changed, 81 deletions(-)
+ core/dnsserver/zdirectives.go | 7 -------
+ core/plugin/zplugin.go        | 7 -------
+ plugin.cfg                    | 7 -------
+ plugin/kubernetes/setup.go    | 3 ---
+ 4 files changed, 24 deletions(-)
 
 diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go
-index 1bf449cb..c3619160 100644
+index bca21718..678c02c6 100644
 --- a/core/dnsserver/zdirectives.go
 +++ b/core/dnsserver/zdirectives.go
-@@ -11,48 +11,22 @@ package dnsserver
+@@ -11,7 +11,6 @@ package dnsserver
  // care what plugin above them are doing.
  var Directives = []string{
  	"metadata",
--	"cancel",
--	"tls",
+-	"geoip",
+ 	"cancel",
+ 	"tls",
  	"reload",
- 	"nsid",
- 	"bufsize",
--	"root",
+@@ -20,14 +19,12 @@ var Directives = []string{
+ 	"root",
  	"bind",
  	"debug",
 -	"trace",
--	"ready",
+ 	"ready",
  	"health",
--	"pprof",
--	"prometheus",
+ 	"pprof",
+ 	"prometheus",
  	"errors",
  	"log",
 -	"dnstap",
+ 	"local",
  	"dns64",
  	"acl",
- 	"any",
--	"chaos",
- 	"loadbalance",
- 	"cache",
--	"rewrite",
--	"dnssec",
--	"autopath",
+@@ -43,15 +40,11 @@ var Directives = []string{
  	"template",
--	"transfer",
+ 	"transfer",
  	"hosts",
 -	"route53",
 -	"azure",
 -	"clouddns",
--	"k8s_external",
+ 	"k8s_external",
  	"kubernetes",
--	"file",
--	"auto",
--	"secondary",
+ 	"file",
+ 	"auto",
+ 	"secondary",
 -	"etcd",
--	"loop",
+ 	"loop",
  	"forward",
--	"grpc",
--	"erratic",
- 	"whoami",
--	"on",
--	"sign",
- }
+ 	"grpc",
 diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go
-index cf83be70..de026516 100644
+index a9167eea..2a5d0b13 100644
 --- a/core/plugin/zplugin.go
 +++ b/core/plugin/zplugin.go
-@@ -4,49 +4,23 @@ package plugin
-
- import (
- 	// Include all plugins.
--	_ "github.com/caddyserver/caddy/onevent"
- 	_ "github.com/coredns/coredns/plugin/acl"
+@@ -9,23 +9,18 @@ import (
  	_ "github.com/coredns/coredns/plugin/any"
--	_ "github.com/coredns/coredns/plugin/auto"
--	_ "github.com/coredns/coredns/plugin/autopath"
+ 	_ "github.com/coredns/coredns/plugin/auto"
+ 	_ "github.com/coredns/coredns/plugin/autopath"
 -	_ "github.com/coredns/coredns/plugin/azure"
  	_ "github.com/coredns/coredns/plugin/bind"
  	_ "github.com/coredns/coredns/plugin/bufsize"
  	_ "github.com/coredns/coredns/plugin/cache"
--	_ "github.com/coredns/coredns/plugin/cancel"
--	_ "github.com/coredns/coredns/plugin/chaos"
+ 	_ "github.com/coredns/coredns/plugin/cancel"
+ 	_ "github.com/coredns/coredns/plugin/chaos"
 -	_ "github.com/coredns/coredns/plugin/clouddns"
  	_ "github.com/coredns/coredns/plugin/debug"
  	_ "github.com/coredns/coredns/plugin/dns64"
--	_ "github.com/coredns/coredns/plugin/dnssec"
+ 	_ "github.com/coredns/coredns/plugin/dnssec"
 -	_ "github.com/coredns/coredns/plugin/dnstap"
--	_ "github.com/coredns/coredns/plugin/erratic"
+ 	_ "github.com/coredns/coredns/plugin/erratic"
  	_ "github.com/coredns/coredns/plugin/errors"
 -	_ "github.com/coredns/coredns/plugin/etcd"
--	_ "github.com/coredns/coredns/plugin/file"
+ 	_ "github.com/coredns/coredns/plugin/file"
  	_ "github.com/coredns/coredns/plugin/forward"
--	_ "github.com/coredns/coredns/plugin/grpc"
+-	_ "github.com/coredns/coredns/plugin/geoip"
+ 	_ "github.com/coredns/coredns/plugin/grpc"
+ 	_ "github.com/coredns/coredns/plugin/header"
  	_ "github.com/coredns/coredns/plugin/health"
- 	_ "github.com/coredns/coredns/plugin/hosts"
--	_ "github.com/coredns/coredns/plugin/k8s_external"
- 	_ "github.com/coredns/coredns/plugin/kubernetes"
- 	_ "github.com/coredns/coredns/plugin/loadbalance"
- 	_ "github.com/coredns/coredns/plugin/log"
--	_ "github.com/coredns/coredns/plugin/loop"
- 	_ "github.com/coredns/coredns/plugin/metadata"
--	_ "github.com/coredns/coredns/plugin/metrics"
- 	_ "github.com/coredns/coredns/plugin/nsid"
--	_ "github.com/coredns/coredns/plugin/pprof"
--	_ "github.com/coredns/coredns/plugin/ready"
+@@ -45,12 +40,10 @@ import (
  	_ "github.com/coredns/coredns/plugin/reload"
--	_ "github.com/coredns/coredns/plugin/rewrite"
--	_ "github.com/coredns/coredns/plugin/root"
+ 	_ "github.com/coredns/coredns/plugin/rewrite"
+ 	_ "github.com/coredns/coredns/plugin/root"
 -	_ "github.com/coredns/coredns/plugin/route53"
--	_ "github.com/coredns/coredns/plugin/secondary"
--	_ "github.com/coredns/coredns/plugin/sign"
+ 	_ "github.com/coredns/coredns/plugin/secondary"
+ 	_ "github.com/coredns/coredns/plugin/sign"
  	_ "github.com/coredns/coredns/plugin/template"
--	_ "github.com/coredns/coredns/plugin/tls"
+ 	_ "github.com/coredns/coredns/plugin/tls"
 -	_ "github.com/coredns/coredns/plugin/trace"
--	_ "github.com/coredns/coredns/plugin/transfer"
+ 	_ "github.com/coredns/coredns/plugin/transfer"
  	_ "github.com/coredns/coredns/plugin/whoami"
  )
 diff --git a/plugin.cfg b/plugin.cfg
-index c389d74e..80a8bc51 100644
+index 628e7141..d4ce1c89 100644
 --- a/plugin.cfg
 +++ b/plugin.cfg
-@@ -20,47 +20,21 @@
+@@ -20,7 +20,6 @@
  # log:log
-
+ 
  metadata:metadata
--cancel:cancel
--tls:tls
+-geoip:geoip
+ cancel:cancel
+ tls:tls
  reload:reload
- nsid:nsid
- bufsize:bufsize
--root:root
+@@ -29,14 +28,12 @@ bufsize:bufsize
+ root:root
  bind:bind
  debug:debug
 -trace:trace
--ready:ready
+ ready:ready
  health:health
--pprof:pprof
--prometheus:metrics
+ pprof:pprof
+ prometheus:metrics
  errors:errors
  log:log
 -dnstap:dnstap
+ local:local
  dns64:dns64
  acl:acl
- any:any
--chaos:chaos
- loadbalance:loadbalance
- cache:cache
--rewrite:rewrite
--dnssec:dnssec
--autopath:autopath
+@@ -52,15 +49,11 @@ minimal:minimal
  template:template
--transfer:transfer
+ transfer:transfer
  hosts:hosts
 -route53:route53
 -azure:azure
 -clouddns:clouddns
--k8s_external:k8s_external
+ k8s_external:k8s_external
  kubernetes:kubernetes
--file:file
--auto:auto
--secondary:secondary
+ file:file
+ auto:auto
+ secondary:secondary
 -etcd:etcd
--loop:loop
+ loop:loop
  forward:forward
--grpc:grpc
--erratic:erratic
- whoami:whoami
--on:github.com/caddyserver/caddy/onevent
--sign:sign
+ grpc:grpc
 diff --git a/plugin/kubernetes/setup.go b/plugin/kubernetes/setup.go
-index 0c46a3ab..827fc141 100644
+index 5f5ab058..ae1a7cf8 100644
 --- a/plugin/kubernetes/setup.go
 +++ b/plugin/kubernetes/setup.go
-@@ -20,9 +20,6 @@ import (
- 	"github.com/caddyserver/caddy"
+@@ -17,9 +17,6 @@ import (
+ 
  	"github.com/miekg/dns"
  	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 -	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"       // pull this in here, because we want it excluded if plugin.cfg doesn't have k8s
 -	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"      // pull this in here, because we want it excluded if plugin.cfg doesn't have k8s
 -	_ "k8s.io/client-go/plugin/pkg/client/auth/openstack" // pull this in here, because we want it excluded if plugin.cfg doesn't have k8s
  	"k8s.io/client-go/tools/clientcmd"
- 	"k8s.io/klog"
+ 	"k8s.io/klog/v2"
  )
---
+-- 
 2.25.1
+
diff --git a/third_party/go/patches/delve-fix-cgo.patch b/third_party/go/patches/delve-fix-cgo.patch
new file mode 100644
index 0000000..d8a5254
--- /dev/null
+++ b/third_party/go/patches/delve-fix-cgo.patch
@@ -0,0 +1,11 @@
+diff -urN b/pkg/proc/internal/ebpf/BUILD.bazel c/pkg/proc/internal/ebpf/BUILD.bazel
+--- b/pkg/proc/internal/ebpf/BUILD.bazel  2022-01-14 23:29:02.948089761 +0000
++++ c/pkg/proc/internal/ebpf/BUILD.bazel  2022-01-14 23:13:52.964810803 +0000
+@@ -7,6 +7,7 @@
+         "helpers.go",
+         "helpers_disabled.go",
+         "trace_bpfel_x86.go",
++        "bpf/include/function_vals.bpf.h",
+     ],
+     cgo = True,
+     embedsrcs = select({
\ No newline at end of file
diff --git a/third_party/go/patches/etcd-fix-stub.patch b/third_party/go/patches/etcd-fix-stub.patch
new file mode 100644
index 0000000..bb47ac4
--- /dev/null
+++ b/third_party/go/patches/etcd-fix-stub.patch
@@ -0,0 +1,9 @@
+--- a/grpc_testing/stub_server.go
++++ b/grpc_testing/stub_server.go
+@@ -86,5 +86,5 @@ func (d dummyStubServer) UnaryCall(context.Context, *testpb.SimpleRequest) (*tes
+ // NewDummyStubServer creates a simple test server that serves Unary calls with
+ // responses with the given payload.
+ func NewDummyStubServer(body []byte) *StubServer {
+-	return New(dummyStubServer{body: body})
++	return New(&dummyStubServer{body: body})
+ }
diff --git a/third_party/go/patches/go-proto-validators-default-alias.patch b/third_party/go/patches/go-proto-validators-default-alias.patch
deleted file mode 100644
index 46f21a9..0000000
--- a/third_party/go/patches/go-proto-validators-default-alias.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 49b5f957d01cd50e5303cfc0e6c7f350d7e1c24e Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@monogon.tech>
-Date: Tue, 30 Nov 2021 03:19:37 +0100
-Subject: [PATCH] Alias go_default_library in BUILD to make Gazelle work
-
----
- BUILD.bazel | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/BUILD.bazel b/BUILD.bazel
-index 9a1edd4..e5b8f96 100644
---- a/BUILD.bazel
-+++ b/BUILD.bazel
-@@ -14,6 +14,12 @@ gazelle(
- # By default resolve the top-level package to the gogo variant as it's required for the plugin compilation.
- # gazelle:resolve go github.com/mwitkow/go-proto-validators //:validators_gogo
- 
-+alias(
-+    name = "go_default_library",
-+    actual = "validators_golang",
-+    visibility = ["//visibility:public"],
-+)
-+
- go_proto_compiler(
-     name = "go_proto_validators",
-     options = ["gogoimport=false"],
--- 
-2.25.1
-
diff --git a/third_party/go/patches/go-tpm-tools-fix-proto-paths.patch b/third_party/go/patches/go-tpm-tools-fix-proto-paths.patch
new file mode 100644
index 0000000..ae7fcf3
--- /dev/null
+++ b/third_party/go/patches/go-tpm-tools-fix-proto-paths.patch
@@ -0,0 +1,66 @@
+From a799a94b7b67b21d1eecfe66dbe264ef3e276cd3 Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Wed, 30 Mar 2022 15:48:23 +0200
+Subject: [PATCH] Fix proto file paths
+
+---
+ proto/0001-Fix-proto-file-paths.patch | 23 +++++++++++++++++++++++
+ proto/{ => attest}/attest.proto       |  2 +-
+ proto/{ => tpm}/tpm.proto             |  0
+ 3 files changed, 24 insertions(+), 1 deletion(-)
+ create mode 100644 proto/0001-Fix-proto-file-paths.patch
+ rename proto/{ => attest}/attest.proto (99%)
+ rename proto/{ => tpm}/tpm.proto (100%)
+
+diff --git a/proto/0001-Fix-proto-file-paths.patch b/proto/0001-Fix-proto-file-paths.patch
+new file mode 100644
+index 0000000..22e3b45
+--- /dev/null
++++ b/proto/0001-Fix-proto-file-paths.patch
+@@ -0,0 +1,23 @@
++From ba5bf5ac1e0b3d92e8f19a0feb2307515c0e2baf Mon Sep 17 00:00:00 2001
++From: Lorenz Brun <lorenz@monogon.tech>
++Date: Wed, 30 Mar 2022 15:48:23 +0200
++Subject: [PATCH] Fix proto file paths
++
++---
++ proto/{ => attest}/attest.proto | 0
++ proto/{ => tpm}/tpm.proto       | 0
++ 2 files changed, 0 insertions(+), 0 deletions(-)
++ rename proto/{ => attest}/attest.proto (100%)
++ rename proto/{ => tpm}/tpm.proto (100%)
++
++diff --git a/proto/attest.proto b/proto/attest/attest.proto
++similarity index 100%
++rename from proto/attest.proto
++rename to proto/attest/attest.proto
++diff --git a/proto/tpm.proto b/proto/tpm/tpm.proto
++similarity index 100%
++rename from proto/tpm.proto
++rename to proto/tpm/tpm.proto
++-- 
++2.25.1
++
+diff --git a/proto/attest.proto b/proto/attest/attest.proto
+similarity index 99%
+rename from proto/attest.proto
+rename to proto/attest/attest.proto
+index fbaf5bd..7960297 100644
+--- a/proto/attest.proto
++++ b/proto/attest/attest.proto
+@@ -3,7 +3,7 @@ syntax = "proto3";
+ package attest;
+ option go_package = "github.com/google/go-tpm-tools/proto/attest";
+ 
+-import "tpm.proto";
++import "proto/tpm/tpm.proto";
+ 
+ // Information uniquely identifying a GCE instance. Can be used to create an
+ // instance URL, which can then be used with GCE APIs. Formatted like:
+diff --git a/proto/tpm.proto b/proto/tpm/tpm.proto
+similarity index 100%
+rename from proto/tpm.proto
+rename to proto/tpm/tpm.proto
+-- 
+2.25.1
+
diff --git a/third_party/go/patches/gvisor-build-against-newer-runtime-specs.patch b/third_party/go/patches/gvisor-build-against-newer-runtime-specs.patch
deleted file mode 100644
index f653492..0000000
--- a/third_party/go/patches/gvisor-build-against-newer-runtime-specs.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From 9fc5e36f4e0bd691fc502372ebf837d24fbe5297 Mon Sep 17 00:00:00 2001
-From: Serge Bazanski <serge@nexantic.com>
-Date: Fri, 15 Jan 2021 16:22:33 +0100
-Subject: [PATCH] Build against new opencontainers runtime-specs
-Company: nexantic GmbH
-
----
- runsc/container/container.go | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/runsc/container/container.go b/runsc/container/container.go
-index 418a27beb..8508ded5a 100644
---- a/runsc/container/container.go
-+++ b/runsc/container/container.go
-@@ -747,7 +747,7 @@ func (c *Container) State() specs.State {
- 	return specs.State{
- 		Version: specs.Version,
- 		ID:      c.ID,
--		Status:  c.Status.String(),
-+		Status:  specs.ContainerState(c.Status.String()),
- 		Pid:     c.SandboxPid(),
- 		Bundle:  c.BundleDir,
- 	}
--- 
-2.26.2
-
diff --git a/third_party/go/patches/gvisor-cgroup-fix.patch b/third_party/go/patches/gvisor-cgroup-fix.patch
deleted file mode 100644
index aba0b4a..0000000
--- a/third_party/go/patches/gvisor-cgroup-fix.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From fcd48e672489f41c9977e092937ff806a7e772bd Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@monogon.tech>
-Date: Thu, 11 Nov 2021 13:58:50 +0100
-Subject: [PATCH] Make systemd cgroup optional
-
-This breaks gVisor on systems that use custom inits/cgroup setups which
-don't have this cgroup and don't need it.
----
- runsc/cgroup/cgroup.go | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/runsc/cgroup/cgroup.go b/runsc/cgroup/cgroup.go
-index 0eb5821a9..2116bddca 100644
---- a/runsc/cgroup/cgroup.go
-+++ b/runsc/cgroup/cgroup.go
-@@ -58,7 +58,7 @@ var controllers = map[string]controller{
- 	"freezer":    &noop{},
- 	"perf_event": &noop{},
- 	"rdma":       &noop{isOptional: true},
--	"systemd":    &noop{},
-+	"systemd":    &noop{isOptional: true},
- }
- 
- // IsOnlyV2 checks whether cgroups V2 is enabled and V1 is not.
--- 
-2.25.1
-
diff --git a/third_party/go/patches/gvisor-containerd-compat.patch b/third_party/go/patches/gvisor-containerd-compat.patch
new file mode 100644
index 0000000..89eb40f
--- /dev/null
+++ b/third_party/go/patches/gvisor-containerd-compat.patch
@@ -0,0 +1,114 @@
+From bf861ce45721791336b617a44844613bb74c677a Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Tue, 22 Mar 2022 00:52:48 +0100
+Subject: [PATCH] containerd 1.6 compatibility
+
+---
+ pkg/shim/proc/exec.go     |  2 +-
+ pkg/shim/proc/init.go     |  2 +-
+ pkg/shim/service.go       | 17 +++++++++--------
+ pkg/shim/service_linux.go |  2 +-
+ 4 files changed, 12 insertions(+), 11 deletions(-)
+
+diff --git a/pkg/shim/proc/exec.go b/pkg/shim/proc/exec.go
+index da2e21598..d0d14dd24 100644
+--- a/pkg/shim/proc/exec.go
++++ b/pkg/shim/proc/exec.go
+@@ -238,7 +238,7 @@ func (e *execProcess) start(ctx context.Context) error {
+ 		if err != nil {
+ 			return fmt.Errorf("failed to retrieve console master: %w", err)
+ 		}
+-		if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg); err != nil {
++		if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.id, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg); err != nil {
+ 			return fmt.Errorf("failed to start console copy: %w", err)
+ 		}
+ 	} else if !e.stdio.IsNull() {
+diff --git a/pkg/shim/proc/init.go b/pkg/shim/proc/init.go
+index 6bf090813..76df1101a 100644
+--- a/pkg/shim/proc/init.go
++++ b/pkg/shim/proc/init.go
+@@ -152,7 +152,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) (err error) {
+ 		if err != nil {
+ 			return fmt.Errorf("failed to retrieve console master: %w", err)
+ 		}
+-		console, err = p.Platform.CopyConsole(ctx, console, r.Stdin, r.Stdout, r.Stderr, &p.wg)
++		console, err = p.Platform.CopyConsole(ctx, console, r.ID, r.Stdin, r.Stdout, r.Stderr, &p.wg)
+ 		if err != nil {
+ 			return fmt.Errorf("failed to start console copy: %w", err)
+ 		}
+diff --git a/pkg/shim/service.go b/pkg/shim/service.go
+index 68966afdf..772168052 100644
+--- a/pkg/shim/service.go
++++ b/pkg/shim/service.go
+@@ -50,7 +50,7 @@ import (
+ 	"github.com/sirupsen/logrus"
+ 	"golang.org/x/sys/unix"
+ 	"gvisor.dev/gvisor/pkg/cleanup"
+-	"gvisor.dev/gvisor/pkg/shim/runtimeoptions/v14"
++	v14 "gvisor.dev/gvisor/pkg/shim/runtimeoptions/v14"
+ 
+ 	"gvisor.dev/gvisor/pkg/shim/proc"
+ 	"gvisor.dev/gvisor/pkg/shim/runsc"
+@@ -189,7 +189,7 @@ type service struct {
+ 	shimAddress string
+ }
+ 
+-func (s *service) newCommand(ctx context.Context, containerdBinary, containerdAddress string) (*exec.Cmd, error) {
++func (s *service) newCommand(ctx context.Context, id, containerdBinary, containerdAddress string) (*exec.Cmd, error) {
+ 	ns, err := namespaces.NamespaceRequired(ctx)
+ 	if err != nil {
+ 		return nil, err
+@@ -204,6 +204,7 @@ func (s *service) newCommand(ctx context.Context, containerdBinary, containerdAd
+ 	}
+ 	args := []string{
+ 		"-namespace", ns,
++		"-id", id,
+ 		"-address", containerdAddress,
+ 		"-publish-binary", containerdBinary,
+ 	}
+@@ -219,14 +220,14 @@ func (s *service) newCommand(ctx context.Context, containerdBinary, containerdAd
+ 	return cmd, nil
+ }
+ 
+-func (s *service) StartShim(ctx context.Context, id, containerdBinary, containerdAddress, containerdTTRPCAddress string) (string, error) {
+-	log.L.Debugf("StartShim, id: %s, binary: %q, address: %q", id, containerdBinary, containerdAddress)
++func (s *service) StartShim(ctx context.Context, opts shim.StartOpts) (string, error) {
++	log.L.Debugf("StartShim, id: %s, binary: %q, address: %q", opts.ID, opts.ContainerdBinary, opts.Address)
+ 
+-	cmd, err := s.newCommand(ctx, containerdBinary, containerdAddress)
++	cmd, err := s.newCommand(ctx, opts.ID, opts.ContainerdBinary, opts.Address)
+ 	if err != nil {
+ 		return "", err
+ 	}
+-	address, err := shim.SocketAddress(ctx, containerdAddress, id)
++	address, err := shim.SocketAddress(ctx, opts.Address, opts.ID)
+ 	if err != nil {
+ 		return "", err
+ 	}
+@@ -280,8 +281,8 @@ func (s *service) StartShim(ctx context.Context, id, containerdBinary, container
+ 	if err := shim.WriteAddress(shimAddressPath, address); err != nil {
+ 		return "", err
+ 	}
+-	if err := shim.SetScore(cmd.Process.Pid); err != nil {
+-		return "", fmt.Errorf("failed to set OOM Score on shim: %w", err)
++	if err := shim.AdjustOOMScore(cmd.Process.Pid); err != nil {
++		return "", fmt.Errorf("failed to adjust OOM score for shim: %w", err)
+ 	}
+ 	cu.Release()
+ 	return address, nil
+diff --git a/pkg/shim/service_linux.go b/pkg/shim/service_linux.go
+index fb2f8b062..52c82ca90 100644
+--- a/pkg/shim/service_linux.go
++++ b/pkg/shim/service_linux.go
+@@ -33,7 +33,7 @@ type linuxPlatform struct {
+ 	epoller *console.Epoller
+ }
+ 
+-func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg *sync.WaitGroup) (console.Console, error) {
++func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, id, stdin, stdout, stderr string, wg *sync.WaitGroup) (console.Console, error) {
+ 	if p.epoller == nil {
+ 		return nil, fmt.Errorf("uninitialized epoller")
+ 	}
+-- 
+2.25.1
+
diff --git a/third_party/go/patches/gvisor-fix-resolution.patch b/third_party/go/patches/gvisor-fix-resolution.patch
new file mode 100644
index 0000000..71e0af5
--- /dev/null
+++ b/third_party/go/patches/gvisor-fix-resolution.patch
@@ -0,0 +1,10 @@
+--- a/pkg/coverage/BUILD.bazel
++++ b/pkg/coverage/BUILD.bazel
+@@ -11,7 +11,7 @@ go_library(
+     deps = [
+         "//pkg/hostarch",
+         "//pkg/sync",
+-        "@io_bazel_rules_go//go/tools/coverdata:go_default_library",
++        "@io_bazel_rules_go//go/tools/coverdata",
+     ],
+ )
diff --git a/third_party/go/patches/gvisor-shim-root.patch b/third_party/go/patches/gvisor-shim-root.patch
deleted file mode 100644
index 6b10797..0000000
--- a/third_party/go/patches/gvisor-shim-root.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From 6cbcb8e61b60046e51ff79674b78031707739401 Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@brun.one>
-Date: Wed, 6 May 2020 18:52:14 +0200
-Subject: [PATCH] Fix externally-configured non-standard root paths
-
-Going upstream as https://github.com/google/gvisor-containerd-shim/pull/60
-
----
- pkg/v2/service.go | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
-diff --git a/pkg/v2/service.go b/pkg/v2/service.go
-index c1df4b8..f7f5847 100644
---- a/pkg/v2/service.go
-+++ b/pkg/v2/service.go
-@@ -103,6 +103,7 @@ type service struct {
- 	processes map[string]process.Process
- 	events    chan interface{}
- 	platform  stdio.Platform
-+	opts      options.Options
- 	ec        chan proc.Exit
- 
- 	id     string
-@@ -194,7 +195,7 @@ func (s *service) Cleanup(ctx context.Context) (*taskAPI.DeleteResponse, error)
- 	if err != nil {
- 		return nil, err
- 	}
--	r := proc.NewRunsc(proc.RunscRoot, path, ns, runtime, nil)
-+	r := proc.NewRunsc(s.opts.Root, path, ns, runtime, nil)
- 	if err := r.Delete(ctx, s.id, &runsc.DeleteOpts{
- 		Force: true,
- 	}); err != nil {
-@@ -344,6 +345,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *
- 	s.id = r.ID
- 	s.bundle = r.Bundle
- 	s.task = process
-+	s.opts = opts
- 	return &taskAPI.CreateTaskResponse{
- 		Pid: uint32(process.Pid()),
- 	}, nil
-@@ -577,7 +579,7 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI.
- 	if err != nil {
- 		return nil, err
- 	}
--	rs := proc.NewRunsc(proc.RunscRoot, path, ns, runtime, nil)
-+	rs := proc.NewRunsc(s.opts.Root, path, ns, runtime, nil)
- 	stats, err := rs.Stats(ctx, s.id)
- 	if err != nil {
- 		return nil, err
--- 
-2.25.1
-
diff --git a/third_party/go/patches/gvisor.patch b/third_party/go/patches/gvisor.patch
deleted file mode 100644
index 143ee59..0000000
--- a/third_party/go/patches/gvisor.patch
+++ /dev/null
@@ -1,97 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From 69b2c3b6e2594a3f28e4ea1141bef542456b3eb2 Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@brun.one>
-Date: Wed, 5 Feb 2020 17:14:19 +0100
-Subject: [PATCH] Fix vdso include that breaks in an external
-
----
- vdso/cycle_clock.h | 2 +-
- vdso/seqlock.h     | 4 ++--
- vdso/vdso.cc       | 4 ++--
- vdso/vdso_time.cc  | 8 ++++----
- 4 files changed, 9 insertions(+), 9 deletions(-)
-
-diff --git a/vdso/cycle_clock.h b/vdso/cycle_clock.h
-index 5d3fbb25..fe15812c 100644
---- a/vdso/cycle_clock.h
-+++ b/vdso/cycle_clock.h
-@@ -17,7 +17,7 @@
- 
- #include <stdint.h>
- 
--#include "vdso/barrier.h"
-+#include "barrier.h"
- 
- namespace vdso {
- 
-diff --git a/vdso/seqlock.h b/vdso/seqlock.h
-index 7a173174..fff99748 100644
---- a/vdso/seqlock.h
-+++ b/vdso/seqlock.h
-@@ -18,8 +18,8 @@
- 
- #include <stdint.h>
- 
--#include "vdso/barrier.h"
--#include "vdso/compiler.h"
-+#include "barrier.h"
-+#include "compiler.h"
- 
- namespace vdso {
- 
-diff --git a/vdso/vdso.cc b/vdso/vdso.cc
-index 8bb80a7a..210d31ff 100644
---- a/vdso/vdso.cc
-+++ b/vdso/vdso.cc
-@@ -19,8 +19,8 @@
- #include <sys/time.h>
- #include <time.h>
- 
--#include "vdso/syscalls.h"
--#include "vdso/vdso_time.h"
-+#include "syscalls.h"
-+#include "vdso_time.h"
- 
- namespace vdso {
- namespace {
-diff --git a/vdso/vdso_time.cc b/vdso/vdso_time.cc
-index 1bb4bb86..fb5b281f 100644
---- a/vdso/vdso_time.cc
-+++ b/vdso/vdso_time.cc
-@@ -12,15 +12,15 @@
- // See the License for the specific language governing permissions and
- // limitations under the License.
- 
--#include "vdso/vdso_time.h"
-+#include "vdso_time.h"
- 
- #include <stdint.h>
- #include <sys/time.h>
- #include <time.h>
- 
--#include "vdso/cycle_clock.h"
--#include "vdso/seqlock.h"
--#include "vdso/syscalls.h"
-+#include "cycle_clock.h"
-+#include "seqlock.h"
-+#include "syscalls.h"
- 
- // struct params defines the layout of the parameter page maintained by the
- // kernel (i.e., sentry).
--- 
-2.20.1
-
diff --git a/third_party/go/patches/k8s-adopt-to-go-jose-2.3.patch b/third_party/go/patches/k8s-adopt-to-go-jose-2.3.patch
new file mode 100644
index 0000000..9efd08a
--- /dev/null
+++ b/third_party/go/patches/k8s-adopt-to-go-jose-2.3.patch
@@ -0,0 +1,136 @@
+From a1411288423dfc4062844b9f699a30fd7cbe090d Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Mon, 21 Mar 2022 15:20:19 +0100
+Subject: [PATCH 1/2] Adapt to API breakage in go-jose 2.3.0
+
+---
+ pkg/serviceaccount/claims_test.go          | 40 +++++++++++-----------
+ test/integration/auth/svcaccttoken_test.go |  6 ++--
+ 2 files changed, 23 insertions(+), 23 deletions(-)
+
+diff --git a/pkg/serviceaccount/claims_test.go b/pkg/serviceaccount/claims_test.go
+index 2e968f60335..a0b5a595c2f 100644
+--- a/pkg/serviceaccount/claims_test.go
++++ b/pkg/serviceaccount/claims_test.go
+@@ -85,9 +85,9 @@ func TestClaims(t *testing.T) {
+ 
+ 			sc: &jwt.Claims{
+ 				Subject:   "system:serviceaccount:myns:mysvcacct",
+-				IssuedAt:  jwt.NumericDate(1514764800),
+-				NotBefore: jwt.NumericDate(1514764800),
+-				Expiry:    jwt.NumericDate(1514764800),
++				IssuedAt:  jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				NotBefore: jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				Expiry:    jwt.NewNumericDate(time.Unix(1514764800, 0)),
+ 			},
+ 			pc: &privateClaims{
+ 				Kubernetes: kubernetes{
+@@ -107,9 +107,9 @@ func TestClaims(t *testing.T) {
+ 
+ 			sc: &jwt.Claims{
+ 				Subject:   "system:serviceaccount:myns:mysvcacct",
+-				IssuedAt:  jwt.NumericDate(1514764800),
+-				NotBefore: jwt.NumericDate(1514764800),
+-				Expiry:    jwt.NumericDate(1514764800 + 100),
++				IssuedAt:  jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				NotBefore: jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				Expiry:    jwt.NewNumericDate(time.Unix(1514764800+100, 0)),
+ 			},
+ 			pc: &privateClaims{
+ 				Kubernetes: kubernetes{
+@@ -130,9 +130,9 @@ func TestClaims(t *testing.T) {
+ 			sc: &jwt.Claims{
+ 				Subject:   "system:serviceaccount:myns:mysvcacct",
+ 				Audience:  []string{"1"},
+-				IssuedAt:  jwt.NumericDate(1514764800),
+-				NotBefore: jwt.NumericDate(1514764800),
+-				Expiry:    jwt.NumericDate(1514764800 + 100),
++				IssuedAt:  jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				NotBefore: jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				Expiry:    jwt.NewNumericDate(time.Unix(1514764800+100, 0)),
+ 			},
+ 			pc: &privateClaims{
+ 				Kubernetes: kubernetes{
+@@ -152,9 +152,9 @@ func TestClaims(t *testing.T) {
+ 			sc: &jwt.Claims{
+ 				Subject:   "system:serviceaccount:myns:mysvcacct",
+ 				Audience:  []string{"1", "2"},
+-				IssuedAt:  jwt.NumericDate(1514764800),
+-				NotBefore: jwt.NumericDate(1514764800),
+-				Expiry:    jwt.NumericDate(1514764800 + 100),
++				IssuedAt:  jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				NotBefore: jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				Expiry:    jwt.NewNumericDate(time.Unix(1514764800+100, 0)),
+ 			},
+ 			pc: &privateClaims{
+ 				Kubernetes: kubernetes{
+@@ -175,16 +175,16 @@ func TestClaims(t *testing.T) {
+ 
+ 			sc: &jwt.Claims{
+ 				Subject:   "system:serviceaccount:myns:mysvcacct",
+-				IssuedAt:  jwt.NumericDate(1514764800),
+-				NotBefore: jwt.NumericDate(1514764800),
+-				Expiry:    jwt.NumericDate(1514764800 + 60*60*24),
++				IssuedAt:  jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				NotBefore: jwt.NewNumericDate(time.Unix(1514764800, 0)),
++				Expiry:    jwt.NewNumericDate(time.Unix(1514764800+60*60*24, 0)),
+ 			},
+ 			pc: &privateClaims{
+ 				Kubernetes: kubernetes{
+ 					Namespace: "myns",
+ 					Svcacct:   ref{Name: "mysvcacct", UID: "mysvcacct-uid"},
+ 					Pod:       &ref{Name: "mypod", UID: "mypod-uid"},
+-					WarnAfter: jwt.NumericDate(1514764800 + 60*60),
++					WarnAfter: jwt.NewNumericDate(time.Unix(1514764800+60*60, 0)),
+ 				},
+ 			},
+ 		},
+@@ -223,8 +223,8 @@ type claimTestCase struct {
+ 	name      string
+ 	getter    ServiceAccountTokenGetter
+ 	private   *privateClaims
+-	expiry    jwt.NumericDate
+-	notBefore jwt.NumericDate
++	expiry    *jwt.NumericDate
++	notBefore *jwt.NumericDate
+ 	expectErr string
+ }
+ 
+@@ -365,8 +365,8 @@ func TestValidatePrivateClaims(t *testing.T) {
+ 	for _, tc := range testcases {
+ 		t.Run(tc.name, func(t *testing.T) {
+ 			v := &validator{tc.getter}
+-			expiry := jwt.NumericDate(nowUnix)
+-			if tc.expiry != 0 {
++			expiry := jwt.NewNumericDate(time.Unix(nowUnix, 0))
++			if tc.expiry != nil {
+ 				expiry = tc.expiry
+ 			}
+ 			_, err := v.Validate(context.Background(), "", &jwt.Claims{Expiry: expiry, NotBefore: tc.notBefore}, tc.private)
+diff --git a/test/integration/auth/svcaccttoken_test.go b/test/integration/auth/svcaccttoken_test.go
+index da50bf4736e..5311b6c90c3 100644
+--- a/test/integration/auth/svcaccttoken_test.go
++++ b/test/integration/auth/svcaccttoken_test.go
+@@ -421,16 +421,16 @@ func TestServiceAccountTokenCreate(t *testing.T) {
+ 			t.Fatalf("error parsing warnafter: %v", err)
+ 		}
+ 
+-		if exp < int64(actualExpiry)-leeway || exp > int64(actualExpiry)+leeway {
++		if exp < int64(*actualExpiry)-leeway || exp > int64(*actualExpiry)+leeway {
+ 			t.Errorf("unexpected token exp %d, should within range of %d +- %d seconds", exp, actualExpiry, leeway)
+ 		}
+-		if warnafter < int64(assumedExpiry)-leeway || warnafter > int64(assumedExpiry)+leeway {
++		if warnafter < int64(*assumedExpiry)-leeway || warnafter > int64(*assumedExpiry)+leeway {
+ 			t.Errorf("unexpected token warnafter %d, should within range of %d +- %d seconds", warnafter, assumedExpiry, leeway)
+ 		}
+ 
+ 		checkExpiration(t, treq, requestExp)
+ 		expStatus := treq.Status.ExpirationTimestamp.Time.Unix()
+-		if expStatus < int64(assumedExpiry)-leeway || warnafter > int64(assumedExpiry)+leeway {
++		if expStatus < int64(*assumedExpiry)-leeway || warnafter > int64(*assumedExpiry)+leeway {
+ 			t.Errorf("unexpected expiration returned in tokenrequest status %d, should within range of %d +- %d seconds", expStatus, assumedExpiry, leeway)
+ 		}
+ 	})
+-- 
+2.25.1
+
diff --git a/third_party/go/patches/k8s-adopt-to-runc-1.1.patch b/third_party/go/patches/k8s-adopt-to-runc-1.1.patch
new file mode 100644
index 0000000..8bfdcf2
--- /dev/null
+++ b/third_party/go/patches/k8s-adopt-to-runc-1.1.patch
@@ -0,0 +1,168 @@
+From 1564b39d0fbeac776a0d92236a0ca0c7cbdc6c5c Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Mon, 21 Mar 2022 15:21:25 +0100
+Subject: [PATCH 2/2] Adapt to API breakage in runc 1.1.0
+
+---
+ pkg/kubelet/cm/cgroup_manager_linux.go                | 11 ++++++-----
+ pkg/kubelet/cm/container_manager_linux.go             |  4 ++--
+ pkg/kubelet/cm/qos_container_manager_linux.go         |  4 ++--
+ pkg/kubelet/dockershim/cm/container_manager_linux.go  |  2 +-
+ .../kuberuntime/kuberuntime_container_linux.go        |  4 ++--
+ .../kuberuntime/kuberuntime_container_linux_test.go   |  6 +++---
+ 6 files changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go
+index 230173690d5..4bd50db5050 100644
+--- a/pkg/kubelet/cm/cgroup_manager_linux.go
++++ b/pkg/kubelet/cm/cgroup_manager_linux.go
+@@ -27,6 +27,7 @@ import (
+ 	"sync"
+ 	"time"
+ 
++	"github.com/opencontainers/runc/libcontainer/cgroups"
+ 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
+ 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ 	cgroupfs2 "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
+@@ -150,18 +151,18 @@ func (l *libcontainerAdapter) newManager(cgroups *libcontainerconfigs.Cgroup, pa
+ 	switch l.cgroupManagerType {
+ 	case libcontainerCgroupfs:
+ 		if libcontainercgroups.IsCgroup2UnifiedMode() {
+-			return cgroupfs2.NewManager(cgroups, paths["memory"], false)
++			return cgroupfs2.NewManager(cgroups, paths["memory"])
+ 		}
+-		return cgroupfs.NewManager(cgroups, paths, false), nil
++		return cgroupfs.NewManager(cgroups, paths)
+ 	case libcontainerSystemd:
+ 		// this means you asked systemd to manage cgroups, but systemd was not on the host, so all you can do is panic...
+ 		if !cgroupsystemd.IsRunningSystemd() {
+ 			panic("systemd cgroup manager not available")
+ 		}
+ 		if libcontainercgroups.IsCgroup2UnifiedMode() {
+-			return cgroupsystemd.NewUnifiedManager(cgroups, paths["memory"], false), nil
++			return cgroupsystemd.NewUnifiedManager(cgroups, paths["memory"])
+ 		}
+-		return cgroupsystemd.NewLegacyManager(cgroups, paths), nil
++		return cgroupsystemd.NewLegacyManager(cgroups, paths)
+ 	}
+ 	return nil, fmt.Errorf("invalid cgroup manager configuration")
+ }
+@@ -420,7 +421,7 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont
+ 		pageSizes.Insert(sizeString)
+ 	}
+ 	// for each page size omitted, limit to 0
+-	for _, pageSize := range cgroupfs.HugePageSizes {
++	for _, pageSize := range cgroups.HugePageSizes() {
+ 		if pageSizes.Has(pageSize) {
+ 			continue
+ 		}
+diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
+index 3892bae081d..4c79f212ab5 100644
+--- a/pkg/kubelet/cm/container_manager_linux.go
++++ b/pkg/kubelet/cm/container_manager_linux.go
+@@ -401,10 +401,10 @@ func createManager(containerName string) (cgroups.Manager, error) {
+ 	}
+ 
+ 	if cgroups.IsCgroup2UnifiedMode() {
+-		return cgroupfs2.NewManager(cg, "", false)
++		return cgroupfs2.NewManager(cg, "")
+ 
+ 	}
+-	return cgroupfs.NewManager(cg, nil, false), nil
++	return cgroupfs.NewManager(cg, nil)
+ }
+ 
+ type KernelTunableBehavior string
+diff --git a/pkg/kubelet/cm/qos_container_manager_linux.go b/pkg/kubelet/cm/qos_container_manager_linux.go
+index bb79109b141..d0a78db2483 100644
+--- a/pkg/kubelet/cm/qos_container_manager_linux.go
++++ b/pkg/kubelet/cm/qos_container_manager_linux.go
+@@ -28,8 +28,8 @@ import (
+ 	"k8s.io/apimachinery/pkg/util/wait"
+ 
+ 	units "github.com/docker/go-units"
++	"github.com/opencontainers/runc/libcontainer/cgroups"
+ 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
+-	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ 	v1 "k8s.io/api/core/v1"
+ 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+ 	"k8s.io/kubernetes/pkg/api/v1/resource"
+@@ -147,7 +147,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
+ // setHugePagesUnbounded ensures hugetlb is effectively unbounded
+ func (m *qosContainerManagerImpl) setHugePagesUnbounded(cgroupConfig *CgroupConfig) error {
+ 	hugePageLimit := map[int64]int64{}
+-	for _, pageSize := range cgroupfs.HugePageSizes {
++	for _, pageSize := range cgroups.HugePageSizes() {
+ 		pageSizeBytes, err := units.RAMInBytes(pageSize)
+ 		if err != nil {
+ 			return err
+diff --git a/pkg/kubelet/dockershim/cm/container_manager_linux.go b/pkg/kubelet/dockershim/cm/container_manager_linux.go
+index 759e27f26c5..93d6c51ac00 100644
+--- a/pkg/kubelet/dockershim/cm/container_manager_linux.go
++++ b/pkg/kubelet/dockershim/cm/container_manager_linux.go
+@@ -129,7 +129,7 @@ func createCgroupManager(name string) (cgroups.Manager, error) {
+ 			SkipDevices: true,
+ 		},
+ 	}
+-	return cgroupfs.NewManager(cg, nil, false), nil
++	return cgroupfs.NewManager(cg, nil)
+ }
+ 
+ // getMemoryCapacity returns the memory capacity on the machine in bytes.
+diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
+index 6cb9e54729e..fd922f07c7c 100644
+--- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
++++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
+@@ -23,8 +23,8 @@ import (
+ 	"strconv"
+ 	"time"
+ 
++	"github.com/opencontainers/runc/libcontainer/cgroups"
+ 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
+-	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ 	v1 "k8s.io/api/core/v1"
+ 	"k8s.io/apimachinery/pkg/api/resource"
+ 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+@@ -170,7 +170,7 @@ func GetHugepageLimitsFromResources(resources v1.ResourceRequirements) []*runtim
+ 	var hugepageLimits []*runtimeapi.HugepageLimit
+ 
+ 	// For each page size, limit to 0.
+-	for _, pageSize := range cgroupfs.HugePageSizes {
++	for _, pageSize := range cgroups.HugePageSizes() {
+ 		hugepageLimits = append(hugepageLimits, &runtimeapi.HugepageLimit{
+ 			PageSize: pageSize,
+ 			Limit:    uint64(0),
+diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
+index 46817e00fb0..f166adc6fe1 100644
+--- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
++++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
+@@ -25,7 +25,7 @@ import (
+ 	"testing"
+ 
+ 	"github.com/google/go-cmp/cmp"
+-	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
++	"github.com/opencontainers/runc/libcontainer/cgroups"
+ 	"github.com/stretchr/testify/assert"
+ 	v1 "k8s.io/api/core/v1"
+ 	"k8s.io/apimachinery/pkg/api/resource"
+@@ -366,7 +366,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) {
+ 	var baseHugepage []*runtimeapi.HugepageLimit
+ 
+ 	// For each page size, limit to 0.
+-	for _, pageSize := range cgroupfs.HugePageSizes {
++	for _, pageSize := range cgroups.HugePageSizes() {
+ 		baseHugepage = append(baseHugepage, &runtimeapi.HugepageLimit{
+ 			PageSize: pageSize,
+ 			Limit:    uint64(0),
+@@ -481,7 +481,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) {
+ 		machineHugepageSupport := true
+ 		for _, hugepageLimit := range test.expected {
+ 			hugepageSupport := false
+-			for _, pageSize := range cgroupfs.HugePageSizes {
++			for _, pageSize := range cgroups.HugePageSizes() {
+ 				if pageSize == hugepageLimit.PageSize {
+ 					hugepageSupport = true
+ 					break
+-- 
+2.25.1
+
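
For context on the patch above: runc 1.1 changed the libcontainer cgroup constructors to drop the trailing rootless flag and to return an error alongside the manager, and it replaced the `cgroupfs.HugePageSizes` package variable with a `cgroups.HugePageSizes()` function. A minimal sketch of the new call shapes, assuming runc v1.1.x import paths:

    package main

    import (
    	"fmt"

    	"github.com/opencontainers/runc/libcontainer/cgroups"
    	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
    	"github.com/opencontainers/runc/libcontainer/configs"
    )

    func main() {
    	cg := &configs.Cgroup{Name: "demo", Resources: &configs.Resources{}}

    	// runc 1.0: cgroupfs.NewManager(cg, paths, false) returned the manager only.
    	// runc 1.1: the bool is gone and construction can fail.
    	mgr, err := cgroupfs.NewManager(cg, nil)
    	if err != nil {
    		fmt.Println("cannot construct cgroup manager:", err)
    		return
    	}
    	fmt.Printf("cgroup manager: %T\n", mgr)

    	// Formerly the variable cgroupfs.HugePageSizes; now a function on the
    	// parent cgroups package, probing the host's supported page sizes.
    	for _, size := range cgroups.HugePageSizes() {
    		fmt.Println("hugepage size:", size)
    	}
    }
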
diff --git a/third_party/go/patches/k8s-backport-no-dockershim.patch b/third_party/go/patches/k8s-backport-no-dockershim.patch
new file mode 100644
index 0000000..452c7e1
--- /dev/null
+++ b/third_party/go/patches/k8s-backport-no-dockershim.patch
@@ -0,0 +1,191 @@
+From bc78dff42ec6be929648e91f3ef2dd6dae5169fb Mon Sep 17 00:00:00 2001
+From: Davanum Srinivas <davanum@gmail.com>
+Date: Tue, 7 Dec 2021 14:48:57 -0500
+Subject: [PATCH] update files to drop dockershim
+
+Signed-off-by: Davanum Srinivas <davanum@gmail.com>
+---
+ build/dependencies.yaml                       |  2 --
+ cmd/kubelet/app/options/globalflags_linux.go  | 11 --------
+ go.mod                                        |  5 ----
+ go.sum                                        |  3 ---
+ pkg/kubelet/kubelet.go                        | 26 +------------------
+ .../legacy-cloud-providers/aws/aws_fakes.go   |  1 +
+ test/e2e/framework/.import-restrictions       | 10 -------
+ 7 files changed, 2 insertions(+), 56 deletions(-)
+
+diff --git a/build/dependencies.yaml b/build/dependencies.yaml
+index ff296e255b961..b0b1b5a6d41b8 100644
+--- a/build/dependencies.yaml
++++ b/build/dependencies.yaml
+@@ -172,8 +172,6 @@ dependencies:
+       match: defaultPodSandboxImageVersion\s+=
+     - path: hack/testdata/pod-with-precision.json
+       match: k8s.gcr.io\/pause:\d+\.\d+
+-    - path: pkg/kubelet/dockershim/docker_sandbox.go
+-      match: k8s.gcr.io\/pause:\d+\.\d+
+     - path: staging/src/k8s.io/kubectl/testdata/set/multi-resource-yaml.yaml
+       match: k8s.gcr.io\/pause:\d+\.\d+
+     - path: staging/src/k8s.io/kubectl/testdata/set/namespaced-resource.yaml
+diff --git a/cmd/kubelet/app/options/globalflags_linux.go b/cmd/kubelet/app/options/globalflags_linux.go
+index ad3b68628f661..e75e65ec37cd0 100644
+--- a/cmd/kubelet/app/options/globalflags_linux.go
++++ b/cmd/kubelet/app/options/globalflags_linux.go
+@@ -28,7 +28,6 @@ import (
+ 	// ensure libs have a chance to globally register their flags
+ 	_ "github.com/google/cadvisor/container/common"
+ 	_ "github.com/google/cadvisor/container/containerd"
+-	_ "github.com/google/cadvisor/container/docker"
+ 	_ "github.com/google/cadvisor/container/raw"
+ 	_ "github.com/google/cadvisor/machine"
+ 	_ "github.com/google/cadvisor/manager"
+@@ -41,9 +40,6 @@ func addCadvisorFlags(fs *pflag.FlagSet) {
+ 	global := flag.CommandLine
+ 	local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
+ 
+-	// These flags were also implicit from cadvisor, but are actually used by something in the core repo:
+-	// TODO(mtaufen): This one is stil used by our salt, but for heaven's sake it's even deprecated in cadvisor
+-	register(global, local, "docker_root")
+ 	// e2e node tests rely on this
+ 	register(global, local, "housekeeping_interval")
+ 
+@@ -54,13 +50,6 @@ func addCadvisorFlags(fs *pflag.FlagSet) {
+ 	registerDeprecated(global, local, "boot_id_file", deprecated)
+ 	registerDeprecated(global, local, "container_hints", deprecated)
+ 	registerDeprecated(global, local, "containerd", deprecated)
+-	registerDeprecated(global, local, "docker", deprecated)
+-	registerDeprecated(global, local, "docker_env_metadata_whitelist", deprecated)
+-	registerDeprecated(global, local, "docker_only", deprecated)
+-	registerDeprecated(global, local, "docker-tls", deprecated)
+-	registerDeprecated(global, local, "docker-tls-ca", deprecated)
+-	registerDeprecated(global, local, "docker-tls-cert", deprecated)
+-	registerDeprecated(global, local, "docker-tls-key", deprecated)
+ 	registerDeprecated(global, local, "enable_load_reader", deprecated)
+ 	registerDeprecated(global, local, "event_storage_age_limit", deprecated)
+ 	registerDeprecated(global, local, "event_storage_event_limit", deprecated)
+diff --git a/go.mod b/go.mod
+index a17878d68d030..7dccd35bb6b91 100644
+--- a/go.mod
++++ b/go.mod
+@@ -25,15 +25,12 @@ require (
+ 	github.com/boltdb/bolt v1.3.1 // indirect
+ 	github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313
+ 	github.com/container-storage-interface/spec v1.5.0
+-	github.com/containernetworking/cni v0.8.1
+ 	github.com/coredns/corefile-migration v1.0.14
+ 	github.com/coreos/go-oidc v2.1.0+incompatible
+ 	github.com/coreos/go-systemd/v22 v22.3.2
+ 	github.com/cpuguy83/go-md2man/v2 v2.0.0
+ 	github.com/davecgh/go-spew v1.1.1
+ 	github.com/docker/distribution v2.7.1+incompatible
+-	github.com/docker/docker v20.10.7+incompatible
+-	github.com/docker/go-connections v0.4.0
+ 	github.com/docker/go-units v0.4.0
+ 	github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153
+ 	github.com/emicklei/go-restful v2.9.5+incompatible
+@@ -63,7 +60,6 @@ require (
+ 	github.com/mvdan/xurls v1.1.0
+ 	github.com/onsi/ginkgo v1.14.0
+ 	github.com/onsi/gomega v1.10.1
+-	github.com/opencontainers/go-digest v1.0.0
+ 	github.com/opencontainers/runc v1.0.2
+ 	github.com/opencontainers/selinux v1.8.2
+ 	github.com/pkg/errors v0.9.1
+@@ -209,7 +205,6 @@ replace (
+ 	github.com/containerd/go-runc => github.com/containerd/go-runc v1.0.0
+ 	github.com/containerd/ttrpc => github.com/containerd/ttrpc v1.0.2
+ 	github.com/containerd/typeurl => github.com/containerd/typeurl v1.0.2
+-	github.com/containernetworking/cni => github.com/containernetworking/cni v0.8.1
+ 	github.com/coredns/caddy => github.com/coredns/caddy v1.1.0
+ 	github.com/coredns/corefile-migration => github.com/coredns/corefile-migration v1.0.14
+ 	github.com/coreos/go-oidc => github.com/coreos/go-oidc v2.1.0+incompatible
+diff --git a/go.sum b/go.sum
+index b458fb06802b3..9121b2f4ac81a 100644
+--- a/go.sum
++++ b/go.sum
+@@ -116,8 +116,6 @@ github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9
+ github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+ github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
+ github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+-github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI=
+-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+ github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0=
+ github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
+ github.com/coredns/corefile-migration v1.0.14 h1:Tz3WZhoj2NdP8drrQH86NgnCng+VrPjNeg2Oe1ALKag=
+@@ -353,7 +351,6 @@ github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb h1:e+l77LJOEqXTIQih
+ github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
+ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
+-github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+ github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4=
+ github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
+index 2013c871a608a..0e6f5f946dea0 100644
+--- a/pkg/kubelet/kubelet.go
++++ b/pkg/kubelet/kubelet.go
+@@ -73,7 +73,6 @@ import (
+ 	"k8s.io/kubernetes/pkg/kubelet/configmap"
+ 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+ 	"k8s.io/kubernetes/pkg/kubelet/cri/remote"
+-	"k8s.io/kubernetes/pkg/kubelet/cri/streaming"
+ 	"k8s.io/kubernetes/pkg/kubelet/events"
+ 	"k8s.io/kubernetes/pkg/kubelet/eviction"
+ 	"k8s.io/kubernetes/pkg/kubelet/images"
+@@ -310,18 +309,7 @@ func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
+ 
+ 	switch containerRuntime {
+ 	case kubetypes.DockerContainerRuntime:
+-		klog.InfoS("Using dockershim is deprecated, please consider using a full-fledged CRI implementation")
+-		if err := runDockershim(
+-			kubeCfg,
+-			kubeDeps,
+-			crOptions,
+-			runtimeCgroups,
+-			remoteRuntimeEndpoint,
+-			remoteImageEndpoint,
+-			nonMasqueradeCIDR,
+-		); err != nil {
+-			return err
+-		}
++		return fmt.Errorf("using dockershim is not supported, please consider using a full-fledged CRI implementation")
+ 	case kubetypes.RemoteContainerRuntime:
+ 		// No-op.
+ 		break
+@@ -2440,15 +2428,3 @@ func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
+ 	// ContainerRemoved doesn't affect pod state
+ 	return event.Type != pleg.ContainerRemoved
+ }
+-
+-// Gets the streaming server configuration to use with in-process CRI shims.
+-func getStreamingConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, crOptions *config.ContainerRuntimeOptions) *streaming.Config {
+-	config := &streaming.Config{
+-		StreamIdleTimeout:               kubeCfg.StreamingConnectionIdleTimeout.Duration,
+-		StreamCreationTimeout:           streaming.DefaultConfig.StreamCreationTimeout,
+-		SupportedRemoteCommandProtocols: streaming.DefaultConfig.SupportedRemoteCommandProtocols,
+-		SupportedPortForwardProtocols:   streaming.DefaultConfig.SupportedPortForwardProtocols,
+-	}
+-	config.Addr = net.JoinHostPort("localhost", "0")
+-	return config
+-}
+diff --git a/test/e2e/framework/.import-restrictions b/test/e2e/framework/.import-restrictions
+index a60fb9d790847..1353f40df9ddf 100644
+--- a/test/e2e/framework/.import-restrictions
++++ b/test/e2e/framework/.import-restrictions
+@@ -86,16 +86,6 @@ rules:
+       - k8s.io/kubernetes/pkg/kubelet/config
+       - k8s.io/kubernetes/pkg/kubelet/configmap
+       - k8s.io/kubernetes/pkg/kubelet/container
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim/cm
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim/network
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics
+-      - k8s.io/kubernetes/pkg/kubelet/dockershim/remote
+       - k8s.io/kubernetes/pkg/kubelet/envvars
+       - k8s.io/kubernetes/pkg/kubelet/eviction
+       - k8s.io/kubernetes/pkg/kubelet/eviction/api
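
One mechanism in the patch above is worth spelling out: the removed `_ "github.com/google/cadvisor/container/docker"` line is a blank import, kept only so that the package's init() functions run and register their flags globally; deleting it is what makes the docker flags disappear from the kubelet. The standard library's pprof package uses the same idiom, so a self-contained sketch of the mechanism:

    package main

    import (
    	"fmt"
    	"net/http"

    	// Blank import: compiled in purely for its init() side effect, which
    	// registers the /debug/pprof/* handlers on http.DefaultServeMux.
    	_ "net/http/pprof"
    )

    func main() {
    	// The handlers exist although nothing here references pprof directly;
    	// removing the blank import above would unregister them, just as
    	// dropping cadvisor's docker import unregisters its global flags.
    	fmt.Println(http.ListenAndServe("localhost:6060", nil))
    }
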
diff --git a/third_party/go/patches/k8s-client-go.patch b/third_party/go/patches/k8s-client-go.patch
deleted file mode 100644
index ebfae0e..0000000
--- a/third_party/go/patches/k8s-client-go.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-This patch rips out azure/openstack auth support for k8s.io/client-go. This should be made configurable upstream.
-
-diff -ur io_k8s_client_go.orig/plugin/pkg/client/auth/plugins.go io_k8s_client_go/plugin/pkg/client/auth/plugins.go
---- io_k8s_client_go.orig/plugin/pkg/client/auth/plugins.go	2020-04-16 17:46:53.965434780 +0200
-+++ io_k8s_client_go/plugin/pkg/client/auth/plugins.go	2020-04-16 17:47:21.720538171 +0200
-@@ -18,8 +18,6 @@
- 
- import (
- 	// Initialize all known client auth plugins.
--	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
- 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
- 	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
--	_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
- )
diff --git a/third_party/go/patches/k8s-drop-legacy-log-path.patch b/third_party/go/patches/k8s-drop-legacy-log-path.patch
index af7b007..356a681 100644
--- a/third_party/go/patches/k8s-drop-legacy-log-path.patch
+++ b/third_party/go/patches/k8s-drop-legacy-log-path.patch
@@ -1,21 +1,6 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From 227ccd88e378a002b7c23703eec96aa1d25949eb Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@brun.one>
-Date: Wed, 3 Feb 2021 16:47:38 +0100
+From b2d875981cec8eda9c041f858004f613ea928895 Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Wed, 16 Mar 2022 18:10:09 +0100
 Subject: [PATCH] Drop legacy log path
 
 ---
@@ -23,14 +8,14 @@
  .../kuberuntime/kuberuntime_container.go      | 32 ---------------
  pkg/kubelet/kuberuntime/kuberuntime_gc.go     | 39 -------------------
  pkg/kubelet/runonce.go                        |  8 ----
- test/e2e_node/log_path_test.go                | 19 +--------
- 5 files changed, 1 insertion(+), 104 deletions(-)
+ test/e2e_node/log_path_test.go                | 18 ---------
+ 5 files changed, 104 deletions(-)
 
 diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
-index 902dc7532e1..2d582f65b19 100644
+index 2013c871a60..c3fd4221544 100644
 --- a/pkg/kubelet/kubelet.go
 +++ b/pkg/kubelet/kubelet.go
-@@ -1250,13 +1250,6 @@ func (kl *Kubelet) initializeModules() error {
+@@ -1379,13 +1379,6 @@ func (kl *Kubelet) initializeModules() error {
  		return err
  	}
  
@@ -45,10 +30,10 @@
  	kl.imageManager.Start()
  
 diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go
-index af361122c35..d5b2d245219 100644
+index 9c762ac309e..d1148cc7de5 100644
 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go
 +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go
-@@ -190,25 +190,6 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
+@@ -254,25 +254,6 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
  	}
  	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, fmt.Sprintf("Started container %s", container.Name))
  
@@ -66,15 +51,15 @@
 -	// to create it in the first place. it happens when journald logging driver is used with docker.
 -	if _, err := m.osInterface.Stat(containerLog); !os.IsNotExist(err) {
 -		if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil {
--			klog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v",
--				legacySymlink, containerID, containerLog, err)
+-			klog.ErrorS(err, "Failed to create legacy symbolic link", "path", legacySymlink,
+-				"containerID", containerID, "containerLogPath", containerLog)
 -		}
 -	}
 -
  	// Step 4: execute the post start hook.
  	if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
  		kubeContainerID := kubecontainer.ContainerID{
-@@ -861,19 +842,6 @@ func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error
+@@ -967,19 +948,6 @@ func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error
  		return err
  	}
  
@@ -95,7 +80,7 @@
  }
  
 diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/pkg/kubelet/kuberuntime/kuberuntime_gc.go
-index 8c4f786db9b..b5b104ee6a6 100644
+index 610026661b7..a09dec91fbb 100644
 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc.go
 +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc.go
 @@ -18,7 +18,6 @@ package kuberuntime
@@ -106,7 +91,7 @@
  	"path/filepath"
  	"sort"
  	"time"
-@@ -346,44 +345,6 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
+@@ -347,44 +346,6 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
  			}
  		}
  	}
@@ -121,7 +106,7 @@
 -				if err != nil {
 -					// TODO: we should handle container not found (i.e. container was deleted) case differently
 -					// once https://github.com/kubernetes/kubernetes/issues/63336 is resolved
--					klog.Infof("Error getting ContainerStatus for containerID %q: %v", containerID, err)
+-					klog.InfoS("Error getting ContainerStatus for containerID", "containerID", containerID, "err", err)
 -				} else if status.State != runtimeapi.ContainerState_CONTAINER_EXITED {
 -					// Here is how container log rotation works (see containerLogManager#rotateLatestLog):
 -					//
@@ -134,17 +119,17 @@
 -					// See https://github.com/kubernetes/kubernetes/issues/52172
 -					//
 -					// We only remove unhealthy symlink for dead containers
--					klog.V(5).Infof("Container %q is still running, not removing symlink %q.", containerID, logSymlink)
+-					klog.V(5).InfoS("Container is still running, not removing symlink", "containerID", containerID, "path", logSymlink)
 -					continue
 -				}
 -			} else {
--				klog.V(4).Infof("unable to obtain container Id: %v", err)
+-				klog.V(4).InfoS("Unable to obtain container ID", "err", err)
 -			}
 -			err := osInterface.Remove(logSymlink)
 -			if err != nil {
--				klog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err)
+-				klog.ErrorS(err, "Failed to remove container log dead symlink", "path", logSymlink)
 -			} else {
--				klog.V(4).Infof("removed symlink %s", logSymlink)
+-				klog.V(4).InfoS("Removed symlink", "path", logSymlink)
 -			}
 -		}
 -	}
@@ -152,40 +137,36 @@
  }
  
 diff --git a/pkg/kubelet/runonce.go b/pkg/kubelet/runonce.go
-index 1da9c225186..d6a5a63e92d 100644
+index 19b8a4f6a7b..2f0aad713e3 100644
 --- a/pkg/kubelet/runonce.go
 +++ b/pkg/kubelet/runonce.go
-@@ -18,7 +18,6 @@ package kubelet
- 
+@@ -19,7 +19,6 @@ package kubelet
  import (
+ 	"context"
  	"fmt"
 -	"os"
  	"time"
  
- 	"k8s.io/api/core/v1"
-@@ -48,13 +47,6 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult,
+ 	v1 "k8s.io/api/core/v1"
+@@ -49,13 +48,6 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult,
  		return nil, err
  	}
  
 -	// If the container logs directory does not exist, create it.
 -	if _, err := os.Stat(ContainerLogsDir); err != nil {
 -		if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil {
--			klog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err)
+-			klog.ErrorS(err, "Failed to create directory", "path", ContainerLogsDir)
 -		}
 -	}
 -
  	select {
  	case u := <-updates:
- 		klog.Infof("processing manifest with %d pods", len(u.Pods))
+ 		klog.InfoS("Processing manifest with pods", "numPods", len(u.Pods))
 diff --git a/test/e2e_node/log_path_test.go b/test/e2e_node/log_path_test.go
-index 41646f326a5..6568d31e242 100644
+index cfdd9823cb1..35f3b7be465 100644
 --- a/test/e2e_node/log_path_test.go
 +++ b/test/e2e_node/log_path_test.go
-@@ -18,11 +18,10 @@ package e2enode
- 
- import (
- 	"context"
-+
+@@ -22,8 +22,6 @@ import (
  	v1 "k8s.io/api/core/v1"
  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  	"k8s.io/apimachinery/pkg/util/uuid"
@@ -194,7 +175,7 @@
  	"k8s.io/kubernetes/test/e2e/framework"
  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-@@ -138,22 +137,6 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
+@@ -144,22 +142,6 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() {
  				err := createAndWaitPod(makeLogPod(logPodName, logString))
  				framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName)
  			})
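
The refreshed hunks above also track upstream's migration from klog's printf-style `Infof`/`Errorf` calls to the structured `InfoS`/`ErrorS` forms visible in the new context lines, which is part of why this patch had to be regenerated. A minimal sketch of the structured calls, reusing shapes from the context above:

    package main

    import (
    	"errors"
    	"flag"

    	"k8s.io/klog/v2"
    )

    func main() {
    	klog.InitFlags(nil)
    	flag.Set("logtostderr", "true")
    	flag.Parse()
    	defer klog.Flush()

    	// Structured form: a constant message plus alternating key/value pairs.
    	klog.InfoS("Processing manifest with pods", "numPods", 3)

    	// ErrorS takes the error first, then the message and key/values.
    	klog.ErrorS(errors.New("read-only filesystem"), "Failed to create directory",
    		"path", "/var/log/containers")

    	// Verbosity levels compose with the structured form as well.
    	klog.V(4).InfoS("Unable to obtain container ID", "err", errors.New("gone"))
    }
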
diff --git a/third_party/go/patches/k8s-e2e-tests-providerless.patch b/third_party/go/patches/k8s-e2e-tests-providerless.patch
deleted file mode 100644
index b761257..0000000
--- a/third_party/go/patches/k8s-e2e-tests-providerless.patch
+++ /dev/null
@@ -1,5147 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From 65e40a970e3f33f44423653767c9ca8ff792bf70 Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@nexantic.com>
-Date: Mon, 20 Jul 2020 16:50:56 +0200
-Subject: [PATCH] POC Make e2e test suite support providerless
-
----
- .../custom_metrics_stackdriver_autoscaling.go |   2 +
- test/e2e/cloud/imports.go                     |   2 +
- test/e2e/e2e.go                               |  10 -
- test/e2e/e2e_providers.go                     |  32 +
- .../framework/providers/gce/firewall_test.go  |   2 +
- test/e2e/instrumentation/logging/imports.go   |   2 +
- .../instrumentation/monitoring/accelerator.go |   2 +
- .../monitoring/custom_metrics_deployments.go  |   2 +
- .../monitoring/custom_metrics_stackdriver.go  |   4 +-
- .../instrumentation/monitoring/stackdriver.go |   2 +
- .../monitoring/stackdriver_metadata_agent.go  |   4 +-
- test/e2e/network/firewall.go                  |   2 +
- test/e2e/network/ingress.go                   |   2 +
- test/e2e/network/ingress_scale.go             |   2 +
- test/e2e/network/network_tiers.go             |   2 +
- test/e2e/network/scale/ingress.go             |   2 +
- .../network/scale/localrun/ingress_scale.go   |   2 +-
- test/e2e/network/service.go                   | 955 -----------------
- test/e2e/network/service_providers.go         | 980 ++++++++++++++++++
- test/e2e/node/recreate_node.go                |   2 +
- test/e2e/scheduling/nvidia-gpus.go            |   2 +
- test/e2e/scheduling/ubernetes_lite_volumes.go |   2 +
- test/e2e/storage/drivers/in_tree.go           | 732 -------------
- test/e2e/storage/drivers/in_tree_providers.go | 751 ++++++++++++++
- test/e2e/storage/in_tree_volumes.go           |   5 -
- test/e2e/storage/in_tree_volumes_providers.go |  46 +
- .../nfs_persistent_volume-disruptive.go       |   2 +-
- test/e2e/storage/pd.go                        |   2 +
- test/e2e/storage/persistent_volumes-gce.go    |   2 +
- test/e2e/storage/regional_pd.go               |   3 +
- test/e2e/storage/utils/BUILD                  |   3 -
- test/e2e/storage/utils/ebs.go                 |   2 +
- test/e2e/storage/volume_provisioning.go       | 527 ----------
- .../storage/volume_provisioning_providers.go  | 577 +++++++++++
- test/e2e/upgrades/nvidia-gpu.go               |   2 +
- 35 files changed, 2435 insertions(+), 2236 deletions(-)
- create mode 100644 test/e2e/e2e_providers.go
- create mode 100644 test/e2e/network/service_providers.go
- create mode 100644 test/e2e/storage/drivers/in_tree_providers.go
- create mode 100644 test/e2e/storage/in_tree_volumes_providers.go
- create mode 100644 test/e2e/storage/volume_provisioning_providers.go
-
-diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
-index d3a7862d338..8bacec7fe1d 100644
---- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
-+++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-diff --git a/test/e2e/cloud/imports.go b/test/e2e/cloud/imports.go
-index 5aa1def97d1..382cb1a2264 100644
---- a/test/e2e/cloud/imports.go
-+++ b/test/e2e/cloud/imports.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2019 The Kubernetes Authors.
- 
-diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
-index d1e23325d69..f5717e417e7 100644
---- a/test/e2e/e2e.go
-+++ b/test/e2e/e2e.go
-@@ -53,16 +53,6 @@ import (
- 	utilnet "k8s.io/utils/net"
- 
- 	clientset "k8s.io/client-go/kubernetes"
--	// ensure auth plugins are loaded
--	_ "k8s.io/client-go/plugin/pkg/client/auth"
--
--	// ensure that cloud providers are loaded
--	_ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
--	_ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
--	_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
--	_ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
--	_ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
--	_ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
- )
- 
- const (
-diff --git a/test/e2e/e2e_providers.go b/test/e2e/e2e_providers.go
-new file mode 100644
-index 00000000000..cf96642b110
---- /dev/null
-+++ b/test/e2e/e2e_providers.go
-@@ -0,0 +1,32 @@
-+// +build !providerless
-+
-+/*
-+Copyright 2020 The Kubernetes Authors.
-+
-+Licensed under the Apache License, Version 2.0 (the "License");
-+you may not use this file except in compliance with the License.
-+You may obtain a copy of the License at
-+
-+    http://www.apache.org/licenses/LICENSE-2.0
-+
-+Unless required by applicable law or agreed to in writing, software
-+distributed under the License is distributed on an "AS IS" BASIS,
-+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+See the License for the specific language governing permissions and
-+limitations under the License.
-+*/
-+
-+package e2e
-+
-+import (
-+	// ensure auth plugins are loaded
-+	_ "k8s.io/client-go/plugin/pkg/client/auth"
-+
-+	// ensure that cloud providers are loaded
-+	_ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
-+	_ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
-+	_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
-+	_ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
-+	_ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
-+	_ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
-+)
-diff --git a/test/e2e/framework/providers/gce/firewall_test.go b/test/e2e/framework/providers/gce/firewall_test.go
-index 647441dc962..2a92543a5a7 100644
---- a/test/e2e/framework/providers/gce/firewall_test.go
-+++ b/test/e2e/framework/providers/gce/firewall_test.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2018 The Kubernetes Authors.
- 
-diff --git a/test/e2e/instrumentation/logging/imports.go b/test/e2e/instrumentation/logging/imports.go
-index 5dd66717db1..fc15c04bfef 100644
---- a/test/e2e/instrumentation/logging/imports.go
-+++ b/test/e2e/instrumentation/logging/imports.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-diff --git a/test/e2e/instrumentation/monitoring/accelerator.go b/test/e2e/instrumentation/monitoring/accelerator.go
-index 90047e46ea1..6fa094e6a18 100644
---- a/test/e2e/instrumentation/monitoring/accelerator.go
-+++ b/test/e2e/instrumentation/monitoring/accelerator.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
-index de80b129315..8d96b93bf11 100644
---- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
-+++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go
-index 277b5a0ab24..ddbc3f20802 100644
---- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go
-+++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-@@ -21,7 +23,7 @@ import (
- 	"time"
- 
- 	gcm "google.golang.org/api/monitoring/v3"
--	"k8s.io/api/core/v1"
-+	v1 "k8s.io/api/core/v1"
- 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- 	"k8s.io/apimachinery/pkg/labels"
- 	"k8s.io/apimachinery/pkg/runtime/schema"
-diff --git a/test/e2e/instrumentation/monitoring/stackdriver.go b/test/e2e/instrumentation/monitoring/stackdriver.go
-index dbc5e51c20d..3db0120900b 100644
---- a/test/e2e/instrumentation/monitoring/stackdriver.go
-+++ b/test/e2e/instrumentation/monitoring/stackdriver.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
-index 321591344db..bad9be5b5bf 100644
---- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
-+++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-@@ -24,7 +26,7 @@ import (
- 	"reflect"
- 	"time"
- 
--	"k8s.io/api/core/v1"
-+	v1 "k8s.io/api/core/v1"
- 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- 	clientset "k8s.io/client-go/kubernetes"
- 	"k8s.io/kubernetes/test/e2e/framework"
-diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go
-index f4200f5a30c..f8612ed75a9 100644
---- a/test/e2e/network/firewall.go
-+++ b/test/e2e/network/firewall.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2016 The Kubernetes Authors.
- 
-diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go
-index 6c3b09e41f2..8485f8ce50e 100644
---- a/test/e2e/network/ingress.go
-+++ b/test/e2e/network/ingress.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2015 The Kubernetes Authors.
- 
-diff --git a/test/e2e/network/ingress_scale.go b/test/e2e/network/ingress_scale.go
-index 6cc8585b7b2..867c834868c 100644
---- a/test/e2e/network/ingress_scale.go
-+++ b/test/e2e/network/ingress_scale.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2018 The Kubernetes Authors.
- 
-diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go
-index 5ae68a5a1ee..f3ea1f72a6b 100644
---- a/test/e2e/network/network_tiers.go
-+++ b/test/e2e/network/network_tiers.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go
-index 954296beb52..43ad9c9b618 100644
---- a/test/e2e/network/scale/ingress.go
-+++ b/test/e2e/network/scale/ingress.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2018 The Kubernetes Authors.
- 
-diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go
-index 2e2c39884da..5a27f5f4cb2 100644
---- a/test/e2e/network/scale/localrun/ingress_scale.go
-+++ b/test/e2e/network/scale/localrun/ingress_scale.go
-@@ -27,7 +27,7 @@ import (
- 
- 	"k8s.io/klog/v2"
- 
--	"k8s.io/api/core/v1"
-+	v1 "k8s.io/api/core/v1"
- 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- 	clientset "k8s.io/client-go/kubernetes"
- 	"k8s.io/client-go/tools/clientcmd"
-diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
-index 35ac43001d2..b458347a9f9 100644
---- a/test/e2e/network/service.go
-+++ b/test/e2e/network/service.go
-@@ -31,8 +31,6 @@ import (
- 
- 	utilnet "k8s.io/apimachinery/pkg/util/net"
- 
--	compute "google.golang.org/api/compute/v1"
--
- 	"k8s.io/client-go/tools/cache"
- 
- 	appsv1 "k8s.io/api/apps/v1"
-@@ -52,11 +50,9 @@ import (
- 	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
- 	e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
- 	e2eendpointslice "k8s.io/kubernetes/test/e2e/framework/endpointslice"
--	e2ekubesystem "k8s.io/kubernetes/test/e2e/framework/kubesystem"
- 	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
- 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
- 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
--	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
- 	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
- 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
- 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-@@ -64,7 +60,6 @@ import (
- 	"k8s.io/kubernetes/test/e2e/storage/utils"
- 	testutils "k8s.io/kubernetes/test/utils"
- 	imageutils "k8s.io/kubernetes/test/utils/image"
--	gcecloud "k8s.io/legacy-cloud-providers/gce"
- 
- 	"github.com/onsi/ginkgo"
- 	"github.com/onsi/gomega"
-@@ -1242,375 +1237,6 @@ var _ = SIGDescribe("Services", func() {
- 		framework.ExpectNoError(err)
- 	})
- 
--	// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
--	ginkgo.It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() {
--		// requires cloud load-balancer support
--		e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
--
--		loadBalancerSupportsUDP := !framework.ProviderIs("aws")
--
--		loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault
--		if framework.ProviderIs("aws") {
--			loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
--		}
--		loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
--
--		// This test is more monolithic than we'd like because LB turnup can be
--		// very slow, so we lumped all the tests into one LB lifecycle.
--
--		serviceName := "mutability-test"
--		ns1 := f.Namespace.Name // LB1 in ns1 on TCP
--		framework.Logf("namespace for TCP test: %s", ns1)
--
--		ginkgo.By("creating a second namespace")
--		namespacePtr, err := f.CreateNamespace("services", nil)
--		framework.ExpectNoError(err, "failed to create namespace")
--		ns2 := namespacePtr.Name // LB2 in ns2 on UDP
--		framework.Logf("namespace for UDP test: %s", ns2)
--
--		nodeIP, err := e2enode.PickIP(cs) // for later
--		framework.ExpectNoError(err)
--
--		// Test TCP and UDP Services.  Services with the same name in different
--		// namespaces should get different node ports and load balancers.
--
--		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
--		tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
--		tcpService, err := tcpJig.CreateTCPService(nil)
--		framework.ExpectNoError(err)
--
--		ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
--		udpJig := e2eservice.NewTestJig(cs, ns2, serviceName)
--		udpService, err := udpJig.CreateUDPService(nil)
--		framework.ExpectNoError(err)
--
--		ginkgo.By("verifying that TCP and UDP use the same port")
--		if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
--			framework.Failf("expected to use the same port for TCP and UDP")
--		}
--		svcPort := int(tcpService.Spec.Ports[0].Port)
--		framework.Logf("service port (TCP and UDP): %d", svcPort)
--
--		ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
--		_, err = tcpJig.Run(nil)
--		framework.ExpectNoError(err)
--
--		ginkgo.By("creating a pod to be part of the UDP service " + serviceName)
--		_, err = udpJig.Run(nil)
--		framework.ExpectNoError(err)
--
--		// Change the services to NodePort.
--
--		ginkgo.By("changing the TCP service to type=NodePort")
--		tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
--			s.Spec.Type = v1.ServiceTypeNodePort
--		})
--		framework.ExpectNoError(err)
--		tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
--		framework.Logf("TCP node port: %d", tcpNodePort)
--
--		ginkgo.By("changing the UDP service to type=NodePort")
--		udpService, err = udpJig.UpdateService(func(s *v1.Service) {
--			s.Spec.Type = v1.ServiceTypeNodePort
--		})
--		framework.ExpectNoError(err)
--		udpNodePort := int(udpService.Spec.Ports[0].NodePort)
--		framework.Logf("UDP node port: %d", udpNodePort)
--
--		ginkgo.By("hitting the TCP service's NodePort")
--		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("hitting the UDP service's NodePort")
--		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		// Change the services to LoadBalancer.
--
--		// Here we test that LoadBalancers can receive static IP addresses.  This isn't
--		// necessary, but is an additional feature this monolithic test checks.
--		requestedIP := ""
--		staticIPName := ""
--		if framework.ProviderIs("gce", "gke") {
--			ginkgo.By("creating a static load balancer IP")
--			staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID)
--			gceCloud, err := gce.GetGCECloud()
--			framework.ExpectNoError(err, "failed to get GCE cloud provider")
--
--			err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
--			defer func() {
--				if staticIPName != "" {
--					// Release GCE static IP - this is not kube-managed and will not be automatically released.
--					if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
--						framework.Logf("failed to release static IP %s: %v", staticIPName, err)
--					}
--				}
--			}()
--			framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)
--			reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
--			framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
--
--			requestedIP = reservedAddr.Address
--			framework.Logf("Allocated static load balancer IP: %s", requestedIP)
--		}
--
--		ginkgo.By("changing the TCP service to type=LoadBalancer")
--		tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
--			s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
--			s.Spec.Type = v1.ServiceTypeLoadBalancer
--		})
--		framework.ExpectNoError(err)
--
--		if loadBalancerSupportsUDP {
--			ginkgo.By("changing the UDP service to type=LoadBalancer")
--			udpService, err = udpJig.UpdateService(func(s *v1.Service) {
--				s.Spec.Type = v1.ServiceTypeLoadBalancer
--			})
--			framework.ExpectNoError(err)
--		}
--		serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
--		if loadBalancerSupportsUDP {
--			serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService))
--		}
--
--		ginkgo.By("waiting for the TCP service to have a load balancer")
--		// Wait for the load balancer to be created asynchronously
--		tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
--		framework.ExpectNoError(err)
--		if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
--			framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
--		}
--		if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
--			framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
--		}
--		tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
--		framework.Logf("TCP load balancer: %s", tcpIngressIP)
--
--		if framework.ProviderIs("gce", "gke") {
--			// Do this as early as possible, which overrides the `defer` above.
--			// This is mostly out of fear of leaking the IP in a timeout case
--			// (as of this writing we're not 100% sure where the leaks are
--			// coming from, so this is first-aid rather than surgery).
--			ginkgo.By("demoting the static IP to ephemeral")
--			if staticIPName != "" {
--				gceCloud, err := gce.GetGCECloud()
--				framework.ExpectNoError(err, "failed to get GCE cloud provider")
--				// Deleting it after it is attached "demotes" it to an
--				// ephemeral IP, which can be auto-released.
--				if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
--					framework.Failf("failed to release static IP %s: %v", staticIPName, err)
--				}
--				staticIPName = ""
--			}
--		}
--
--		var udpIngressIP string
--		if loadBalancerSupportsUDP {
--			ginkgo.By("waiting for the UDP service to have a load balancer")
--			// 2nd one should be faster since they ran in parallel.
--			udpService, err = udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
--			framework.ExpectNoError(err)
--			if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
--				framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
--			}
--			udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
--			framework.Logf("UDP load balancer: %s", udpIngressIP)
--
--			ginkgo.By("verifying that TCP and UDP use different load balancers")
--			if tcpIngressIP == udpIngressIP {
--				framework.Failf("Load balancers are not different: %s", e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
--			}
--		}
--
--		ginkgo.By("hitting the TCP service's NodePort")
--		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("hitting the UDP service's NodePort")
--		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("hitting the TCP service's LoadBalancer")
--		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
--
--		if loadBalancerSupportsUDP {
--			ginkgo.By("hitting the UDP service's LoadBalancer")
--			testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
--		}
--
--		// Change the services' node ports.
--
--		ginkgo.By("changing the TCP service's NodePort")
--		tcpService, err = tcpJig.ChangeServiceNodePort(tcpNodePort)
--		framework.ExpectNoError(err)
--		tcpNodePortOld := tcpNodePort
--		tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
--		if tcpNodePort == tcpNodePortOld {
--			framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
--		}
--		if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
--			framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
--		}
--		framework.Logf("TCP node port: %d", tcpNodePort)
--
--		ginkgo.By("changing the UDP service's NodePort")
--		udpService, err = udpJig.ChangeServiceNodePort(udpNodePort)
--		framework.ExpectNoError(err)
--		udpNodePortOld := udpNodePort
--		udpNodePort = int(udpService.Spec.Ports[0].NodePort)
--		if udpNodePort == udpNodePortOld {
--			framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
--		}
--		if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
--			framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
--		}
--		framework.Logf("UDP node port: %d", udpNodePort)
--
--		ginkgo.By("hitting the TCP service's new NodePort")
--		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("hitting the UDP service's new NodePort")
--		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("checking the old TCP NodePort is closed")
--		testNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("checking the old UDP NodePort is closed")
--		testNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("hitting the TCP service's LoadBalancer")
--		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
--
--		if loadBalancerSupportsUDP {
--			ginkgo.By("hitting the UDP service's LoadBalancer")
--			testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
--		}
--
--		// Change the services' main ports.
--
--		ginkgo.By("changing the TCP service's port")
--		tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
--			s.Spec.Ports[0].Port++
--		})
--		framework.ExpectNoError(err)
--		svcPortOld := svcPort
--		svcPort = int(tcpService.Spec.Ports[0].Port)
--		if svcPort == svcPortOld {
--			framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
--		}
--		if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
--			framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
--		}
--		if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
--			framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
--		}
--
--		ginkgo.By("changing the UDP service's port")
--		udpService, err = udpJig.UpdateService(func(s *v1.Service) {
--			s.Spec.Ports[0].Port++
--		})
--		framework.ExpectNoError(err)
--		if int(udpService.Spec.Ports[0].Port) != svcPort {
--			framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
--		}
--		if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
--			framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
--		}
--		if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
--			framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
--		}
--
--		framework.Logf("service port (TCP and UDP): %d", svcPort)
--
--		ginkgo.By("hitting the TCP service's NodePort")
--		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("hitting the UDP service's NodePort")
--		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("hitting the TCP service's LoadBalancer")
--		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
--
--		if loadBalancerSupportsUDP {
--			ginkgo.By("hitting the UDP service's LoadBalancer")
--			testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
--		}
--
--		ginkgo.By("Scaling the pods to 0")
--		err = tcpJig.Scale(0)
--		framework.ExpectNoError(err)
--		err = udpJig.Scale(0)
--		framework.ExpectNoError(err)
--
--		ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort")
--		testRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort")
--		testRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer")
--		testRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
--
--		if loadBalancerSupportsUDP {
--			ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
--			testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
--		}
--
--		ginkgo.By("Scaling the pods to 1")
--		err = tcpJig.Scale(1)
--		framework.ExpectNoError(err)
--		err = udpJig.Scale(1)
--		framework.ExpectNoError(err)
--
--		ginkgo.By("hitting the TCP service's NodePort")
--		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("hitting the UDP service's NodePort")
--		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("hitting the TCP service's LoadBalancer")
--		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
--
--		if loadBalancerSupportsUDP {
--			ginkgo.By("hitting the UDP service's LoadBalancer")
--			testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
--		}
--
--		// Change the services back to ClusterIP.
--
--		ginkgo.By("changing TCP service back to type=ClusterIP")
--		_, err = tcpJig.UpdateService(func(s *v1.Service) {
--			s.Spec.Type = v1.ServiceTypeClusterIP
--			s.Spec.Ports[0].NodePort = 0
--		})
--		framework.ExpectNoError(err)
--		// Wait for the load balancer to be destroyed asynchronously
--		_, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
--		framework.ExpectNoError(err)
--
--		ginkgo.By("changing UDP service back to type=ClusterIP")
--		_, err = udpJig.UpdateService(func(s *v1.Service) {
--			s.Spec.Type = v1.ServiceTypeClusterIP
--			s.Spec.Ports[0].NodePort = 0
--		})
--		framework.ExpectNoError(err)
--		if loadBalancerSupportsUDP {
--			// Wait for the load balancer to be destroyed asynchronously
--			_, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
--			framework.ExpectNoError(err)
--		}
--
--		ginkgo.By("checking the TCP NodePort is closed")
--		testNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("checking the UDP NodePort is closed")
--		testNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
--
--		ginkgo.By("checking the TCP LoadBalancer is closed")
--		testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
--
--		if loadBalancerSupportsUDP {
--			ginkgo.By("checking the UDP LoadBalancer is closed")
--			testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
--		}
--	})
--
- 	/*
- 		Testname: Service, update NodePort, same port different protocol
- 		Description: Create a service to accept TCP requests. By default, created service MUST be of type ClusterIP and an ClusterIP MUST be assigned to the service.
-@@ -2253,199 +1879,6 @@ var _ = SIGDescribe("Services", func() {
- 		checkReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPod.Name, svcIP)
- 	})
- 
--	ginkgo.It("should be able to create an internal type load balancer [Slow]", func() {
--		e2eskipper.SkipUnlessProviderIs("azure", "gke", "gce")
--
--		createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
--		pollInterval := framework.Poll * 10
--
--		namespace := f.Namespace.Name
--		serviceName := "lb-internal"
--		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
--
--		ginkgo.By("creating pod to be part of service " + serviceName)
--		_, err := jig.Run(nil)
--		framework.ExpectNoError(err)
--
--		enableILB, disableILB := enableAndDisableInternalLB()
--
--		isInternalEndpoint := func(lbIngress *v1.LoadBalancerIngress) bool {
--			ingressEndpoint := e2eservice.GetIngressPoint(lbIngress)
--			// Needs update for providers using hostname as endpoint.
--			return strings.HasPrefix(ingressEndpoint, "10.")
--		}
--
--		ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled")
--		svc, err := jig.CreateTCPService(func(svc *v1.Service) {
--			svc.Spec.Type = v1.ServiceTypeLoadBalancer
--			enableILB(svc)
--		})
--		framework.ExpectNoError(err)
--
--		defer func() {
--			ginkgo.By("Clean up loadbalancer service")
--			e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
--		}()
--
--		svc, err = jig.WaitForLoadBalancer(createTimeout)
--		framework.ExpectNoError(err)
--		lbIngress := &svc.Status.LoadBalancer.Ingress[0]
--		svcPort := int(svc.Spec.Ports[0].Port)
--		// should have an internal IP.
--		framework.ExpectEqual(isInternalEndpoint(lbIngress), true)
--
--		// ILBs are not accessible from the test orchestrator, so it's necessary to use
--		//  a pod to test the service.
--		ginkgo.By("hitting the internal load balancer from pod")
--		framework.Logf("creating pod with host network")
--		hostExec := launchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")
--
--		framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
--		tcpIngressIP := e2eservice.GetIngressPoint(lbIngress)
--		if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
--			cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
--			stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
--			if err != nil {
--				framework.Logf("error curling; stdout: %v. err: %v", stdout, err)
--				return false, nil
--			}
--
--			if !strings.Contains(stdout, "hello") {
--				framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
--				return false, nil
--			}
--
--			framework.Logf("Successful curl; stdout: %v", stdout)
--			return true, nil
--		}); pollErr != nil {
--			framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr)
--		}
--
--		ginkgo.By("switching to external type LoadBalancer")
--		svc, err = jig.UpdateService(func(svc *v1.Service) {
--			disableILB(svc)
--		})
--		framework.ExpectNoError(err)
--		framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
--		if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
--			svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
--			if err != nil {
--				return false, err
--			}
--			lbIngress = &svc.Status.LoadBalancer.Ingress[0]
--			return !isInternalEndpoint(lbIngress), nil
--		}); pollErr != nil {
--			framework.Failf("Loadbalancer IP not changed to external.")
--		}
--		// should have an external IP.
--		gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse())
--
--		ginkgo.By("hitting the external load balancer")
--		framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
--		tcpIngressIP = e2eservice.GetIngressPoint(lbIngress)
--		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
--
--		// GCE cannot test a specific IP because the test may not own it. This cloud specific condition
--		// will be removed when GCP supports similar functionality.
--		if framework.ProviderIs("azure") {
--			ginkgo.By("switching back to interal type LoadBalancer, with static IP specified.")
--			internalStaticIP := "10.240.11.11"
--			svc, err = jig.UpdateService(func(svc *v1.Service) {
--				svc.Spec.LoadBalancerIP = internalStaticIP
--				enableILB(svc)
--			})
--			framework.ExpectNoError(err)
--			framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
--			if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
--				svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
--				if err != nil {
--					return false, err
--				}
--				lbIngress = &svc.Status.LoadBalancer.Ingress[0]
--				return isInternalEndpoint(lbIngress), nil
--			}); pollErr != nil {
--				framework.Failf("Loadbalancer IP not changed to internal.")
--			}
--			// should have the given static internal IP.
--			framework.ExpectEqual(e2eservice.GetIngressPoint(lbIngress), internalStaticIP)
--		}
--	})
--
--	// This test creates a load balancer and makes sure its health check
--	// interval equals gceHcCheckIntervalSeconds. Then the interval is
--	// manipulated to be something else, and we verify that it gets reconciled.
--	ginkgo.It("should reconcile LB health check interval [Slow][Serial]", func() {
--		const gceHcCheckIntervalSeconds = int64(8)
--		// This test is for clusters on GCE.
--		// (It restarts kube-controller-manager, which we don't support on GKE)
--		e2eskipper.SkipUnlessProviderIs("gce")
--		e2eskipper.SkipUnlessSSHKeyPresent()
--
--		clusterID, err := gce.GetClusterID(cs)
--		if err != nil {
--			framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
--		}
--		gceCloud, err := gce.GetGCECloud()
--		if err != nil {
--			framework.Failf("framework.GetGCECloud() = _, %v; want nil", err)
--		}
--
--		namespace := f.Namespace.Name
--		serviceName := "lb-hc-int"
--		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
--
--		ginkgo.By("create load balancer service")
--		// Create a LoadBalancer service.
--		svc, err := jig.CreateTCPService(func(svc *v1.Service) {
--			svc.Spec.Type = v1.ServiceTypeLoadBalancer
--		})
--		framework.ExpectNoError(err)
--
--		defer func() {
--			ginkgo.By("Clean up loadbalancer service")
--			e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
--		}()
--
--		svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
--		framework.ExpectNoError(err)
--
--		hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
--		hc, err := gceCloud.GetHTTPHealthCheck(hcName)
--		if err != nil {
--			framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
--		}
--		framework.ExpectEqual(hc.CheckIntervalSec, gceHcCheckIntervalSeconds)
--
--		ginkgo.By("modify the health check interval")
--		hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
--		if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
--			framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
--		}
--
--		ginkgo.By("restart kube-controller-manager")
--		if err := e2ekubesystem.RestartControllerManager(); err != nil {
--			framework.Failf("e2ekubesystem.RestartControllerManager() = %v; want nil", err)
--		}
--		if err := e2ekubesystem.WaitForControllerManagerUp(); err != nil {
--			framework.Failf("e2ekubesystem.WaitForControllerManagerUp() = %v; want nil", err)
--		}
--
--		ginkgo.By("health check should be reconciled")
--		pollInterval := framework.Poll * 10
--		loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
--		if pollErr := wait.PollImmediate(pollInterval, loadBalancerPropagationTimeout, func() (bool, error) {
--			hc, err := gceCloud.GetHTTPHealthCheck(hcName)
--			if err != nil {
--				framework.Logf("ginkgo.Failed to get HttpHealthCheck(%q): %v", hcName, err)
--				return false, err
--			}
--			framework.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
--			return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
--		}); pollErr != nil {
--			framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
--		}
--	})
--
- 	/*
- 		Release: v1.19
- 		Testname: Service, ClusterIP type, session affinity to ClientIP
-@@ -2999,394 +2432,6 @@ var _ = SIGDescribe("Services", func() {
- 	})
- })
- 
--var _ = SIGDescribe("ESIPP [Slow]", func() {
--	f := framework.NewDefaultFramework("esipp")
--	var loadBalancerCreateTimeout time.Duration
--
--	var cs clientset.Interface
--	serviceLBNames := []string{}
--
--	ginkgo.BeforeEach(func() {
--		// requires cloud load-balancer support - this feature currently supported only on GCE/GKE
--		e2eskipper.SkipUnlessProviderIs("gce", "gke")
--
--		cs = f.ClientSet
--		loadBalancerCreateTimeout = e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
--	})
--
--	ginkgo.AfterEach(func() {
--		if ginkgo.CurrentGinkgoTestDescription().Failed {
--			DescribeSvc(f.Namespace.Name)
--		}
--		for _, lb := range serviceLBNames {
--			framework.Logf("cleaning load balancer resource for %s", lb)
--			e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
--		}
--		//reset serviceLBNames
--		serviceLBNames = []string{}
--	})
--
--	ginkgo.It("should work for type=LoadBalancer", func() {
--		namespace := f.Namespace.Name
--		serviceName := "external-local-lb"
--		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
--
--		svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
--		framework.ExpectNoError(err)
--		serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
--		healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
--		if healthCheckNodePort == 0 {
--			framework.Failf("Service HealthCheck NodePort was not allocated")
--		}
--		defer func() {
--			err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
--			framework.ExpectNoError(err)
--
--			// Make sure we didn't leak the health check node port.
--			const threshold = 2
--			nodes, err := getEndpointNodesWithInternalIP(jig)
--			framework.ExpectNoError(err)
--			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
--			for _, internalIP := range nodes {
--				err := testHTTPHealthCheckNodePortFromTestContainer(
--					config,
--					internalIP,
--					healthCheckNodePort,
--					e2eservice.KubeProxyLagTimeout,
--					false,
--					threshold)
--				framework.ExpectNoError(err)
--			}
--			err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
--			framework.ExpectNoError(err)
--		}()
--
--		svcTCPPort := int(svc.Spec.Ports[0].Port)
--		ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
--
--		ginkgo.By("reading clientIP using the TCP service's service port via its external VIP")
--		content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
--		clientIP := content.String()
--		framework.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
--
--		ginkgo.By("checking if Source IP is preserved")
--		if strings.HasPrefix(clientIP, "10.") {
--			framework.Failf("Source IP was NOT preserved")
--		}
--	})
--
--	ginkgo.It("should work for type=NodePort", func() {
--		namespace := f.Namespace.Name
--		serviceName := "external-local-nodeport"
--		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
--
--		svc, err := jig.CreateOnlyLocalNodePortService(true)
--		framework.ExpectNoError(err)
--		defer func() {
--			err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
--			framework.ExpectNoError(err)
--		}()
--
--		tcpNodePort := int(svc.Spec.Ports[0].NodePort)
--
--		endpointsNodeMap, err := getEndpointNodesWithInternalIP(jig)
--		framework.ExpectNoError(err)
--
--		dialCmd := "clientip"
--		config := e2enetwork.NewNetworkingTestConfig(f, false, false)
--
--		for nodeName, nodeIP := range endpointsNodeMap {
--			ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v:%v/%v", nodeName, nodeIP, tcpNodePort, dialCmd))
--			clientIP, err := GetHTTPContentFromTestContainer(config, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, dialCmd)
--			framework.ExpectNoError(err)
--			framework.Logf("ClientIP detected by target pod using NodePort is %s, the ip of test container is %s", clientIP, config.TestContainerPod.Status.PodIP)
--			// the clientIP returned by agnhost contains port
--			if !strings.HasPrefix(clientIP, config.TestContainerPod.Status.PodIP) {
--				framework.Failf("Source IP was NOT preserved")
--			}
--		}
--	})
--
--	ginkgo.It("should only target nodes with endpoints", func() {
--		namespace := f.Namespace.Name
--		serviceName := "external-local-nodes"
--		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
--		nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
--		framework.ExpectNoError(err)
--
--		svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, false,
--			func(svc *v1.Service) {
--				// Change service port to avoid collision with opened hostPorts
--				// in other tests that run in parallel.
--				if len(svc.Spec.Ports) != 0 {
--					svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(svc.Spec.Ports[0].Port))
--					svc.Spec.Ports[0].Port = 8081
--				}
--
--			})
--		framework.ExpectNoError(err)
--		serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
--		defer func() {
--			err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
--			framework.ExpectNoError(err)
--			err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
--			framework.ExpectNoError(err)
--		}()
--
--		healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
--		if healthCheckNodePort == 0 {
--			framework.Failf("Service HealthCheck NodePort was not allocated")
--		}
--
--		ips := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
--
--		ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
--		svcTCPPort := int(svc.Spec.Ports[0].Port)
--
--		const threshold = 2
--		config := e2enetwork.NewNetworkingTestConfig(f, false, false)
--		for i := 0; i < len(nodes.Items); i++ {
--			endpointNodeName := nodes.Items[i].Name
--
--			ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
--			_, err = jig.Run(func(rc *v1.ReplicationController) {
--				rc.Name = serviceName
--				if endpointNodeName != "" {
--					rc.Spec.Template.Spec.NodeName = endpointNodeName
--				}
--			})
--			framework.ExpectNoError(err)
--
--			ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName))
--			err = jig.WaitForEndpointOnNode(endpointNodeName)
--			framework.ExpectNoError(err)
--
--			// HealthCheck should pass only on the node where num(endpoints) > 0
--			// All other nodes should fail the healthcheck on the service healthCheckNodePort
--			for n, internalIP := range ips {
--				// Make sure the loadbalancer picked up the health check change.
--				// Confirm traffic can reach backend through LB before checking healthcheck nodeport.
--				e2eservice.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout)
--				expectedSuccess := nodes.Items[n].Name == endpointNodeName
--				port := strconv.Itoa(healthCheckNodePort)
--				ipPort := net.JoinHostPort(internalIP, port)
--				framework.Logf("Health checking %s, http://%s/healthz, expectedSuccess %v", nodes.Items[n].Name, ipPort, expectedSuccess)
--				err := testHTTPHealthCheckNodePortFromTestContainer(
--					config,
--					internalIP,
--					healthCheckNodePort,
--					e2eservice.KubeProxyEndpointLagTimeout,
--					expectedSuccess,
--					threshold)
--				framework.ExpectNoError(err)
--			}
--			framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
--		}
--	})
--
--	ginkgo.It("should work from pods", func() {
--		var err error
--		namespace := f.Namespace.Name
--		serviceName := "external-local-pods"
--		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
--
--		svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
--		framework.ExpectNoError(err)
--		serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
--		defer func() {
--			err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
--			framework.ExpectNoError(err)
--			err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
--			framework.ExpectNoError(err)
--		}()
--
--		ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
--		port := strconv.Itoa(int(svc.Spec.Ports[0].Port))
--		ipPort := net.JoinHostPort(ingressIP, port)
--		path := fmt.Sprintf("%s/clientip", ipPort)
--
--		ginkgo.By("Creating pause pod deployment to make sure, pausePods are in desired state")
--		deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1)
--		framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
--
--		defer func() {
--			framework.Logf("Deleting deployment")
--			err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
--			framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
--		}()
--
--		deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
--		framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
--		labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
--		framework.ExpectNoError(err, "Error in setting LabelSelector as selector from deployment")
--
--		pausePods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()})
--		framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments")
--
--		pausePod := pausePods.Items[0]
--		framework.Logf("Waiting up to %v curl %v", e2eservice.KubeProxyLagTimeout, path)
--		cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %v`, path)
--
--		var srcIP string
--		loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
--		ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName))
--		if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) {
--			stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
--			if err != nil {
--				framework.Logf("got err: %v, retry until timeout", err)
--				return false, nil
--			}
--			srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0])
--			return srcIP == pausePod.Status.PodIP, nil
--		}); pollErr != nil {
--			framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", pausePod.Name, pausePod.Status.PodIP, srcIP)
--		}
--	})
--
--	ginkgo.It("should handle updates to ExternalTrafficPolicy field", func() {
--		namespace := f.Namespace.Name
--		serviceName := "external-local-update"
--		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
--
--		nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
--		framework.ExpectNoError(err)
--		if len(nodes.Items) < 2 {
--			framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
--		}
--
--		svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
--		framework.ExpectNoError(err)
--		serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
--		defer func() {
--			err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
--			framework.ExpectNoError(err)
--			err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
--			framework.ExpectNoError(err)
--		}()
--
--		// save the health check node port because it disappears when ESIPP is turned off.
--		healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
--
--		ginkgo.By("turning ESIPP off")
--		svc, err = jig.UpdateService(func(svc *v1.Service) {
--			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
--		})
--		framework.ExpectNoError(err)
--		if svc.Spec.HealthCheckNodePort > 0 {
--			framework.Failf("Service HealthCheck NodePort still present")
--		}
--
--		epNodes, err := jig.ListNodesWithEndpoint()
--		framework.ExpectNoError(err)
--		// map from name of nodes with endpoint to internal ip
--		// it is assumed that there is only a single node with the endpoint
--		endpointNodeMap := make(map[string]string)
--		// map from name of nodes without endpoint to internal ip
--		noEndpointNodeMap := make(map[string]string)
--		for _, node := range epNodes {
--			ips := e2enode.GetAddresses(&node, v1.NodeInternalIP)
--			if len(ips) < 1 {
--				framework.Failf("No internal ip found for node %s", node.Name)
--			}
--			endpointNodeMap[node.Name] = ips[0]
--		}
--		for _, n := range nodes.Items {
--			ips := e2enode.GetAddresses(&n, v1.NodeInternalIP)
--			if len(ips) < 1 {
--				framework.Failf("No internal ip found for node %s", n.Name)
--			}
--			if _, ok := endpointNodeMap[n.Name]; !ok {
--				noEndpointNodeMap[n.Name] = ips[0]
--			}
--		}
--		framework.ExpectNotEqual(len(endpointNodeMap), 0)
--		framework.ExpectNotEqual(len(noEndpointNodeMap), 0)
--
--		svcTCPPort := int(svc.Spec.Ports[0].Port)
--		svcNodePort := int(svc.Spec.Ports[0].NodePort)
--		ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
--		path := "/clientip"
--		dialCmd := "clientip"
--
--		config := e2enetwork.NewNetworkingTestConfig(f, false, false)
--
--		ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap))
--		for nodeName, nodeIP := range noEndpointNodeMap {
--			ginkgo.By(fmt.Sprintf("Checking %v (%v:%v/%v) proxies to endpoints on another node", nodeName, nodeIP[0], svcNodePort, dialCmd))
--			_, err := GetHTTPContentFromTestContainer(config, nodeIP, svcNodePort, e2eservice.KubeProxyLagTimeout, dialCmd)
--			framework.ExpectNoError(err, "Could not reach HTTP service through %v:%v/%v after %v", nodeIP, svcNodePort, dialCmd, e2eservice.KubeProxyLagTimeout)
--		}
--
--		for nodeName, nodeIP := range endpointNodeMap {
--			ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIP))
--			var body string
--			pollFn := func() (bool, error) {
--				// we expect connection failure here, but not other errors
--				resp, err := config.GetResponseFromTestContainer(
--					"http",
--					"healthz",
--					nodeIP,
--					healthCheckNodePort)
--				if err != nil {
--					return false, nil
--				}
--				if len(resp.Errors) > 0 {
--					return true, nil
--				}
--				if len(resp.Responses) > 0 {
--					body = resp.Responses[0]
--				}
--				return false, nil
--			}
--			if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollFn); pollErr != nil {
--				framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s",
--					nodeName, healthCheckNodePort, body)
--			}
--		}
--
--		// Poll till kube-proxy re-adds the MASQUERADE rule on the node.
--		ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP))
--		var clientIP string
--		pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
--			content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
--			clientIP = content.String()
--			if strings.HasPrefix(clientIP, "10.") {
--				return true, nil
--			}
--			return false, nil
--		})
--		if pollErr != nil {
--			framework.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP)
--		}
--
--		// TODO: We need to attempt to create another service with the previously
--		// allocated healthcheck nodePort. If the health check nodePort has been
--		// freed, the new service creation will succeed, upon which we cleanup.
--		// If the health check nodePort has NOT been freed, the new service
--		// creation will fail.
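A minimal sketch of the check this TODO describes, assuming the jig, client and
healthCheckNodePort from the surrounding test (the probe service name is
hypothetical):

    probeSvc := &v1.Service{
        ObjectMeta: metav1.ObjectMeta{Name: "hc-nodeport-probe", Namespace: namespace},
        Spec: v1.ServiceSpec{
            Type:     v1.ServiceTypeNodePort,
            Selector: jig.Labels,
            Ports: []v1.ServicePort{{
                Protocol: v1.ProtocolTCP,
                Port:     80,
                // Request the nodePort previously used as the health check
                // port; creation succeeds only if it has been freed.
                NodePort: int32(healthCheckNodePort),
            }},
        },
    }
    if _, err := cs.CoreV1().Services(namespace).Create(context.TODO(), probeSvc, metav1.CreateOptions{}); err != nil {
        framework.Failf("health check nodePort %d was not freed: %v", healthCheckNodePort, err)
    }
    // On success, clean up the probe service so the port is released again.
    _ = cs.CoreV1().Services(namespace).Delete(context.TODO(), probeSvc.Name, metav1.DeleteOptions{})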
--
--		ginkgo.By("setting ExternalTraffic field back to OnlyLocal")
--		svc, err = jig.UpdateService(func(svc *v1.Service) {
--			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
--			// Request the same healthCheckNodePort as before, to test the user-requested allocation path
--			svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
--		})
--		framework.ExpectNoError(err)
--		pollErr = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
--			content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
--			clientIP = content.String()
--			ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP))
--			if !strings.HasPrefix(clientIP, "10.") {
--				return true, nil
--			}
--			return false, nil
--		})
--		if pollErr != nil {
--			framework.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP)
--		}
--	})
--})
--
- // execAffinityTestForSessionAffinityTimeout is a helper function that wraps the logic of
- // the affinity test for non-load-balancer services. Session affinity will be
- // enabled when the service is created and a short timeout will be configured so
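For reference, the service-side knob such a helper drives is the SessionAffinity
field plus SessionAffinityConfig; roughly (a sketch against the core/v1 API, the
timeout value is illustrative):

    svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
    shortTimeout := int32(10) // seconds; short enough for the test to observe expiry
    svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
        ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &shortTimeout},
    }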
-diff --git a/test/e2e/network/service_providers.go b/test/e2e/network/service_providers.go
-new file mode 100644
-index 00000000000..b7eae6feb2c
---- /dev/null
-+++ b/test/e2e/network/service_providers.go
-@@ -0,0 +1,980 @@
-+// +build !providerless
-+
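The build constraint above is what keeps this file out of providerless builds.
Purely as an illustration (no such file is added here), a counterpart compiled
only when the providerless tag is set would start with:

    // +build providerless

    package network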
-+/*
-+Copyright 2020 The Kubernetes Authors.
-+
-+Licensed under the Apache License, Version 2.0 (the "License");
-+you may not use this file except in compliance with the License.
-+You may obtain a copy of the License at
-+
-+    http://www.apache.org/licenses/LICENSE-2.0
-+
-+Unless required by applicable law or agreed to in writing, software
-+distributed under the License is distributed on an "AS IS" BASIS,
-+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+See the License for the specific language governing permissions and
-+limitations under the License.
-+*/
-+
-+package network
-+
-+import (
-+	"bytes"
-+	"context"
-+	"fmt"
-+	"net"
-+	"strconv"
-+	"strings"
-+	"time"
-+
-+	compute "google.golang.org/api/compute/v1"
-+	v1 "k8s.io/api/core/v1"
-+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-+	"k8s.io/apimachinery/pkg/util/intstr"
-+	"k8s.io/apimachinery/pkg/util/wait"
-+	clientset "k8s.io/client-go/kubernetes"
-+	cloudprovider "k8s.io/cloud-provider"
-+	"k8s.io/kubernetes/test/e2e/framework"
-+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
-+	e2ekubesystem "k8s.io/kubernetes/test/e2e/framework/kubesystem"
-+	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
-+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
-+	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
-+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
-+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
-+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-+	gcecloud "k8s.io/legacy-cloud-providers/gce"
-+
-+	"github.com/onsi/ginkgo"
-+	"github.com/onsi/gomega"
-+)
-+
-+var _ = SIGDescribe("Services with Cloud LoadBalancers", func() {
-+
-+	f := framework.NewDefaultFramework("services")
-+
-+	var cs clientset.Interface
-+	serviceLBNames := []string{}
-+
-+	ginkgo.BeforeEach(func() {
-+		cs = f.ClientSet
-+	})
-+
-+	ginkgo.AfterEach(func() {
-+		if ginkgo.CurrentGinkgoTestDescription().Failed {
-+			DescribeSvc(f.Namespace.Name)
-+		}
-+		for _, lb := range serviceLBNames {
-+			framework.Logf("cleaning load balancer resource for %s", lb)
-+			e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
-+		}
-+		//reset serviceLBNames
-+		serviceLBNames = []string{}
-+	})
-+
-+	// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed
-+	ginkgo.It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() {
-+		// requires cloud load-balancer support
-+		e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
-+
-+		loadBalancerSupportsUDP := !framework.ProviderIs("aws")
-+
-+		loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault
-+		if framework.ProviderIs("aws") {
-+			loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
-+		}
-+		loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
-+
-+		// This test is more monolithic than we'd like because LB turnup can be
-+		// very slow, so we lumped all the tests into one LB lifecycle.
-+
-+		serviceName := "mutability-test"
-+		ns1 := f.Namespace.Name // LB1 in ns1 on TCP
-+		framework.Logf("namespace for TCP test: %s", ns1)
-+
-+		ginkgo.By("creating a second namespace")
-+		namespacePtr, err := f.CreateNamespace("services", nil)
-+		framework.ExpectNoError(err, "failed to create namespace")
-+		ns2 := namespacePtr.Name // LB2 in ns2 on UDP
-+		framework.Logf("namespace for UDP test: %s", ns2)
-+
-+		nodeIP, err := e2enode.PickIP(cs) // for later
-+		framework.ExpectNoError(err)
-+
-+		// Test TCP and UDP Services.  Services with the same name in different
-+		// namespaces should get different node ports and load balancers.
-+
-+		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
-+		tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
-+		tcpService, err := tcpJig.CreateTCPService(nil)
-+		framework.ExpectNoError(err)
-+
-+		ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
-+		udpJig := e2eservice.NewTestJig(cs, ns2, serviceName)
-+		udpService, err := udpJig.CreateUDPService(nil)
-+		framework.ExpectNoError(err)
-+
-+		ginkgo.By("verifying that TCP and UDP use the same port")
-+		if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
-+			framework.Failf("expected to use the same port for TCP and UDP")
-+		}
-+		svcPort := int(tcpService.Spec.Ports[0].Port)
-+		framework.Logf("service port (TCP and UDP): %d", svcPort)
-+
-+		ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
-+		_, err = tcpJig.Run(nil)
-+		framework.ExpectNoError(err)
-+
-+		ginkgo.By("creating a pod to be part of the UDP service " + serviceName)
-+		_, err = udpJig.Run(nil)
-+		framework.ExpectNoError(err)
-+
-+		// Change the services to NodePort.
-+
-+		ginkgo.By("changing the TCP service to type=NodePort")
-+		tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
-+			s.Spec.Type = v1.ServiceTypeNodePort
-+		})
-+		framework.ExpectNoError(err)
-+		tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
-+		framework.Logf("TCP node port: %d", tcpNodePort)
-+
-+		ginkgo.By("changing the UDP service to type=NodePort")
-+		udpService, err = udpJig.UpdateService(func(s *v1.Service) {
-+			s.Spec.Type = v1.ServiceTypeNodePort
-+		})
-+		framework.ExpectNoError(err)
-+		udpNodePort := int(udpService.Spec.Ports[0].NodePort)
-+		framework.Logf("UDP node port: %d", udpNodePort)
-+
-+		ginkgo.By("hitting the TCP service's NodePort")
-+		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("hitting the UDP service's NodePort")
-+		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		// Change the services to LoadBalancer.
-+
-+		// Here we test that LoadBalancers can receive static IP addresses.  This isn't
-+		// necessary, but is an additional feature this monolithic test checks.
-+		requestedIP := ""
-+		staticIPName := ""
-+		if framework.ProviderIs("gce", "gke") {
-+			ginkgo.By("creating a static load balancer IP")
-+			staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID)
-+			gceCloud, err := gce.GetGCECloud()
-+			framework.ExpectNoError(err, "failed to get GCE cloud provider")
-+
-+			err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
-+			defer func() {
-+				if staticIPName != "" {
-+					// Release GCE static IP - this is not kube-managed and will not be automatically released.
-+					if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
-+						framework.Logf("failed to release static IP %s: %v", staticIPName, err)
-+					}
-+				}
-+			}()
-+			framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)
-+			reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
-+			framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
-+
-+			requestedIP = reservedAddr.Address
-+			framework.Logf("Allocated static load balancer IP: %s", requestedIP)
-+		}
-+
-+		ginkgo.By("changing the TCP service to type=LoadBalancer")
-+		tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
-+			s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
-+			s.Spec.Type = v1.ServiceTypeLoadBalancer
-+		})
-+		framework.ExpectNoError(err)
-+
-+		if loadBalancerSupportsUDP {
-+			ginkgo.By("changing the UDP service to type=LoadBalancer")
-+			udpService, err = udpJig.UpdateService(func(s *v1.Service) {
-+				s.Spec.Type = v1.ServiceTypeLoadBalancer
-+			})
-+			framework.ExpectNoError(err)
-+		}
-+		serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
-+		if loadBalancerSupportsUDP {
-+			serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService))
-+		}
-+
-+		ginkgo.By("waiting for the TCP service to have a load balancer")
-+		// Wait for the load balancer to be created asynchronously
-+		tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
-+		framework.ExpectNoError(err)
-+		if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
-+			framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
-+		}
-+		if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
-+			framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
-+		}
-+		tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
-+		framework.Logf("TCP load balancer: %s", tcpIngressIP)
-+
-+		if framework.ProviderIs("gce", "gke") {
-+			// Do this as early as possible, which overrides the `defer` above.
-+			// This is mostly out of fear of leaking the IP in a timeout case
-+			// (as of this writing we're not 100% sure where the leaks are
-+			// coming from, so this is first-aid rather than surgery).
-+			ginkgo.By("demoting the static IP to ephemeral")
-+			if staticIPName != "" {
-+				gceCloud, err := gce.GetGCECloud()
-+				framework.ExpectNoError(err, "failed to get GCE cloud provider")
-+				// Deleting it after it is attached "demotes" it to an
-+				// ephemeral IP, which can be auto-released.
-+				if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
-+					framework.Failf("failed to release static IP %s: %v", staticIPName, err)
-+				}
-+				staticIPName = ""
-+			}
-+		}
-+
-+		var udpIngressIP string
-+		if loadBalancerSupportsUDP {
-+			ginkgo.By("waiting for the UDP service to have a load balancer")
-+			// 2nd one should be faster since they ran in parallel.
-+			udpService, err = udpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
-+			framework.ExpectNoError(err)
-+			if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
-+				framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
-+			}
-+			udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
-+			framework.Logf("UDP load balancer: %s", udpIngressIP)
-+
-+			ginkgo.By("verifying that TCP and UDP use different load balancers")
-+			if tcpIngressIP == udpIngressIP {
-+				framework.Failf("Load balancers are not different: %s", e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
-+			}
-+		}
-+
-+		ginkgo.By("hitting the TCP service's NodePort")
-+		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("hitting the UDP service's NodePort")
-+		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("hitting the TCP service's LoadBalancer")
-+		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
-+
-+		if loadBalancerSupportsUDP {
-+			ginkgo.By("hitting the UDP service's LoadBalancer")
-+			testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
-+		}
-+
-+		// Change the services' node ports.
-+
-+		ginkgo.By("changing the TCP service's NodePort")
-+		tcpService, err = tcpJig.ChangeServiceNodePort(tcpNodePort)
-+		framework.ExpectNoError(err)
-+		tcpNodePortOld := tcpNodePort
-+		tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
-+		if tcpNodePort == tcpNodePortOld {
-+			framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
-+		}
-+		if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
-+			framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
-+		}
-+		framework.Logf("TCP node port: %d", tcpNodePort)
-+
-+		ginkgo.By("changing the UDP service's NodePort")
-+		udpService, err = udpJig.ChangeServiceNodePort(udpNodePort)
-+		framework.ExpectNoError(err)
-+		udpNodePortOld := udpNodePort
-+		udpNodePort = int(udpService.Spec.Ports[0].NodePort)
-+		if udpNodePort == udpNodePortOld {
-+			framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
-+		}
-+		if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
-+			framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
-+		}
-+		framework.Logf("UDP node port: %d", udpNodePort)
-+
-+		ginkgo.By("hitting the TCP service's new NodePort")
-+		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("hitting the UDP service's new NodePort")
-+		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("checking the old TCP NodePort is closed")
-+		testNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("checking the old UDP NodePort is closed")
-+		testNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("hitting the TCP service's LoadBalancer")
-+		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
-+
-+		if loadBalancerSupportsUDP {
-+			ginkgo.By("hitting the UDP service's LoadBalancer")
-+			testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
-+		}
-+
-+		// Change the services' main ports.
-+
-+		ginkgo.By("changing the TCP service's port")
-+		tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
-+			s.Spec.Ports[0].Port++
-+		})
-+		framework.ExpectNoError(err)
-+		svcPortOld := svcPort
-+		svcPort = int(tcpService.Spec.Ports[0].Port)
-+		if svcPort == svcPortOld {
-+			framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
-+		}
-+		if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
-+			framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
-+		}
-+		if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
-+			framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
-+		}
-+
-+		ginkgo.By("changing the UDP service's port")
-+		udpService, err = udpJig.UpdateService(func(s *v1.Service) {
-+			s.Spec.Ports[0].Port++
-+		})
-+		framework.ExpectNoError(err)
-+		if int(udpService.Spec.Ports[0].Port) != svcPort {
-+			framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
-+		}
-+		if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
-+			framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
-+		}
-+		if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
-+			framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
-+		}
-+
-+		framework.Logf("service port (TCP and UDP): %d", svcPort)
-+
-+		ginkgo.By("hitting the TCP service's NodePort")
-+		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("hitting the UDP service's NodePort")
-+		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("hitting the TCP service's LoadBalancer")
-+		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
-+
-+		if loadBalancerSupportsUDP {
-+			ginkgo.By("hitting the UDP service's LoadBalancer")
-+			testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
-+		}
-+
-+		ginkgo.By("Scaling the pods to 0")
-+		err = tcpJig.Scale(0)
-+		framework.ExpectNoError(err)
-+		err = udpJig.Scale(0)
-+		framework.ExpectNoError(err)
-+
-+		ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort")
-+		testRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort")
-+		testRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer")
-+		testRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
-+
-+		if loadBalancerSupportsUDP {
-+			ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
-+			testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
-+		}
-+
-+		ginkgo.By("Scaling the pods to 1")
-+		err = tcpJig.Scale(1)
-+		framework.ExpectNoError(err)
-+		err = udpJig.Scale(1)
-+		framework.ExpectNoError(err)
-+
-+		ginkgo.By("hitting the TCP service's NodePort")
-+		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("hitting the UDP service's NodePort")
-+		testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("hitting the TCP service's LoadBalancer")
-+		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
-+
-+		if loadBalancerSupportsUDP {
-+			ginkgo.By("hitting the UDP service's LoadBalancer")
-+			testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
-+		}
-+
-+		// Change the services back to ClusterIP.
-+
-+		ginkgo.By("changing TCP service back to type=ClusterIP")
-+		_, err = tcpJig.UpdateService(func(s *v1.Service) {
-+			s.Spec.Type = v1.ServiceTypeClusterIP
-+			s.Spec.Ports[0].NodePort = 0
-+		})
-+		framework.ExpectNoError(err)
-+		// Wait for the load balancer to be destroyed asynchronously
-+		_, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
-+		framework.ExpectNoError(err)
-+
-+		ginkgo.By("changing UDP service back to type=ClusterIP")
-+		_, err = udpJig.UpdateService(func(s *v1.Service) {
-+			s.Spec.Type = v1.ServiceTypeClusterIP
-+			s.Spec.Ports[0].NodePort = 0
-+		})
-+		framework.ExpectNoError(err)
-+		if loadBalancerSupportsUDP {
-+			// Wait for the load balancer to be destroyed asynchronously
-+			_, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
-+			framework.ExpectNoError(err)
-+		}
-+
-+		ginkgo.By("checking the TCP NodePort is closed")
-+		testNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("checking the UDP NodePort is closed")
-+		testNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-+
-+		ginkgo.By("checking the TCP LoadBalancer is closed")
-+		testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
-+
-+		if loadBalancerSupportsUDP {
-+			ginkgo.By("checking the UDP LoadBalancer is closed")
-+			testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
-+		}
-+	})
-+
-+	ginkgo.It("should be able to create an internal type load balancer [Slow]", func() {
-+		e2eskipper.SkipUnlessProviderIs("azure", "gke", "gce")
-+
-+		createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
-+		pollInterval := framework.Poll * 10
-+
-+		namespace := f.Namespace.Name
-+		serviceName := "lb-internal"
-+		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-+
-+		ginkgo.By("creating pod to be part of service " + serviceName)
-+		_, err := jig.Run(nil)
-+		framework.ExpectNoError(err)
-+
-+		enableILB, disableILB := enableAndDisableInternalLB()
-+
-+		isInternalEndpoint := func(lbIngress *v1.LoadBalancerIngress) bool {
-+			ingressEndpoint := e2eservice.GetIngressPoint(lbIngress)
-+			// Needs update for providers using hostname as endpoint.
-+			return strings.HasPrefix(ingressEndpoint, "10.")
-+		}
-+
-+		ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled")
-+		svc, err := jig.CreateTCPService(func(svc *v1.Service) {
-+			svc.Spec.Type = v1.ServiceTypeLoadBalancer
-+			enableILB(svc)
-+		})
-+		framework.ExpectNoError(err)
-+
-+		defer func() {
-+			ginkgo.By("Clean up loadbalancer service")
-+			e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
-+		}()
-+
-+		svc, err = jig.WaitForLoadBalancer(createTimeout)
-+		framework.ExpectNoError(err)
-+		lbIngress := &svc.Status.LoadBalancer.Ingress[0]
-+		svcPort := int(svc.Spec.Ports[0].Port)
-+		// should have an internal IP.
-+		framework.ExpectEqual(isInternalEndpoint(lbIngress), true)
-+
-+		// ILBs are not accessible from the test orchestrator, so it's necessary to use
-+		//  a pod to test the service.
-+		ginkgo.By("hitting the internal load balancer from pod")
-+		framework.Logf("creating pod with host network")
-+		hostExec := launchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")
-+
-+		framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
-+		tcpIngressIP := e2eservice.GetIngressPoint(lbIngress)
-+		if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
-+			cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
-+			stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
-+			if err != nil {
-+				framework.Logf("error curling; stdout: %v. err: %v", stdout, err)
-+				return false, nil
-+			}
-+
-+			if !strings.Contains(stdout, "hello") {
-+				framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
-+				return false, nil
-+			}
-+
-+			framework.Logf("Successful curl; stdout: %v", stdout)
-+			return true, nil
-+		}); pollErr != nil {
-+			framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr)
-+		}
-+
-+		ginkgo.By("switching to external type LoadBalancer")
-+		svc, err = jig.UpdateService(func(svc *v1.Service) {
-+			disableILB(svc)
-+		})
-+		framework.ExpectNoError(err)
-+		framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
-+		if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
-+			svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
-+			if err != nil {
-+				return false, err
-+			}
-+			lbIngress = &svc.Status.LoadBalancer.Ingress[0]
-+			return !isInternalEndpoint(lbIngress), nil
-+		}); pollErr != nil {
-+			framework.Failf("Loadbalancer IP not changed to external.")
-+		}
-+		// should have an external IP.
-+		gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse())
-+
-+		ginkgo.By("hitting the external load balancer")
-+		framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
-+		tcpIngressIP = e2eservice.GetIngressPoint(lbIngress)
-+		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
-+
-+		// GCE cannot test a specific IP because the test may not own it. This cloud specific condition
-+		// will be removed when GCP supports similar functionality.
-+		if framework.ProviderIs("azure") {
-+			ginkgo.By("switching back to interal type LoadBalancer, with static IP specified.")
-+			internalStaticIP := "10.240.11.11"
-+			svc, err = jig.UpdateService(func(svc *v1.Service) {
-+				svc.Spec.LoadBalancerIP = internalStaticIP
-+				enableILB(svc)
-+			})
-+			framework.ExpectNoError(err)
-+			framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
-+			if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
-+				svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
-+				if err != nil {
-+					return false, err
-+				}
-+				lbIngress = &svc.Status.LoadBalancer.Ingress[0]
-+				return isInternalEndpoint(lbIngress), nil
-+			}); pollErr != nil {
-+				framework.Failf("Loadbalancer IP not changed to internal.")
-+			}
-+			// should have the given static internal IP.
-+			framework.ExpectEqual(e2eservice.GetIngressPoint(lbIngress), internalStaticIP)
-+		}
-+	})
-+
-+	// This test creates a load balancer and makes sure its health check
-+	// interval equals gceHcCheckIntervalSeconds. Then the interval is
-+	// manipulated to be something else, and we verify that it gets reconciled.
-+	ginkgo.It("should reconcile LB health check interval [Slow][Serial]", func() {
-+		const gceHcCheckIntervalSeconds = int64(8)
-+		// This test is for clusters on GCE.
-+		// (It restarts kube-controller-manager, which we don't support on GKE)
-+		e2eskipper.SkipUnlessProviderIs("gce")
-+		e2eskipper.SkipUnlessSSHKeyPresent()
-+
-+		clusterID, err := gce.GetClusterID(cs)
-+		if err != nil {
-+			framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
-+		}
-+		gceCloud, err := gce.GetGCECloud()
-+		if err != nil {
-+			framework.Failf("framework.GetGCECloud() = _, %v; want nil", err)
-+		}
-+
-+		namespace := f.Namespace.Name
-+		serviceName := "lb-hc-int"
-+		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-+
-+		ginkgo.By("create load balancer service")
-+		// Create a LoadBalancer service.
-+		svc, err := jig.CreateTCPService(func(svc *v1.Service) {
-+			svc.Spec.Type = v1.ServiceTypeLoadBalancer
-+		})
-+		framework.ExpectNoError(err)
-+
-+		defer func() {
-+			ginkgo.By("Clean up loadbalancer service")
-+			e2eservice.WaitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
-+		}()
-+
-+		svc, err = jig.WaitForLoadBalancer(e2eservice.GetServiceLoadBalancerCreationTimeout(cs))
-+		framework.ExpectNoError(err)
-+
-+		hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
-+		hc, err := gceCloud.GetHTTPHealthCheck(hcName)
-+		if err != nil {
-+			framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
-+		}
-+		framework.ExpectEqual(hc.CheckIntervalSec, gceHcCheckIntervalSeconds)
-+
-+		ginkgo.By("modify the health check interval")
-+		hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
-+		if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
-+			framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
-+		}
-+
-+		ginkgo.By("restart kube-controller-manager")
-+		if err := e2ekubesystem.RestartControllerManager(); err != nil {
-+			framework.Failf("e2ekubesystem.RestartControllerManager() = %v; want nil", err)
-+		}
-+		if err := e2ekubesystem.WaitForControllerManagerUp(); err != nil {
-+			framework.Failf("e2ekubesystem.WaitForControllerManagerUp() = %v; want nil", err)
-+		}
-+
-+		ginkgo.By("health check should be reconciled")
-+		pollInterval := framework.Poll * 10
-+		loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
-+		if pollErr := wait.PollImmediate(pollInterval, loadBalancerPropagationTimeout, func() (bool, error) {
-+			hc, err := gceCloud.GetHTTPHealthCheck(hcName)
-+			if err != nil {
-+				framework.Logf("ginkgo.Failed to get HttpHealthCheck(%q): %v", hcName, err)
-+				return false, err
-+			}
-+			framework.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
-+			return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
-+		}); pollErr != nil {
-+			framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
-+		}
-+	})
-+
-+	var _ = SIGDescribe("ESIPP [Slow]", func() {
-+		f := framework.NewDefaultFramework("esipp")
-+		var loadBalancerCreateTimeout time.Duration
-+
-+		var cs clientset.Interface
-+		serviceLBNames := []string{}
-+
-+		ginkgo.BeforeEach(func() {
-+			// requires cloud load-balancer support - this feature currently supported only on GCE/GKE
-+			e2eskipper.SkipUnlessProviderIs("gce", "gke")
-+
-+			cs = f.ClientSet
-+			loadBalancerCreateTimeout = e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
-+		})
-+
-+		ginkgo.AfterEach(func() {
-+			if ginkgo.CurrentGinkgoTestDescription().Failed {
-+				DescribeSvc(f.Namespace.Name)
-+			}
-+			for _, lb := range serviceLBNames {
-+				framework.Logf("cleaning load balancer resource for %s", lb)
-+				e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
-+			}
-+			//reset serviceLBNames
-+			serviceLBNames = []string{}
-+		})
-+
-+		ginkgo.It("should work for type=LoadBalancer", func() {
-+			namespace := f.Namespace.Name
-+			serviceName := "external-local-lb"
-+			jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-+
-+			svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
-+			framework.ExpectNoError(err)
-+			serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
-+			healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
-+			if healthCheckNodePort == 0 {
-+				framework.Failf("Service HealthCheck NodePort was not allocated")
-+			}
-+			defer func() {
-+				err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
-+				framework.ExpectNoError(err)
-+
-+				// Make sure we didn't leak the health check node port.
-+				threshold := 2
-+				nodes, err := jig.GetEndpointNodes()
-+				framework.ExpectNoError(err)
-+				for _, ips := range nodes {
-+					err := TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold)
-+					framework.ExpectNoError(err)
-+				}
-+				err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
-+				framework.ExpectNoError(err)
-+			}()
-+
-+			svcTCPPort := int(svc.Spec.Ports[0].Port)
-+			ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
-+
-+			ginkgo.By("reading clientIP using the TCP service's service port via its external VIP")
-+			content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
-+			clientIP := content.String()
-+			framework.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
-+
-+			ginkgo.By("checking if Source IP is preserved")
-+			if strings.HasPrefix(clientIP, "10.") {
-+				framework.Failf("Source IP was NOT preserved")
-+			}
-+		})
-+
-+		ginkgo.It("should work for type=NodePort", func() {
-+			namespace := f.Namespace.Name
-+			serviceName := "external-local-nodeport"
-+			jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-+
-+			svc, err := jig.CreateOnlyLocalNodePortService(true)
-+			framework.ExpectNoError(err)
-+			defer func() {
-+				err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
-+				framework.ExpectNoError(err)
-+			}()
-+
-+			tcpNodePort := int(svc.Spec.Ports[0].NodePort)
-+			endpointsNodeMap, err := jig.GetEndpointNodes()
-+			framework.ExpectNoError(err)
-+			path := "/clientip"
-+
-+			for nodeName, nodeIPs := range endpointsNodeMap {
-+				nodeIP := nodeIPs[0]
-+				ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path))
-+				content := GetHTTPContent(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, path)
-+				clientIP := content.String()
-+				framework.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
-+				if strings.HasPrefix(clientIP, "10.") {
-+					framework.Failf("Source IP was NOT preserved")
-+				}
-+			}
-+		})
-+
-+		ginkgo.It("should only target nodes with endpoints", func() {
-+			namespace := f.Namespace.Name
-+			serviceName := "external-local-nodes"
-+			jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-+			nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
-+			framework.ExpectNoError(err)
-+
-+			svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, false,
-+				func(svc *v1.Service) {
-+					// Change service port to avoid collision with opened hostPorts
-+					// in other tests that run in parallel.
-+					if len(svc.Spec.Ports) != 0 {
-+						svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(svc.Spec.Ports[0].Port))
-+						svc.Spec.Ports[0].Port = 8081
-+					}
-+
-+				})
-+			framework.ExpectNoError(err)
-+			serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
-+			defer func() {
-+				err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
-+				framework.ExpectNoError(err)
-+				err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
-+				framework.ExpectNoError(err)
-+			}()
-+
-+			healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
-+			if healthCheckNodePort == 0 {
-+				framework.Failf("Service HealthCheck NodePort was not allocated")
-+			}
-+
-+			ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
-+
-+			ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
-+			svcTCPPort := int(svc.Spec.Ports[0].Port)
-+
-+			threshold := 2
-+			path := "/healthz"
-+			for i := 0; i < len(nodes.Items); i++ {
-+				endpointNodeName := nodes.Items[i].Name
-+
-+				ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
-+				_, err = jig.Run(func(rc *v1.ReplicationController) {
-+					rc.Name = serviceName
-+					if endpointNodeName != "" {
-+						rc.Spec.Template.Spec.NodeName = endpointNodeName
-+					}
-+				})
-+				framework.ExpectNoError(err)
-+
-+				ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName))
-+				err = jig.WaitForEndpointOnNode(endpointNodeName)
-+				framework.ExpectNoError(err)
-+
-+				// HealthCheck should pass only on the node where num(endpoints) > 0
-+				// All other nodes should fail the healthcheck on the service healthCheckNodePort
-+				for n, publicIP := range ips {
-+					// Make sure the loadbalancer picked up the health check change.
-+					// Confirm traffic can reach backend through LB before checking healthcheck nodeport.
-+					e2eservice.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout)
-+					expectedSuccess := nodes.Items[n].Name == endpointNodeName
-+					port := strconv.Itoa(healthCheckNodePort)
-+					ipPort := net.JoinHostPort(publicIP, port)
-+					framework.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
-+					err := TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, e2eservice.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)
-+					framework.ExpectNoError(err)
-+				}
-+				framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
-+			}
-+		})
-+
-+		ginkgo.It("should work from pods", func() {
-+			var err error
-+			namespace := f.Namespace.Name
-+			serviceName := "external-local-pods"
-+			jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-+
-+			svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
-+			framework.ExpectNoError(err)
-+			serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
-+			defer func() {
-+				err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
-+				framework.ExpectNoError(err)
-+				err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
-+				framework.ExpectNoError(err)
-+			}()
-+
-+			ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
-+			port := strconv.Itoa(int(svc.Spec.Ports[0].Port))
-+			ipPort := net.JoinHostPort(ingressIP, port)
-+			path := fmt.Sprintf("%s/clientip", ipPort)
-+
-+			ginkgo.By("Creating a pause pod deployment to make sure pause pods are in the desired state")
-+			deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1)
-+			framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
-+
-+			defer func() {
-+				framework.Logf("Deleting deployment")
-+				err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
-+				framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
-+			}()
-+
-+			deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
-+			framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
-+			labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
-+			framework.ExpectNoError(err, "Error in setting LabelSelector as selector from deployment")
-+
-+			pausePods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()})
-+			framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments")
-+
-+			pausePod := pausePods.Items[0]
-+			framework.Logf("Waiting up to %v to curl %v", e2eservice.KubeProxyLagTimeout, path)
-+			cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %v`, path)
-+
-+			var srcIP string
-+			loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
-+			ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName))
-+			if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) {
-+				stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
-+				if err != nil {
-+					framework.Logf("got err: %v, retry until timeout", err)
-+					return false, nil
-+				}
-+				srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0])
-+				return srcIP == pausePod.Status.PodIP, nil
-+			}); pollErr != nil {
-+				framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", pausePod.Name, pausePod.Status.PodIP, srcIP)
-+			}
-+		})
-+
-+		// TODO: Get rid of [DisabledForLargeClusters] tag when issue #90047 is fixed.
-+		ginkgo.It("should handle updates to ExternalTrafficPolicy field [DisabledForLargeClusters]", func() {
-+			namespace := f.Namespace.Name
-+			serviceName := "external-local-update"
-+			jig := e2eservice.NewTestJig(cs, namespace, serviceName)
-+
-+			nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
-+			framework.ExpectNoError(err)
-+			if len(nodes.Items) < 2 {
-+				framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
-+			}
-+
-+			svc, err := jig.CreateOnlyLocalLoadBalancerService(loadBalancerCreateTimeout, true, nil)
-+			framework.ExpectNoError(err)
-+			serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
-+			defer func() {
-+				err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
-+				framework.ExpectNoError(err)
-+				err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
-+				framework.ExpectNoError(err)
-+			}()
-+
-+			// save the health check node port because it disappears when ESIPP is turned off.
-+			healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
-+
-+			ginkgo.By("turning ESIPP off")
-+			svc, err = jig.UpdateService(func(svc *v1.Service) {
-+				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
-+			})
-+			framework.ExpectNoError(err)
-+			if svc.Spec.HealthCheckNodePort > 0 {
-+				framework.Failf("Service HealthCheck NodePort still present")
-+			}
-+
-+			endpointNodeMap, err := jig.GetEndpointNodes()
-+			framework.ExpectNoError(err)
-+			noEndpointNodeMap := map[string][]string{}
-+			for _, n := range nodes.Items {
-+				if _, ok := endpointNodeMap[n.Name]; ok {
-+					continue
-+				}
-+				noEndpointNodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP)
-+			}
-+
-+			svcTCPPort := int(svc.Spec.Ports[0].Port)
-+			svcNodePort := int(svc.Spec.Ports[0].NodePort)
-+			ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
-+			path := "/clientip"
-+
-+			ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap))
-+			for nodeName, nodeIPs := range noEndpointNodeMap {
-+				ginkgo.By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path))
-+				GetHTTPContent(nodeIPs[0], svcNodePort, e2eservice.KubeProxyLagTimeout, path)
-+			}
-+
-+			for nodeName, nodeIPs := range endpointNodeMap {
-+				ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0]))
-+				var body bytes.Buffer
-+				pollfn := func() (bool, error) {
-+					result := e2enetwork.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil)
-+					if result.Code == 0 {
-+						return true, nil
-+					}
-+					body.Reset()
-+					body.Write(result.Body)
-+					return false, nil
-+				}
-+				if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollfn); pollErr != nil {
-+					framework.Failf("Kube-proxy is still exposing the health check on node %v:%v after ESIPP was turned off. Body: %s",
-+						nodeName, healthCheckNodePort, body.String())
-+				}
-+			}
-+
-+			// Poll till kube-proxy re-adds the MASQUERADE rule on the node.
-+			ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP))
-+			var clientIP string
-+			pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
-+				content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
-+				clientIP = content.String()
-+				if strings.HasPrefix(clientIP, "10.") {
-+					return true, nil
-+				}
-+				return false, nil
-+			})
-+			if pollErr != nil {
-+				framework.Failf("Source IP WAS preserved even after ESIPP was turned off. Got %v, expected a ten-dot cluster IP.", clientIP)
-+			}
-+
-+			// TODO: We need to attempt to create another service with the previously
-+			// allocated healthcheck nodePort. If the health check nodePort has been
-+			// freed, the new service creation will succeed, upon which we cleanup.
-+			// If the health check nodePort has NOT been freed, the new service
-+			// creation will fail.
-+
-+			ginkgo.By("setting ExternalTraffic field back to OnlyLocal")
-+			svc, err = jig.UpdateService(func(svc *v1.Service) {
-+				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
-+				// Request the same healthCheckNodePort as before, to test the user-requested allocation path
-+				svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
-+			})
-+			framework.ExpectNoError(err)
-+			pollErr = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
-+				content := GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
-+				clientIP = content.String()
-+				ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP))
-+				if !strings.HasPrefix(clientIP, "10.") {
-+					return true, nil
-+				}
-+				return false, nil
-+			})
-+			if pollErr != nil {
-+				framework.Failf("Source IP (%v) is not the client IP even after ESIPP was turned on; expected a public IP.", clientIP)
-+			}
-+		})
-+	})
-+})
-diff --git a/test/e2e/node/recreate_node.go b/test/e2e/node/recreate_node.go
-index da3fc974485..b403fa7f737 100644
---- a/test/e2e/node/recreate_node.go
-+++ b/test/e2e/node/recreate_node.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2019 The Kubernetes Authors.
- 
-diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go
-index 313e773b8e2..bbf66b59aac 100644
---- a/test/e2e/scheduling/nvidia-gpus.go
-+++ b/test/e2e/scheduling/nvidia-gpus.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go
-index 513ed07543f..78c0f081990 100644
---- a/test/e2e/scheduling/ubernetes_lite_volumes.go
-+++ b/test/e2e/scheduling/ubernetes_lite_volumes.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
-index a866266c1f1..28e26a10c35 100644
---- a/test/e2e/storage/drivers/in_tree.go
-+++ b/test/e2e/storage/drivers/in_tree.go
-@@ -38,10 +38,8 @@ package drivers
- import (
- 	"context"
- 	"fmt"
--	"os/exec"
- 	"strconv"
- 	"strings"
--	"time"
- 
- 	"github.com/onsi/ginkgo"
- 	v1 "k8s.io/api/core/v1"
-@@ -57,13 +55,11 @@ import (
- 	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
- 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
- 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
--	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
- 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
- 	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
- 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
- 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
- 	"k8s.io/kubernetes/test/e2e/storage/utils"
--	vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
- 	imageutils "k8s.io/kubernetes/test/utils/image"
- )
- 
-@@ -1044,734 +1040,6 @@ func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
- 	}, func() {}
- }
- 
--// Cinder
--// This driver assumes that OpenStack client tools are installed
--// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
--// and that the usual OpenStack authentication env. variables are set
--// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
--type cinderDriver struct {
--	driverInfo testsuites.DriverInfo
--}
--
--type cinderVolume struct {
--	volumeName string
--	volumeID   string
--}
--
--var _ testsuites.TestDriver = &cinderDriver{}
--var _ testsuites.PreprovisionedVolumeTestDriver = &cinderDriver{}
--var _ testsuites.InlineVolumeTestDriver = &cinderDriver{}
--var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{}
--var _ testsuites.DynamicPVTestDriver = &cinderDriver{}
--
--// InitCinderDriver returns cinderDriver that implements TestDriver interface
--func InitCinderDriver() testsuites.TestDriver {
--	return &cinderDriver{
--		driverInfo: testsuites.DriverInfo{
--			Name:             "cinder",
--			InTreePluginName: "kubernetes.io/cinder",
--			MaxFileSize:      testpatterns.FileSizeMedium,
--			SupportedSizeRange: e2evolume.SizeRange{
--				Min: "5Gi",
--			},
--			SupportedFsType: sets.NewString(
--				"", // Default fsType
--				"ext3",
--			),
--			TopologyKeys: []string{v1.LabelZoneFailureDomain},
--			Capabilities: map[testsuites.Capability]bool{
--				testsuites.CapPersistence: true,
--				testsuites.CapFsGroup:     true,
--				testsuites.CapExec:        true,
--				testsuites.CapBlock:       true,
--				// Cinder supports volume limits, but the test creates a large
--				// number of volumes and times out test suites.
--				testsuites.CapVolumeLimits: false,
--				testsuites.CapTopology:     true,
--			},
--		},
--	}
--}
--
--func (c *cinderDriver) GetDriverInfo() *testsuites.DriverInfo {
--	return &c.driverInfo
--}
--
--func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
--	e2eskipper.SkipUnlessProviderIs("openstack")
--}
--
--func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
--	cv, ok := e2evolume.(*cinderVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
--
--	volSource := v1.VolumeSource{
--		Cinder: &v1.CinderVolumeSource{
--			VolumeID: cv.volumeID,
--			ReadOnly: readOnly,
--		},
--	}
--	if fsType != "" {
--		volSource.Cinder.FSType = fsType
--	}
--	return &volSource
--}
--
--func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
--	cv, ok := e2evolume.(*cinderVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
--
--	pvSource := v1.PersistentVolumeSource{
--		Cinder: &v1.CinderPersistentVolumeSource{
--			VolumeID: cv.volumeID,
--			ReadOnly: readOnly,
--		},
--	}
--	if fsType != "" {
--		pvSource.Cinder.FSType = fsType
--	}
--	return &pvSource, nil
--}
--
--func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
--	provisioner := "kubernetes.io/cinder"
--	parameters := map[string]string{}
--	if fsType != "" {
--		parameters["fsType"] = fsType
--	}
--	ns := config.Framework.Namespace.Name
--	suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name)
--
--	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
--}
--
--func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
--	return &testsuites.PerTestConfig{
--		Driver:    c,
--		Prefix:    "cinder",
--		Framework: f,
--	}, func() {}
--}
--
--func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
--	f := config.Framework
--	ns := f.Namespace
--
--	// We assume that namespace.Name is a random string
--	volumeName := ns.Name
--	ginkgo.By("creating a test Cinder volume")
--	output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
--	outputString := string(output[:])
--	framework.Logf("cinder output:\n%s", outputString)
--	framework.ExpectNoError(err)
--
--	// Parse 'id' from stdout. Expected format:
--	// |     attachments     |                  []                  |
--	// |  availability_zone  |                 nova                 |
--	// ...
--	// |          id         | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
--	volumeID := ""
--	for _, line := range strings.Split(outputString, "\n") {
--		fields := strings.Fields(line)
--		if len(fields) != 5 {
--			continue
--		}
--		if fields[1] != "id" {
--			continue
--		}
--		volumeID = fields[3]
--		break
--	}
--	framework.Logf("Volume ID: %s", volumeID)
--	framework.ExpectNotEqual(volumeID, "")
--	return &cinderVolume{
--		volumeName: volumeName,
--		volumeID:   volumeID,
--	}
--}
--
--func (v *cinderVolume) DeleteVolume() {
--	name := v.volumeName
--
--	// Try to delete the volume for several seconds - it takes
--	// a while for the plugin to detach it.
--	var output []byte
--	var err error
--	timeout := time.Second * 120
--
--	framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
--	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
--		output, err = exec.Command("cinder", "delete", name).CombinedOutput()
--		if err == nil {
--			framework.Logf("Cinder volume %s deleted", name)
--			return
--		}
--		framework.Logf("Failed to delete volume %s: %v", name, err)
--	}
--	framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
--}
--
--// GCE
--type gcePdDriver struct {
--	driverInfo testsuites.DriverInfo
--}
--
--type gcePdVolume struct {
--	volumeName string
--}
--
--var _ testsuites.TestDriver = &gcePdDriver{}
--var _ testsuites.PreprovisionedVolumeTestDriver = &gcePdDriver{}
--var _ testsuites.InlineVolumeTestDriver = &gcePdDriver{}
--var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{}
--var _ testsuites.DynamicPVTestDriver = &gcePdDriver{}
--
--// InitGcePdDriver returns gcePdDriver that implements TestDriver interface
--func InitGcePdDriver() testsuites.TestDriver {
--	// In the current test structure, the driver is initialized first and the
--	// framework is set up afterwards, so we cannot get the correct OS here.
--	// Therefore we claim support for all fs types, for both Linux and Windows;
--	// the node OS is checked later during the test.
--	supportedTypes := sets.NewString(
--		"", // Default fsType
--		"ext2",
--		"ext3",
--		"ext4",
--		"xfs",
--		"ntfs",
--	)
--	return &gcePdDriver{
--		driverInfo: testsuites.DriverInfo{
--			Name:             "gcepd",
--			InTreePluginName: "kubernetes.io/gce-pd",
--			MaxFileSize:      testpatterns.FileSizeMedium,
--			SupportedSizeRange: e2evolume.SizeRange{
--				Min: "5Gi",
--			},
--			SupportedFsType:      supportedTypes,
--			SupportedMountOption: sets.NewString("debug", "nouid32"),
--			TopologyKeys:         []string{v1.LabelZoneFailureDomain},
--			Capabilities: map[testsuites.Capability]bool{
--				testsuites.CapPersistence:         true,
--				testsuites.CapFsGroup:             true,
--				testsuites.CapBlock:               true,
--				testsuites.CapExec:                true,
--				testsuites.CapMultiPODs:           true,
--				testsuites.CapControllerExpansion: true,
--				testsuites.CapNodeExpansion:       true,
--				// GCE supports volume limits, but the test creates a large
--				// number of volumes and times out test suites.
--				testsuites.CapVolumeLimits: false,
--				testsuites.CapTopology:     true,
--			},
--		},
--	}
--}
--
--func (g *gcePdDriver) GetDriverInfo() *testsuites.DriverInfo {
--	return &g.driverInfo
--}
--
--func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
--	e2eskipper.SkipUnlessProviderIs("gce", "gke")
--	if pattern.FeatureTag == "[sig-windows]" {
--		e2eskipper.SkipUnlessNodeOSDistroIs("windows")
--	}
--}
--
--func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
--	gv, ok := e2evolume.(*gcePdVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
--	volSource := v1.VolumeSource{
--		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
--			PDName:   gv.volumeName,
--			ReadOnly: readOnly,
--		},
--	}
--	if fsType != "" {
--		volSource.GCEPersistentDisk.FSType = fsType
--	}
--	return &volSource
--}
--
--func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
--	gv, ok := e2evolume.(*gcePdVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
--	pvSource := v1.PersistentVolumeSource{
--		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
--			PDName:   gv.volumeName,
--			ReadOnly: readOnly,
--		},
--	}
--	if fsType != "" {
--		pvSource.GCEPersistentDisk.FSType = fsType
--	}
--	return &pvSource, nil
--}
--
--func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
--	provisioner := "kubernetes.io/gce-pd"
--	parameters := map[string]string{}
--	if fsType != "" {
--		parameters["fsType"] = fsType
--	}
--	ns := config.Framework.Namespace.Name
--	suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
--	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
--
--	return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
--}
--
--func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
--	config := &testsuites.PerTestConfig{
--		Driver:    g,
--		Prefix:    "gcepd",
--		Framework: f,
--	}
--
--	if framework.NodeOSDistroIs("windows") {
--		config.ClientNodeSelection = e2epod.NodeSelection{
--			Selector: map[string]string{
--				"kubernetes.io/os": "windows",
--			},
--		}
--	}
--	return config, func() {}
--
--}
--
--func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
--	zone := getInlineVolumeZone(config.Framework)
--	if volType == testpatterns.InlineVolume {
--		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
--		// so pods should also be scheduled there.
--		config.ClientNodeSelection = e2epod.NodeSelection{
--			Selector: map[string]string{
--				v1.LabelZoneFailureDomain: zone,
--			},
--		}
--	}
--	ginkgo.By("creating a test gce pd volume")
--	vname, err := e2epv.CreatePDWithRetryAndZone(zone)
--	framework.ExpectNoError(err)
--	return &gcePdVolume{
--		volumeName: vname,
--	}
--}
--
--func (v *gcePdVolume) DeleteVolume() {
--	e2epv.DeletePDWithRetry(v.volumeName)
--}
--
--// vSphere
--type vSphereDriver struct {
--	driverInfo testsuites.DriverInfo
--}
--
--type vSphereVolume struct {
--	volumePath string
--	nodeInfo   *vspheretest.NodeInfo
--}
--
--var _ testsuites.TestDriver = &vSphereDriver{}
--var _ testsuites.PreprovisionedVolumeTestDriver = &vSphereDriver{}
--var _ testsuites.InlineVolumeTestDriver = &vSphereDriver{}
--var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{}
--var _ testsuites.DynamicPVTestDriver = &vSphereDriver{}
--
--// InitVSphereDriver returns vSphereDriver that implements TestDriver interface
--func InitVSphereDriver() testsuites.TestDriver {
--	return &vSphereDriver{
--		driverInfo: testsuites.DriverInfo{
--			Name:             "vsphere",
--			InTreePluginName: "kubernetes.io/vsphere-volume",
--			MaxFileSize:      testpatterns.FileSizeMedium,
--			SupportedSizeRange: e2evolume.SizeRange{
--				Min: "5Gi",
--			},
--			SupportedFsType: sets.NewString(
--				"", // Default fsType
--				"ext4",
--			),
--			TopologyKeys: []string{v1.LabelZoneFailureDomain},
--			Capabilities: map[testsuites.Capability]bool{
--				testsuites.CapPersistence: true,
--				testsuites.CapFsGroup:     true,
--				testsuites.CapExec:        true,
--				testsuites.CapMultiPODs:   true,
--				testsuites.CapTopology:    true,
--			},
--		},
--	}
--}
--func (v *vSphereDriver) GetDriverInfo() *testsuites.DriverInfo {
--	return &v.driverInfo
--}
--
--func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
--	e2eskipper.SkipUnlessProviderIs("vsphere")
--}
--
--func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
--	vsv, ok := e2evolume.(*vSphereVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
--
--	// vSphere driver doesn't seem to support readOnly volumes
--	// TODO: check if it is correct
--	if readOnly {
--		return nil
--	}
--	volSource := v1.VolumeSource{
--		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
--			VolumePath: vsv.volumePath,
--		},
--	}
--	if fsType != "" {
--		volSource.VsphereVolume.FSType = fsType
--	}
--	return &volSource
--}
--
--func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
--	vsv, ok := e2evolume.(*vSphereVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
--
--	// vSphere driver doesn't seem to support readOnly volumes
--	// TODO: check if it is correct
--	if readOnly {
--		return nil, nil
--	}
--	pvSource := v1.PersistentVolumeSource{
--		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
--			VolumePath: vsv.volumePath,
--		},
--	}
--	if fsType != "" {
--		pvSource.VsphereVolume.FSType = fsType
--	}
--	return &pvSource, nil
--}
--
--func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
--	provisioner := "kubernetes.io/vsphere-volume"
--	parameters := map[string]string{}
--	if fsType != "" {
--		parameters["fsType"] = fsType
--	}
--	ns := config.Framework.Namespace.Name
--	suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name)
--
--	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
--}
--
--func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
--	return &testsuites.PerTestConfig{
--		Driver:    v,
--		Prefix:    "vsphere",
--		Framework: f,
--	}, func() {}
--}
--
--func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
--	f := config.Framework
--	vspheretest.Bootstrap(f)
--	nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
--	volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
--	framework.ExpectNoError(err)
--	return &vSphereVolume{
--		volumePath: volumePath,
--		nodeInfo:   nodeInfo,
--	}
--}
--
--func (v *vSphereVolume) DeleteVolume() {
--	v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef)
--}
--
--// Azure Disk
--type azureDiskDriver struct {
--	driverInfo testsuites.DriverInfo
--}
--
--type azureDiskVolume struct {
--	volumeName string
--}
--
--var _ testsuites.TestDriver = &azureDiskDriver{}
--var _ testsuites.PreprovisionedVolumeTestDriver = &azureDiskDriver{}
--var _ testsuites.InlineVolumeTestDriver = &azureDiskDriver{}
--var _ testsuites.PreprovisionedPVTestDriver = &azureDiskDriver{}
--var _ testsuites.DynamicPVTestDriver = &azureDiskDriver{}
--
--// InitAzureDiskDriver returns azureDiskDriver that implements TestDriver interface
--func InitAzureDiskDriver() testsuites.TestDriver {
--	return &azureDiskDriver{
--		driverInfo: testsuites.DriverInfo{
--			Name:             "azure-disk",
--			InTreePluginName: "kubernetes.io/azure-disk",
--			MaxFileSize:      testpatterns.FileSizeMedium,
--			SupportedSizeRange: e2evolume.SizeRange{
--				Min: "5Gi",
--			},
--			SupportedFsType: sets.NewString(
--				"", // Default fsType
--				"ext3",
--				"ext4",
--				"xfs",
--			),
--			TopologyKeys: []string{v1.LabelZoneFailureDomain},
--			Capabilities: map[testsuites.Capability]bool{
--				testsuites.CapPersistence: true,
--				testsuites.CapFsGroup:     true,
--				testsuites.CapBlock:       true,
--				testsuites.CapExec:        true,
--				testsuites.CapMultiPODs:   true,
--				// Azure supports volume limits, but the test creates a large
--				// number of volumes and times out test suites.
--				testsuites.CapVolumeLimits: false,
--				testsuites.CapTopology:     true,
--			},
--		},
--	}
--}
--
--func (a *azureDiskDriver) GetDriverInfo() *testsuites.DriverInfo {
--	return &a.driverInfo
--}
--
--func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
--	e2eskipper.SkipUnlessProviderIs("azure")
--}
--
--func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
--	av, ok := e2evolume.(*azureDiskVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
--	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
--
--	kind := v1.AzureManagedDisk
--	volSource := v1.VolumeSource{
--		AzureDisk: &v1.AzureDiskVolumeSource{
--			DiskName:    diskName,
--			DataDiskURI: av.volumeName,
--			Kind:        &kind,
--			ReadOnly:    &readOnly,
--		},
--	}
--	if fsType != "" {
--		volSource.AzureDisk.FSType = &fsType
--	}
--	return &volSource
--}
--
--func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
--	av, ok := e2evolume.(*azureDiskVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
--
--	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
--
--	kind := v1.AzureManagedDisk
--	pvSource := v1.PersistentVolumeSource{
--		AzureDisk: &v1.AzureDiskVolumeSource{
--			DiskName:    diskName,
--			DataDiskURI: av.volumeName,
--			Kind:        &kind,
--			ReadOnly:    &readOnly,
--		},
--	}
--	if fsType != "" {
--		pvSource.AzureDisk.FSType = &fsType
--	}
--	return &pvSource, nil
--}
--
--func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
--	provisioner := "kubernetes.io/azure-disk"
--	parameters := map[string]string{}
--	if fsType != "" {
--		parameters["fsType"] = fsType
--	}
--	ns := config.Framework.Namespace.Name
--	suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
--	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
--
--	return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
--}
--
--func (a *azureDiskDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
--	return &testsuites.PerTestConfig{
--		Driver:    a,
--		Prefix:    "azure",
--		Framework: f,
--	}, func() {}
--}
--
--func (a *azureDiskDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
--	ginkgo.By("creating a test azure disk volume")
--	zone := getInlineVolumeZone(config.Framework)
--	if volType == testpatterns.InlineVolume {
--		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
--		// so pods should also be scheduled there.
--		config.ClientNodeSelection = e2epod.NodeSelection{
--			Selector: map[string]string{
--				v1.LabelZoneFailureDomain: zone,
--			},
--		}
--	}
--	volumeName, err := e2epv.CreatePDWithRetryAndZone(zone)
--	framework.ExpectNoError(err)
--	return &azureDiskVolume{
--		volumeName: volumeName,
--	}
--}
--
--func (v *azureDiskVolume) DeleteVolume() {
--	e2epv.DeletePDWithRetry(v.volumeName)
--}
--
--// AWS
--type awsDriver struct {
--	driverInfo testsuites.DriverInfo
--}
--
--type awsVolume struct {
--	volumeName string
--}
--
--var _ testsuites.TestDriver = &awsDriver{}
--
--var _ testsuites.PreprovisionedVolumeTestDriver = &awsDriver{}
--var _ testsuites.InlineVolumeTestDriver = &awsDriver{}
--var _ testsuites.PreprovisionedPVTestDriver = &awsDriver{}
--var _ testsuites.DynamicPVTestDriver = &awsDriver{}
--
--// InitAwsDriver returns awsDriver that implements TestDriver interface
--func InitAwsDriver() testsuites.TestDriver {
--	return &awsDriver{
--		driverInfo: testsuites.DriverInfo{
--			Name:             "aws",
--			InTreePluginName: "kubernetes.io/aws-ebs",
--			MaxFileSize:      testpatterns.FileSizeMedium,
--			SupportedSizeRange: e2evolume.SizeRange{
--				Min: "5Gi",
--			},
--			SupportedFsType: sets.NewString(
--				"", // Default fsType
--				"ext2",
--				"ext3",
--				"ext4",
--				"xfs",
--				"ntfs",
--			),
--			SupportedMountOption: sets.NewString("debug", "nouid32"),
--			TopologyKeys:         []string{v1.LabelZoneFailureDomain},
--			Capabilities: map[testsuites.Capability]bool{
--				testsuites.CapPersistence:         true,
--				testsuites.CapFsGroup:             true,
--				testsuites.CapBlock:               true,
--				testsuites.CapExec:                true,
--				testsuites.CapMultiPODs:           true,
--				testsuites.CapControllerExpansion: true,
--				testsuites.CapNodeExpansion:       true,
--				// AWS supports volume limits, but the test creates a large
--				// number of volumes and times out test suites.
--				testsuites.CapVolumeLimits: false,
--				testsuites.CapTopology:     true,
--			},
--		},
--	}
--}
--
--func (a *awsDriver) GetDriverInfo() *testsuites.DriverInfo {
--	return &a.driverInfo
--}
--
--func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
--	e2eskipper.SkipUnlessProviderIs("aws")
--}
--
--func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
--	av, ok := e2evolume.(*awsVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
--	volSource := v1.VolumeSource{
--		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
--			VolumeID: av.volumeName,
--			ReadOnly: readOnly,
--		},
--	}
--	if fsType != "" {
--		volSource.AWSElasticBlockStore.FSType = fsType
--	}
--	return &volSource
--}
--
--func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
--	av, ok := e2evolume.(*awsVolume)
--	framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
--	pvSource := v1.PersistentVolumeSource{
--		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
--			VolumeID: av.volumeName,
--			ReadOnly: readOnly,
--		},
--	}
--	if fsType != "" {
--		pvSource.AWSElasticBlockStore.FSType = fsType
--	}
--	return &pvSource, nil
--}
--
--func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
--	provisioner := "kubernetes.io/aws-ebs"
--	parameters := map[string]string{}
--	if fsType != "" {
--		parameters["fsType"] = fsType
--	}
--	ns := config.Framework.Namespace.Name
--	suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
--	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
--
--	return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
--}
--
--func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
--	config := &testsuites.PerTestConfig{
--		Driver:    a,
--		Prefix:    "aws",
--		Framework: f,
--	}
--
--	if framework.NodeOSDistroIs("windows") {
--		config.ClientNodeSelection = e2epod.NodeSelection{
--			Selector: map[string]string{
--				"kubernetes.io/os": "windows",
--			},
--		}
--	}
--	return config, func() {}
--}
--
--func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
--	zone := getInlineVolumeZone(config.Framework)
--	if volType == testpatterns.InlineVolume {
--		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
--		// so pods should also be scheduled there.
--		config.ClientNodeSelection = e2epod.NodeSelection{
--			Selector: map[string]string{
--				v1.LabelZoneFailureDomain: zone,
--			},
--		}
--	}
--	ginkgo.By("creating a test aws volume")
--	vname, err := e2epv.CreatePDWithRetryAndZone(zone)
--	framework.ExpectNoError(err)
--	return &awsVolume{
--		volumeName: vname,
--	}
--}
--
--func (v *awsVolume) DeleteVolume() {
--	e2epv.DeletePDWithRetry(v.volumeName)
--}
--
- // local
- type localDriver struct {
- 	driverInfo testsuites.DriverInfo
-diff --git a/test/e2e/storage/drivers/in_tree_providers.go b/test/e2e/storage/drivers/in_tree_providers.go
-new file mode 100644
-index 00000000000..c7f5dd3052e
---- /dev/null
-+++ b/test/e2e/storage/drivers/in_tree_providers.go
-@@ -0,0 +1,751 @@
-+// +build !providerless
-+
-+package drivers
-+
-+import (
-+	"fmt"
-+	"os/exec"
-+	"strings"
-+	"time"
-+
-+	"github.com/onsi/ginkgo"
-+	v1 "k8s.io/api/core/v1"
-+	storagev1 "k8s.io/api/storage/v1"
-+	"k8s.io/apimachinery/pkg/util/sets"
-+	"k8s.io/kubernetes/test/e2e/framework"
-+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-+	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
-+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-+	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
-+	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
-+	"k8s.io/kubernetes/test/e2e/storage/testsuites"
-+	vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
-+)
-+
-+// Cinder
-+// This driver assumes that OpenStack client tools are installed
-+// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
-+// and that the usual OpenStack authentication env. variables are set
-+// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
-+type cinderDriver struct {
-+	driverInfo testsuites.DriverInfo
-+}
-+
-+type cinderVolume struct {
-+	volumeName string
-+	volumeID   string
-+}
-+
-+var _ testsuites.TestDriver = &cinderDriver{}
-+var _ testsuites.PreprovisionedVolumeTestDriver = &cinderDriver{}
-+var _ testsuites.InlineVolumeTestDriver = &cinderDriver{}
-+var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{}
-+var _ testsuites.DynamicPVTestDriver = &cinderDriver{}
-+
-+// InitCinderDriver returns cinderDriver that implements TestDriver interface
-+func InitCinderDriver() testsuites.TestDriver {
-+	return &cinderDriver{
-+		driverInfo: testsuites.DriverInfo{
-+			Name:             "cinder",
-+			InTreePluginName: "kubernetes.io/cinder",
-+			MaxFileSize:      testpatterns.FileSizeMedium,
-+			SupportedSizeRange: e2evolume.SizeRange{
-+				Min: "5Gi",
-+			},
-+			SupportedFsType: sets.NewString(
-+				"", // Default fsType
-+				"ext3",
-+			),
-+			TopologyKeys: []string{v1.LabelZoneFailureDomain},
-+			Capabilities: map[testsuites.Capability]bool{
-+				testsuites.CapPersistence: true,
-+				testsuites.CapFsGroup:     true,
-+				testsuites.CapExec:        true,
-+				testsuites.CapBlock:       true,
-+				// Cinder supports volume limits, but the test creates a large
-+				// number of volumes and times out test suites.
-+				testsuites.CapVolumeLimits: false,
-+				testsuites.CapTopology:     true,
-+			},
-+		},
-+	}
-+}
-+
-+func (c *cinderDriver) GetDriverInfo() *testsuites.DriverInfo {
-+	return &c.driverInfo
-+}
-+
-+func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
-+	e2eskipper.SkipUnlessProviderIs("openstack")
-+}
-+
-+func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
-+	cv, ok := e2evolume.(*cinderVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
-+
-+	volSource := v1.VolumeSource{
-+		Cinder: &v1.CinderVolumeSource{
-+			VolumeID: cv.volumeID,
-+			ReadOnly: readOnly,
-+		},
-+	}
-+	if fsType != "" {
-+		volSource.Cinder.FSType = fsType
-+	}
-+	return &volSource
-+}
-+
-+func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-+	cv, ok := e2evolume.(*cinderVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
-+
-+	pvSource := v1.PersistentVolumeSource{
-+		Cinder: &v1.CinderPersistentVolumeSource{
-+			VolumeID: cv.volumeID,
-+			ReadOnly: readOnly,
-+		},
-+	}
-+	if fsType != "" {
-+		pvSource.Cinder.FSType = fsType
-+	}
-+	return &pvSource, nil
-+}
-+
-+func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
-+	provisioner := "kubernetes.io/cinder"
-+	parameters := map[string]string{}
-+	if fsType != "" {
-+		parameters["fsType"] = fsType
-+	}
-+	ns := config.Framework.Namespace.Name
-+	suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name)
-+
-+	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
-+}
-+
-+func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-+	return &testsuites.PerTestConfig{
-+		Driver:    c,
-+		Prefix:    "cinder",
-+		Framework: f,
-+	}, func() {}
-+}
-+
-+func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
-+	f := config.Framework
-+	ns := f.Namespace
-+
-+	// We assume that namespace.Name is a random string
-+	volumeName := ns.Name
-+	ginkgo.By("creating a test Cinder volume")
-+	output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
-+	outputString := string(output[:])
-+	framework.Logf("cinder output:\n%s", outputString)
-+	framework.ExpectNoError(err)
-+
-+	// Parse 'id' from stdout. Expected format:
-+	// |     attachments     |                  []                  |
-+	// |  availability_zone  |                 nova                 |
-+	// ...
-+	// |          id         | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
-+	volumeID := ""
-+	for _, line := range strings.Split(outputString, "\n") {
-+		fields := strings.Fields(line)
-+		if len(fields) != 5 {
-+			continue
-+		}
-+		if fields[1] != "id" {
-+			continue
-+		}
-+		volumeID = fields[3]
-+		break
-+	}
-+	framework.Logf("Volume ID: %s", volumeID)
-+	framework.ExpectNotEqual(volumeID, "")
-+	return &cinderVolume{
-+		volumeName: volumeName,
-+		volumeID:   volumeID,
-+	}
-+}
-+
-+func (v *cinderVolume) DeleteVolume() {
-+	name := v.volumeName
-+
-+	// Try to delete the volume for several seconds - it takes
-+	// a while for the plugin to detach it.
-+	var output []byte
-+	var err error
-+	timeout := time.Second * 120
-+
-+	framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
-+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
-+		output, err = exec.Command("cinder", "delete", name).CombinedOutput()
-+		if err == nil {
-+			framework.Logf("Cinder volume %s deleted", name)
-+			return
-+		}
-+		framework.Logf("Failed to delete volume %s: %v", name, err)
-+	}
-+	framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
-+}
-+
-+// GCE
-+type gcePdDriver struct {
-+	driverInfo testsuites.DriverInfo
-+}
-+
-+type gcePdVolume struct {
-+	volumeName string
-+}
-+
-+var _ testsuites.TestDriver = &gcePdDriver{}
-+var _ testsuites.PreprovisionedVolumeTestDriver = &gcePdDriver{}
-+var _ testsuites.InlineVolumeTestDriver = &gcePdDriver{}
-+var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{}
-+var _ testsuites.DynamicPVTestDriver = &gcePdDriver{}
-+
-+// InitGcePdDriver returns gcePdDriver that implements TestDriver interface
-+func InitGcePdDriver() testsuites.TestDriver {
-+	// In the current test structure, the driver is initialized first and the
-+	// framework is set up afterwards, so we cannot get the correct OS here.
-+	// Therefore we claim support for all fs types, for both Linux and Windows;
-+	// the node OS is checked later during the test.
-+	supportedTypes := sets.NewString(
-+		"", // Default fsType
-+		"ext2",
-+		"ext3",
-+		"ext4",
-+		"xfs",
-+		"ntfs",
-+	)
-+	return &gcePdDriver{
-+		driverInfo: testsuites.DriverInfo{
-+			Name:             "gcepd",
-+			InTreePluginName: "kubernetes.io/gce-pd",
-+			MaxFileSize:      testpatterns.FileSizeMedium,
-+			SupportedSizeRange: e2evolume.SizeRange{
-+				Min: "5Gi",
-+			},
-+			SupportedFsType:      supportedTypes,
-+			SupportedMountOption: sets.NewString("debug", "nouid32"),
-+			TopologyKeys:         []string{v1.LabelZoneFailureDomain},
-+			Capabilities: map[testsuites.Capability]bool{
-+				testsuites.CapPersistence:         true,
-+				testsuites.CapFsGroup:             true,
-+				testsuites.CapBlock:               true,
-+				testsuites.CapExec:                true,
-+				testsuites.CapMultiPODs:           true,
-+				testsuites.CapControllerExpansion: true,
-+				testsuites.CapNodeExpansion:       true,
-+				// GCE supports volume limits, but the test creates a large
-+				// number of volumes and times out test suites.
-+				testsuites.CapVolumeLimits: false,
-+				testsuites.CapTopology:     true,
-+			},
-+		},
-+	}
-+}
-+
-+func (g *gcePdDriver) GetDriverInfo() *testsuites.DriverInfo {
-+	return &g.driverInfo
-+}
-+
-+func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
-+	e2eskipper.SkipUnlessProviderIs("gce", "gke")
-+	if pattern.FeatureTag == "[sig-windows]" {
-+		e2eskipper.SkipUnlessNodeOSDistroIs("windows")
-+	}
-+}
-+
-+func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
-+	gv, ok := e2evolume.(*gcePdVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
-+	volSource := v1.VolumeSource{
-+		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
-+			PDName:   gv.volumeName,
-+			ReadOnly: readOnly,
-+		},
-+	}
-+	if fsType != "" {
-+		volSource.GCEPersistentDisk.FSType = fsType
-+	}
-+	return &volSource
-+}
-+
-+func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-+	gv, ok := e2evolume.(*gcePdVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
-+	pvSource := v1.PersistentVolumeSource{
-+		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
-+			PDName:   gv.volumeName,
-+			ReadOnly: readOnly,
-+		},
-+	}
-+	if fsType != "" {
-+		pvSource.GCEPersistentDisk.FSType = fsType
-+	}
-+	return &pvSource, nil
-+}
-+
-+func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
-+	provisioner := "kubernetes.io/gce-pd"
-+	parameters := map[string]string{}
-+	if fsType != "" {
-+		parameters["fsType"] = fsType
-+	}
-+	ns := config.Framework.Namespace.Name
-+	suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
-+	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
-+
-+	return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
-+}
-+
-+func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-+	config := &testsuites.PerTestConfig{
-+		Driver:    g,
-+		Prefix:    "gcepd",
-+		Framework: f,
-+	}
-+
-+	if framework.NodeOSDistroIs("windows") {
-+		config.ClientNodeSelection = e2epod.NodeSelection{
-+			Selector: map[string]string{
-+				"kubernetes.io/os": "windows",
-+			},
-+		}
-+	}
-+	return config, func() {}
-+
-+}
-+
-+func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
-+	zone := getInlineVolumeZone(config.Framework)
-+	if volType == testpatterns.InlineVolume {
-+		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
-+		// so pods should also be scheduled there.
-+		config.ClientNodeSelection = e2epod.NodeSelection{
-+			Selector: map[string]string{
-+				v1.LabelZoneFailureDomain: zone,
-+			},
-+		}
-+	}
-+	ginkgo.By("creating a test gce pd volume")
-+	vname, err := e2epv.CreatePDWithRetryAndZone(zone)
-+	framework.ExpectNoError(err)
-+	return &gcePdVolume{
-+		volumeName: vname,
-+	}
-+}
-+
-+func (v *gcePdVolume) DeleteVolume() {
-+	e2epv.DeletePDWithRetry(v.volumeName)
-+}
-+
-+// vSphere
-+type vSphereDriver struct {
-+	driverInfo testsuites.DriverInfo
-+}
-+
-+type vSphereVolume struct {
-+	volumePath string
-+	nodeInfo   *vspheretest.NodeInfo
-+}
-+
-+var _ testsuites.TestDriver = &vSphereDriver{}
-+var _ testsuites.PreprovisionedVolumeTestDriver = &vSphereDriver{}
-+var _ testsuites.InlineVolumeTestDriver = &vSphereDriver{}
-+var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{}
-+var _ testsuites.DynamicPVTestDriver = &vSphereDriver{}
-+
-+// InitVSphereDriver returns vSphereDriver that implements TestDriver interface
-+func InitVSphereDriver() testsuites.TestDriver {
-+	return &vSphereDriver{
-+		driverInfo: testsuites.DriverInfo{
-+			Name:             "vsphere",
-+			InTreePluginName: "kubernetes.io/vsphere-volume",
-+			MaxFileSize:      testpatterns.FileSizeMedium,
-+			SupportedSizeRange: e2evolume.SizeRange{
-+				Min: "5Gi",
-+			},
-+			SupportedFsType: sets.NewString(
-+				"", // Default fsType
-+				"ext4",
-+			),
-+			TopologyKeys: []string{v1.LabelZoneFailureDomain},
-+			Capabilities: map[testsuites.Capability]bool{
-+				testsuites.CapPersistence: true,
-+				testsuites.CapFsGroup:     true,
-+				testsuites.CapExec:        true,
-+				testsuites.CapMultiPODs:   true,
-+				testsuites.CapTopology:    true,
-+			},
-+		},
-+	}
-+}
-+func (v *vSphereDriver) GetDriverInfo() *testsuites.DriverInfo {
-+	return &v.driverInfo
-+}
-+
-+func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
-+	e2eskipper.SkipUnlessProviderIs("vsphere")
-+}
-+
-+func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
-+	vsv, ok := e2evolume.(*vSphereVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
-+
-+	// vSphere driver doesn't seem to support readOnly volumes
-+	// TODO: check if it is correct
-+	if readOnly {
-+		return nil
-+	}
-+	volSource := v1.VolumeSource{
-+		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
-+			VolumePath: vsv.volumePath,
-+		},
-+	}
-+	if fsType != "" {
-+		volSource.VsphereVolume.FSType = fsType
-+	}
-+	return &volSource
-+}
-+
-+func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-+	vsv, ok := e2evolume.(*vSphereVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
-+
-+	// vSphere driver doesn't seem to support readOnly volumes
-+	// TODO: check if it is correct
-+	if readOnly {
-+		return nil, nil
-+	}
-+	pvSource := v1.PersistentVolumeSource{
-+		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
-+			VolumePath: vsv.volumePath,
-+		},
-+	}
-+	if fsType != "" {
-+		pvSource.VsphereVolume.FSType = fsType
-+	}
-+	return &pvSource, nil
-+}
-+
-+func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
-+	provisioner := "kubernetes.io/vsphere-volume"
-+	parameters := map[string]string{}
-+	if fsType != "" {
-+		parameters["fsType"] = fsType
-+	}
-+	ns := config.Framework.Namespace.Name
-+	suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name)
-+
-+	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
-+}
-+
-+func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-+	return &testsuites.PerTestConfig{
-+		Driver:    v,
-+		Prefix:    "vsphere",
-+		Framework: f,
-+	}, func() {}
-+}
-+
-+func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
-+	f := config.Framework
-+	vspheretest.Bootstrap(f)
-+	nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
-+	volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
-+	framework.ExpectNoError(err)
-+	return &vSphereVolume{
-+		volumePath: volumePath,
-+		nodeInfo:   nodeInfo,
-+	}
-+}
-+
-+func (v *vSphereVolume) DeleteVolume() {
-+	v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef)
-+}
-+
-+// Azure Disk
-+type azureDiskDriver struct {
-+	driverInfo testsuites.DriverInfo
-+}
-+
-+type azureDiskVolume struct {
-+	volumeName string
-+}
-+
-+var _ testsuites.TestDriver = &azureDiskDriver{}
-+var _ testsuites.PreprovisionedVolumeTestDriver = &azureDiskDriver{}
-+var _ testsuites.InlineVolumeTestDriver = &azureDiskDriver{}
-+var _ testsuites.PreprovisionedPVTestDriver = &azureDiskDriver{}
-+var _ testsuites.DynamicPVTestDriver = &azureDiskDriver{}
-+
-+// InitAzureDiskDriver returns azureDiskDriver that implements TestDriver interface
-+func InitAzureDiskDriver() testsuites.TestDriver {
-+	return &azureDiskDriver{
-+		driverInfo: testsuites.DriverInfo{
-+			Name:             "azure-disk",
-+			InTreePluginName: "kubernetes.io/azure-disk",
-+			MaxFileSize:      testpatterns.FileSizeMedium,
-+			SupportedSizeRange: e2evolume.SizeRange{
-+				Min: "5Gi",
-+			},
-+			SupportedFsType: sets.NewString(
-+				"", // Default fsType
-+				"ext3",
-+				"ext4",
-+				"xfs",
-+			),
-+			TopologyKeys: []string{v1.LabelZoneFailureDomain},
-+			Capabilities: map[testsuites.Capability]bool{
-+				testsuites.CapPersistence: true,
-+				testsuites.CapFsGroup:     true,
-+				testsuites.CapBlock:       true,
-+				testsuites.CapExec:        true,
-+				testsuites.CapMultiPODs:   true,
-+				// Azure supports volume limits, but the test creates a large
-+				// number of volumes and times out test suites.
-+				testsuites.CapVolumeLimits: false,
-+				testsuites.CapTopology:     true,
-+			},
-+		},
-+	}
-+}
-+
-+func (a *azureDiskDriver) GetDriverInfo() *testsuites.DriverInfo {
-+	return &a.driverInfo
-+}
-+
-+func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
-+	e2eskipper.SkipUnlessProviderIs("azure")
-+}
-+
-+func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
-+	av, ok := e2evolume.(*azureDiskVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
-+	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
-+
-+	kind := v1.AzureManagedDisk
-+	volSource := v1.VolumeSource{
-+		AzureDisk: &v1.AzureDiskVolumeSource{
-+			DiskName:    diskName,
-+			DataDiskURI: av.volumeName,
-+			Kind:        &kind,
-+			ReadOnly:    &readOnly,
-+		},
-+	}
-+	if fsType != "" {
-+		volSource.AzureDisk.FSType = &fsType
-+	}
-+	return &volSource
-+}
-+
-+func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-+	av, ok := e2evolume.(*azureDiskVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
-+
-+	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
-+
-+	kind := v1.AzureManagedDisk
-+	pvSource := v1.PersistentVolumeSource{
-+		AzureDisk: &v1.AzureDiskVolumeSource{
-+			DiskName:    diskName,
-+			DataDiskURI: av.volumeName,
-+			Kind:        &kind,
-+			ReadOnly:    &readOnly,
-+		},
-+	}
-+	if fsType != "" {
-+		pvSource.AzureDisk.FSType = &fsType
-+	}
-+	return &pvSource, nil
-+}
-+
-+func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
-+	provisioner := "kubernetes.io/azure-disk"
-+	parameters := map[string]string{}
-+	if fsType != "" {
-+		parameters["fsType"] = fsType
-+	}
-+	ns := config.Framework.Namespace.Name
-+	suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
-+	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
-+
-+	return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
-+}
-+
-+func (a *azureDiskDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-+	return &testsuites.PerTestConfig{
-+		Driver:    a,
-+		Prefix:    "azure",
-+		Framework: f,
-+	}, func() {}
-+}
-+
-+func (a *azureDiskDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
-+	ginkgo.By("creating a test azure disk volume")
-+	zone := getInlineVolumeZone(config.Framework)
-+	if volType == testpatterns.InlineVolume {
-+		// The PD will be created in framework.TestContext.CloudConfig.Zone,
-+		// so pods should also be scheduled there.
-+		config.ClientNodeSelection = e2epod.NodeSelection{
-+			Selector: map[string]string{
-+				v1.LabelZoneFailureDomain: zone,
-+			},
-+		}
-+	}
-+	volumeName, err := e2epv.CreatePDWithRetryAndZone(zone)
-+	framework.ExpectNoError(err)
-+	return &azureDiskVolume{
-+		volumeName: volumeName,
-+	}
-+}
-+
-+func (v *azureDiskVolume) DeleteVolume() {
-+	e2epv.DeletePDWithRetry(v.volumeName)
-+}
-+
-+// AWS
-+type awsDriver struct {
-+	driverInfo testsuites.DriverInfo
-+}
-+
-+type awsVolume struct {
-+	volumeName string
-+}
-+
-+var _ testsuites.TestDriver = &awsDriver{}
-+var _ testsuites.PreprovisionedVolumeTestDriver = &awsDriver{}
-+var _ testsuites.InlineVolumeTestDriver = &awsDriver{}
-+var _ testsuites.PreprovisionedPVTestDriver = &awsDriver{}
-+var _ testsuites.DynamicPVTestDriver = &awsDriver{}
-+
-+// InitAwsDriver returns an awsDriver that implements the TestDriver interface
-+func InitAwsDriver() testsuites.TestDriver {
-+	return &awsDriver{
-+		driverInfo: testsuites.DriverInfo{
-+			Name:             "aws",
-+			InTreePluginName: "kubernetes.io/aws-ebs",
-+			MaxFileSize:      testpatterns.FileSizeMedium,
-+			SupportedSizeRange: e2evolume.SizeRange{
-+				Min: "5Gi",
-+			},
-+			SupportedFsType: sets.NewString(
-+				"", // Default fsType
-+				"ext2",
-+				"ext3",
-+				"ext4",
-+				"xfs",
-+				"ntfs",
-+			),
-+			SupportedMountOption: sets.NewString("debug", "nouid32"),
-+			TopologyKeys:         []string{v1.LabelZoneFailureDomain},
-+			Capabilities: map[testsuites.Capability]bool{
-+				testsuites.CapPersistence:         true,
-+				testsuites.CapFsGroup:             true,
-+				testsuites.CapBlock:               true,
-+				testsuites.CapExec:                true,
-+				testsuites.CapMultiPODs:           true,
-+				testsuites.CapControllerExpansion: true,
-+				testsuites.CapNodeExpansion:       true,
-+				// AWS supports volume limits, but the test creates a large
-+				// number of volumes and times out the test suite.
-+				testsuites.CapVolumeLimits: false,
-+				testsuites.CapTopology:     true,
-+			},
-+		},
-+	}
-+}
-+
-+func (a *awsDriver) GetDriverInfo() *testsuites.DriverInfo {
-+	return &a.driverInfo
-+}
-+
-+func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
-+	e2eskipper.SkipUnlessProviderIs("aws")
-+}
-+
-+func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
-+	av, ok := e2evolume.(*awsVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
-+	volSource := v1.VolumeSource{
-+		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
-+			VolumeID: av.volumeName,
-+			ReadOnly: readOnly,
-+		},
-+	}
-+	if fsType != "" {
-+		volSource.AWSElasticBlockStore.FSType = fsType
-+	}
-+	return &volSource
-+}
-+
-+func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-+	av, ok := e2evolume.(*awsVolume)
-+	framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
-+	pvSource := v1.PersistentVolumeSource{
-+		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
-+			VolumeID: av.volumeName,
-+			ReadOnly: readOnly,
-+		},
-+	}
-+	if fsType != "" {
-+		pvSource.AWSElasticBlockStore.FSType = fsType
-+	}
-+	return &pvSource, nil
-+}
-+
-+func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
-+	provisioner := "kubernetes.io/aws-ebs"
-+	parameters := map[string]string{}
-+	if fsType != "" {
-+		parameters["fsType"] = fsType
-+	}
-+	ns := config.Framework.Namespace.Name
-+	suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
-+	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
-+
-+	return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
-+}
-+
-+func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-+	config := &testsuites.PerTestConfig{
-+		Driver:    a,
-+		Prefix:    "aws",
-+		Framework: f,
-+	}
-+
-+	if framework.NodeOSDistroIs("windows") {
-+		config.ClientNodeSelection = e2epod.NodeSelection{
-+			Selector: map[string]string{
-+				"kubernetes.io/os": "windows",
-+			},
-+		}
-+	}
-+	return config, func() {}
-+}
-+
-+func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
-+	zone := getInlineVolumeZone(config.Framework)
-+	if volType == testpatterns.InlineVolume {
-+		// The PD will be created in framework.TestContext.CloudConfig.Zone,
-+		// so pods should also be scheduled there.
-+		config.ClientNodeSelection = e2epod.NodeSelection{
-+			Selector: map[string]string{
-+				v1.LabelZoneFailureDomain: zone,
-+			},
-+		}
-+	}
-+	ginkgo.By("creating a test aws volume")
-+	vname, err := e2epv.CreatePDWithRetryAndZone(zone)
-+	framework.ExpectNoError(err)
-+	return &awsVolume{
-+		volumeName: vname,
-+	}
-+}
-+
-+func (v *awsVolume) DeleteVolume() {
-+	e2epv.DeletePDWithRetry(v.volumeName)
-+}
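Both cloud drivers above build their StorageClass with storagev1.VolumeBindingWaitForFirstConsumer, so the volume is only provisioned once a consuming pod has been scheduled, in a zone where that pod can actually run. A minimal sketch of the same pattern, assuming only the k8s.io API types (the helper name makeDelayedBindingClass is hypothetical):

    package drivers

    import (
        storagev1 "k8s.io/api/storage/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // makeDelayedBindingClass mirrors what the GetDynamicProvisionStorageClass
    // implementations above do: provisioning is deferred until the first
    // consumer pod is scheduled.
    func makeDelayedBindingClass(name, provisioner string, params map[string]string) *storagev1.StorageClass {
        mode := storagev1.VolumeBindingWaitForFirstConsumer
        return &storagev1.StorageClass{
            ObjectMeta:        metav1.ObjectMeta{Name: name},
            Provisioner:       provisioner,
            Parameters:        params,
            VolumeBindingMode: &mode,
        }
    }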
-diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go
-index 19372062407..8322db743cd 100644
---- a/test/e2e/storage/in_tree_volumes.go
-+++ b/test/e2e/storage/in_tree_volumes.go
-@@ -33,11 +33,6 @@ var testDrivers = []func() testsuites.TestDriver{
- 	drivers.InitHostPathDriver,
- 	drivers.InitHostPathSymlinkDriver,
- 	drivers.InitEmptydirDriver,
--	drivers.InitCinderDriver,
--	drivers.InitGcePdDriver,
--	drivers.InitVSphereDriver,
--	drivers.InitAzureDiskDriver,
--	drivers.InitAwsDriver,
- 	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectory),
- 	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryLink),
- 	drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryBindMounted),
-diff --git a/test/e2e/storage/in_tree_volumes_providers.go b/test/e2e/storage/in_tree_volumes_providers.go
-new file mode 100644
-index 00000000000..d6a5dbca191
---- /dev/null
-+++ b/test/e2e/storage/in_tree_volumes_providers.go
-@@ -0,0 +1,46 @@
-+// +build !providerless
-+
-+/*
-+Copyright 2020 The Kubernetes Authors.
-+
-+Licensed under the Apache License, Version 2.0 (the "License");
-+you may not use this file except in compliance with the License.
-+You may obtain a copy of the License at
-+
-+    http://www.apache.org/licenses/LICENSE-2.0
-+
-+Unless required by applicable law or agreed to in writing, software
-+distributed under the License is distributed on an "AS IS" BASIS,
-+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+See the License for the specific language governing permissions and
-+limitations under the License.
-+*/
-+
-+package storage
-+
-+import (
-+	"github.com/onsi/ginkgo"
-+	"k8s.io/kubernetes/test/e2e/storage/drivers"
-+	"k8s.io/kubernetes/test/e2e/storage/testsuites"
-+	"k8s.io/kubernetes/test/e2e/storage/utils"
-+)
-+
-+// List of testDrivers to be executed in the loop below
-+var testDriversProviders = []func() testsuites.TestDriver{
-+	drivers.InitCinderDriver,
-+	drivers.InitGcePdDriver,
-+	drivers.InitVSphereDriver,
-+	drivers.InitAzureDiskDriver,
-+	drivers.InitAwsDriver,
-+}
-+
-+// This executes testSuites for in-tree volumes that require a cloud provider.
-+var _ = utils.SIGDescribe("In-tree Volumes for Cloud Providers", func() {
-+	for _, initDriver := range testDriversProviders {
-+		curDriver := initDriver()
-+
-+		ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
-+			testsuites.DefineTestSuite(curDriver, testsuites.BaseSuites)
-+		})
-+	}
-+})
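The "// +build !providerless" constraint at the top of the new file is what actually removes these drivers from a providerless build: when the providerless tag is set, the whole file is compiled out and only the generic drivers registered in in_tree_volumes.go remain. A minimal sketch of the mechanism, with hypothetical names (note the mandatory blank line between the constraint and the package clause):

    // +build !providerless

    package storage

    // cloudDrivers is linked in only when the "providerless" build tag is
    // absent; a sibling file tagged "// +build providerless" could declare
    // the same variable as an empty slice so the package builds either way.
    var cloudDrivers = []string{"gce-pd", "aws-ebs", "azure-disk", "vsphere", "cinder"}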
-diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go
-index 5afebb5e903..b197eee99a6 100644
---- a/test/e2e/storage/nfs_persistent_volume-disruptive.go
-+++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go
-@@ -91,7 +91,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
- 
- 	ginkgo.BeforeEach(func() {
- 		// To protect the NFS volume pod from the kubelet restart, we isolate it on its own node.
--		e2eskipper.SkipUnlessNodeCountIsAtLeast(minNodes)
-+		e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
- 		e2eskipper.SkipIfProviderIs("local")
- 
- 		c = f.ClientSet
-diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go
-index f5b6060a834..addd304147c 100644
---- a/test/e2e/storage/pd.go
-+++ b/test/e2e/storage/pd.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2015 The Kubernetes Authors.
- 
-diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go
-index b8bc887384e..f572754c5e8 100644
---- a/test/e2e/storage/persistent_volumes-gce.go
-+++ b/test/e2e/storage/persistent_volumes-gce.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2017 The Kubernetes Authors.
- 
-diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go
-index 7763afaf6b1..a042dcc9d4a 100644
---- a/test/e2e/storage/regional_pd.go
-+++ b/test/e2e/storage/regional_pd.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2016 The Kubernetes Authors.
- 
-@@ -18,6 +20,7 @@ package storage
- 
- import (
- 	"context"
-+
- 	"github.com/onsi/ginkgo"
- 	"github.com/onsi/gomega"
- 
-diff --git a/test/e2e/storage/utils/BUILD b/test/e2e/storage/utils/BUILD
-index bdc78982dcb..c57ff325f76 100644
---- a/test/e2e/storage/utils/BUILD
-+++ b/test/e2e/storage/utils/BUILD
-@@ -7,7 +7,6 @@ go_library(
-     srcs = [
-         "create.go",
-         "deployment.go",
--        "ebs.go",
-         "framework.go",
-         "host_exec.go",
-         "local.go",
-@@ -37,8 +36,6 @@ go_library(
-         "//test/e2e/framework/ssh:go_default_library",
-         "//test/e2e/framework/testfiles:go_default_library",
-         "//test/utils/image:go_default_library",
--        "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
--        "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
-         "//vendor/github.com/onsi/ginkgo:go_default_library",
-         "//vendor/github.com/onsi/gomega:go_default_library",
-         "//vendor/github.com/pkg/errors:go_default_library",
-diff --git a/test/e2e/storage/utils/ebs.go b/test/e2e/storage/utils/ebs.go
-index 39e223f36aa..55065ea07b7 100644
---- a/test/e2e/storage/utils/ebs.go
-+++ b/test/e2e/storage/utils/ebs.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2020 The Kubernetes Authors.
- 
-diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go
-index a8b494eb3ac..c070a81283c 100644
---- a/test/e2e/storage/volume_provisioning.go
-+++ b/test/e2e/storage/volume_provisioning.go
-@@ -24,11 +24,6 @@ import (
- 	"time"
- 
- 	"github.com/onsi/ginkgo"
--	"github.com/onsi/gomega"
--
--	"github.com/aws/aws-sdk-go/aws"
--	"github.com/aws/aws-sdk-go/aws/session"
--	"github.com/aws/aws-sdk-go/service/ec2"
- 
- 	v1 "k8s.io/api/core/v1"
- 	rbacv1 "k8s.io/api/rbac/v1"
-@@ -37,9 +32,7 @@ import (
- 	apierrors "k8s.io/apimachinery/pkg/api/errors"
- 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- 	"k8s.io/apimachinery/pkg/runtime/schema"
--	"k8s.io/apimachinery/pkg/types"
- 	"k8s.io/apimachinery/pkg/util/rand"
--	"k8s.io/apimachinery/pkg/util/sets"
- 	"k8s.io/apimachinery/pkg/util/wait"
- 	"k8s.io/apiserver/pkg/authentication/serviceaccount"
- 	clientset "k8s.io/client-go/kubernetes"
-@@ -48,7 +41,6 @@ import (
- 	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
- 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
- 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
--	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
- 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
- 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
- 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
-@@ -61,80 +53,6 @@ const (
- 	externalPluginName = "example.com/nfs"
- )
- 
--// checkAWSEBS checks properties of an AWS EBS volume. The test framework does
--// not instantiate the full AWS provider, so we need to use the ec2 API directly.
--func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
--	diskName := volume.Spec.AWSElasticBlockStore.VolumeID
--
--	var client *ec2.EC2
--
--	tokens := strings.Split(diskName, "/")
--	volumeID := tokens[len(tokens)-1]
--
--	zone := framework.TestContext.CloudConfig.Zone
--
--	awsSession, err := session.NewSession()
--	if err != nil {
--		return fmt.Errorf("error creating session: %v", err)
--	}
--
--	if len(zone) > 0 {
--		region := zone[:len(zone)-1]
--		cfg := aws.Config{Region: &region}
--		framework.Logf("using region %s", region)
--		client = ec2.New(awsSession, &cfg)
--	} else {
--		framework.Logf("no region configured")
--		client = ec2.New(awsSession)
--	}
--
--	request := &ec2.DescribeVolumesInput{
--		VolumeIds: []*string{&volumeID},
--	}
--	info, err := client.DescribeVolumes(request)
--	if err != nil {
--		return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err)
--	}
--	if len(info.Volumes) == 0 {
--		return fmt.Errorf("no volumes found for volume %q", volumeID)
--	}
--	if len(info.Volumes) > 1 {
--		return fmt.Errorf("multiple volumes found for volume %q", volumeID)
--	}
--
--	awsVolume := info.Volumes[0]
--	if awsVolume.VolumeType == nil {
--		return fmt.Errorf("expected volume type %q, got nil", volumeType)
--	}
--	if *awsVolume.VolumeType != volumeType {
--		return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType)
--	}
--	if encrypted && awsVolume.Encrypted == nil {
--		return fmt.Errorf("expected encrypted volume, got no encryption")
--	}
--	if encrypted && !*awsVolume.Encrypted {
--		return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted)
--	}
--	return nil
--}
--
--func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
--	cloud, err := gce.GetGCECloud()
--	if err != nil {
--		return err
--	}
--	diskName := volume.Spec.GCEPersistentDisk.PDName
--	disk, err := cloud.GetDiskByNameUnknownZone(diskName)
--	if err != nil {
--		return err
--	}
--
--	if !strings.HasSuffix(disk.Type, volumeType) {
--		return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType)
--	}
--	return nil
--}
--
- var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
- 	f := framework.NewDefaultFramework("volume-provisioning")
- 
-@@ -147,451 +65,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
- 		ns = f.Namespace.Name
- 	})
- 
--	ginkgo.Describe("DynamicProvisioner [Slow]", func() {
--		ginkgo.It("should provision storage with different parameters", func() {
--
--			// This test checks that dynamic provisioning can provision a volume
--			// that can be used to persist data among pods.
--			tests := []testsuites.StorageClassTest{
--				// GCE/GKE
--				{
--					Name:           "SSD PD on GCE/GKE",
--					CloudProviders: []string{"gce", "gke"},
--					Provisioner:    "kubernetes.io/gce-pd",
--					Parameters: map[string]string{
--						"type": "pd-ssd",
--						"zone": getRandomClusterZone(c),
--					},
--					ClaimSize:    "1.5Gi",
--					ExpectedSize: "2Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
--
--						err := checkGCEPD(volume, "pd-ssd")
--						framework.ExpectNoError(err, "checkGCEPD pd-ssd")
--					},
--				},
--				{
--					Name:           "HDD PD on GCE/GKE",
--					CloudProviders: []string{"gce", "gke"},
--					Provisioner:    "kubernetes.io/gce-pd",
--					Parameters: map[string]string{
--						"type": "pd-standard",
--					},
--					ClaimSize:    "1.5Gi",
--					ExpectedSize: "2Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
--
--						err := checkGCEPD(volume, "pd-standard")
--						framework.ExpectNoError(err, "checkGCEPD pd-standard")
--					},
--				},
--				// AWS
--				{
--					Name:           "gp2 EBS on AWS",
--					CloudProviders: []string{"aws"},
--					Provisioner:    "kubernetes.io/aws-ebs",
--					Parameters: map[string]string{
--						"type": "gp2",
--						"zone": getRandomClusterZone(c),
--					},
--					ClaimSize:    "1.5Gi",
--					ExpectedSize: "2Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
--
--						err := checkAWSEBS(volume, "gp2", false)
--						framework.ExpectNoError(err, "checkAWSEBS gp2")
--					},
--				},
--				{
--					Name:           "io1 EBS on AWS",
--					CloudProviders: []string{"aws"},
--					Provisioner:    "kubernetes.io/aws-ebs",
--					Parameters: map[string]string{
--						"type":      "io1",
--						"iopsPerGB": "50",
--					},
--					ClaimSize:    "3.5Gi",
--					ExpectedSize: "4Gi", // 4 GiB is minimum for io1
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
--
--						err := checkAWSEBS(volume, "io1", false)
--						framework.ExpectNoError(err, "checkAWSEBS io1")
--					},
--				},
--				{
--					Name:           "sc1 EBS on AWS",
--					CloudProviders: []string{"aws"},
--					Provisioner:    "kubernetes.io/aws-ebs",
--					Parameters: map[string]string{
--						"type": "sc1",
--					},
--					ClaimSize:    "500Gi", // minimum for sc1
--					ExpectedSize: "500Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
--
--						err := checkAWSEBS(volume, "sc1", false)
--						framework.ExpectNoError(err, "checkAWSEBS sc1")
--					},
--				},
--				{
--					Name:           "st1 EBS on AWS",
--					CloudProviders: []string{"aws"},
--					Provisioner:    "kubernetes.io/aws-ebs",
--					Parameters: map[string]string{
--						"type": "st1",
--					},
--					ClaimSize:    "500Gi", // minimum for st1
--					ExpectedSize: "500Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
--
--						err := checkAWSEBS(volume, "st1", false)
--						framework.ExpectNoError(err, "checkAWSEBS st1")
--					},
--				},
--				{
--					Name:           "encrypted EBS on AWS",
--					CloudProviders: []string{"aws"},
--					Provisioner:    "kubernetes.io/aws-ebs",
--					Parameters: map[string]string{
--						"encrypted": "true",
--					},
--					ClaimSize:    "1Gi",
--					ExpectedSize: "1Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
--
--						err := checkAWSEBS(volume, "gp2", true)
--						framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted")
--					},
--				},
--				// OpenStack generic tests (work on all OpenStack deployments)
--				{
--					Name:           "generic Cinder volume on OpenStack",
--					CloudProviders: []string{"openstack"},
--					Provisioner:    "kubernetes.io/cinder",
--					Parameters:     map[string]string{},
--					ClaimSize:      "1.5Gi",
--					ExpectedSize:   "2Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--					},
--				},
--				{
--					Name:           "Cinder volume with empty volume type and zone on OpenStack",
--					CloudProviders: []string{"openstack"},
--					Provisioner:    "kubernetes.io/cinder",
--					Parameters: map[string]string{
--						"type":         "",
--						"availability": "",
--					},
--					ClaimSize:    "1.5Gi",
--					ExpectedSize: "2Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--					},
--				},
--				// vSphere generic test
--				{
--					Name:           "generic vSphere volume",
--					CloudProviders: []string{"vsphere"},
--					Provisioner:    "kubernetes.io/vsphere-volume",
--					Parameters:     map[string]string{},
--					ClaimSize:      "1.5Gi",
--					ExpectedSize:   "1.5Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--					},
--				},
--				// Azure
--				{
--					Name:           "Azure disk volume with empty sku and location",
--					CloudProviders: []string{"azure"},
--					Provisioner:    "kubernetes.io/azure-disk",
--					Parameters:     map[string]string{},
--					ClaimSize:      "1Gi",
--					ExpectedSize:   "1Gi",
--					PvCheck: func(claim *v1.PersistentVolumeClaim) {
--						testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--					},
--				},
--			}
--
--			var betaTest *testsuites.StorageClassTest
--			for i, t := range tests {
--				// Beware of closure capture: use local variables instead of
--				// those from the outer scope
--				test := t
--
--				if !framework.ProviderIs(test.CloudProviders...) {
--					framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
--					continue
--				}
--
--				// Remember the last supported test for the subsequent beta-API test
--				betaTest = &test
--
--				ginkgo.By("Testing " + test.Name)
--				suffix := fmt.Sprintf("%d", i)
--				test.Client = c
--				test.Class = newStorageClass(test, ns, suffix)
--				test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
--					ClaimSize:        test.ClaimSize,
--					StorageClassName: &test.Class.Name,
--					VolumeMode:       &test.VolumeMode,
--				}, ns)
--				test.TestDynamicProvisioning()
--			}
--
--			// Run the last test with storage.k8s.io/v1beta1 on pvc
--			if betaTest != nil {
--				ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning")
--				class := newBetaStorageClass(*betaTest, "beta")
--				// we need to create the class manually; testDynamicProvisioning does not accept a beta class
--				class, err := c.StorageV1beta1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
--				framework.ExpectNoError(err)
--				defer deleteStorageClass(c, class.Name)
--
--				betaTest.Client = c
--				betaTest.Class = nil
--				betaTest.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
--					ClaimSize:        betaTest.ClaimSize,
--					StorageClassName: &class.Name,
--					VolumeMode:       &betaTest.VolumeMode,
--				}, ns)
--				betaTest.Claim.Spec.StorageClassName = &(class.Name)
--				(*betaTest).TestDynamicProvisioning()
--			}
--		})
--
--		ginkgo.It("should provision storage with non-default reclaim policy Retain", func() {
--			e2eskipper.SkipUnlessProviderIs("gce", "gke")
--
--			test := testsuites.StorageClassTest{
--				Client:         c,
--				Name:           "HDD PD on GCE/GKE",
--				CloudProviders: []string{"gce", "gke"},
--				Provisioner:    "kubernetes.io/gce-pd",
--				Parameters: map[string]string{
--					"type": "pd-standard",
--				},
--				ClaimSize:    "1Gi",
--				ExpectedSize: "1Gi",
--				PvCheck: func(claim *v1.PersistentVolumeClaim) {
--					volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
--					gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
--
--					err := checkGCEPD(volume, "pd-standard")
--					framework.ExpectNoError(err, "checkGCEPD")
--				},
--			}
--			test.Class = newStorageClass(test, ns, "reclaimpolicy")
--			retain := v1.PersistentVolumeReclaimRetain
--			test.Class.ReclaimPolicy = &retain
--			test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
--				ClaimSize:        test.ClaimSize,
--				StorageClassName: &test.Class.Name,
--				VolumeMode:       &test.VolumeMode,
--			}, ns)
--			pv := test.TestDynamicProvisioning()
--
--			ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
--			framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
--
--			ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name))
--			framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
--
--			ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name))
--			framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
--			framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
--		})
--
--		ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() {
--			e2eskipper.SkipUnlessProviderIs("gce", "gke")
--			var suffix string = "unmanaged"
--
--			ginkgo.By("Discovering an unmanaged zone")
--			allZones := sets.NewString() // all zones in the project
--
--			gceCloud, err := gce.GetGCECloud()
--			framework.ExpectNoError(err)
--
--			// Get all k8s-managed zones (for this test, the same as zones with nodes in them)
--			managedZones, err := gceCloud.GetAllZonesFromCloudProvider()
--			framework.ExpectNoError(err)
--
--			// Get a list of all zones in the project
--			zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
--			framework.ExpectNoError(err)
--			for _, z := range zones.Items {
--				allZones.Insert(z.Name)
--			}
--
--			// Get the subset of zones not managed by k8s
--			var unmanagedZone string
--			var popped bool
--			unmanagedZones := allZones.Difference(managedZones)
--			// And select one of them at random.
--			if unmanagedZone, popped = unmanagedZones.PopAny(); !popped {
--				e2eskipper.Skipf("No unmanaged zones found.")
--			}
--
--			ginkgo.By("Creating a StorageClass for the unmanaged zone")
--			test := testsuites.StorageClassTest{
--				Name:        "unmanaged_zone",
--				Provisioner: "kubernetes.io/gce-pd",
--				Parameters:  map[string]string{"zone": unmanagedZone},
--				ClaimSize:   "1Gi",
--			}
--			sc := newStorageClass(test, ns, suffix)
--			sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
--			framework.ExpectNoError(err)
--			defer deleteStorageClass(c, sc.Name)
--
--			ginkgo.By("Creating a claim and expecting it to timeout")
--			pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
--				ClaimSize:        test.ClaimSize,
--				StorageClassName: &sc.Name,
--				VolumeMode:       &test.VolumeMode,
--			}, ns)
--			pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
--			framework.ExpectNoError(err)
--			defer func() {
--				framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
--			}()
--
--			// The claim should time out while still in phase Pending
--			err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
--			framework.ExpectError(err)
--			framework.Logf(err.Error())
--		})
--
--		ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() {
--			// This case tests for the regressions of a bug fixed by PR #21268
--			// REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV
--			// not being deleted.
--			// NOTE: Polls until no PVs are detected; times out at 5 minutes.
--
--			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
--
--			const raceAttempts int = 100
--			var residualPVs []*v1.PersistentVolume
--			ginkgo.By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts))
--			test := testsuites.StorageClassTest{
--				Name:        "deletion race",
--				Provisioner: "", // Use a native one based on current cloud provider
--				ClaimSize:   "1Gi",
--			}
--
--			class := newStorageClass(test, ns, "race")
--			class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
--			framework.ExpectNoError(err)
--			defer deleteStorageClass(c, class.Name)
--
--			// To increase the chance of detection, attempt multiple iterations
--			for i := 0; i < raceAttempts; i++ {
--				prefix := fmt.Sprintf("race-%d", i)
--				claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
--					NamePrefix:       prefix,
--					ClaimSize:        test.ClaimSize,
--					StorageClassName: &class.Name,
--					VolumeMode:       &test.VolumeMode,
--				}, ns)
--				tmpClaim, err := e2epv.CreatePVC(c, ns, claim)
--				framework.ExpectNoError(err)
--				framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
--			}
--
--			ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
--			residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
--			// Clean up the test resources before returning
--			defer deleteProvisionedVolumesAndDisks(c, residualPVs)
--			framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs))
--
--			framework.Logf("0 PersistentVolumes remain.")
--		})
--
--		ginkgo.It("deletion should be idempotent", func() {
--			// This test ensures that deletion of a volume is idempotent.
--			// It creates a PV with Retain policy, deletes underlying AWS / GCE
--			// volume and changes the reclaim policy to Delete.
--			// PV controller should delete the PV even though the underlying volume
--			// is already deleted.
--			e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
--			ginkgo.By("creating PD")
--			diskName, err := e2epv.CreatePDWithRetry()
--			framework.ExpectNoError(err)
--
--			ginkgo.By("creating PV")
--			pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
--				NamePrefix: "volume-idempotent-delete-",
--				// Use Retain to keep the PV, the test will change it to Delete
--				// when the time comes.
--				ReclaimPolicy: v1.PersistentVolumeReclaimRetain,
--				AccessModes: []v1.PersistentVolumeAccessMode{
--					v1.ReadWriteOnce,
--				},
--				Capacity: "1Gi",
--				// The PV is bound to a non-existent PVC, so its reclaim policy is
--				// executed immediately
--				Prebind: &v1.PersistentVolumeClaim{
--					ObjectMeta: metav1.ObjectMeta{
--						Name:      "dummy-claim-name",
--						Namespace: ns,
--						UID:       types.UID("01234567890"),
--					},
--				},
--			})
--			switch framework.TestContext.Provider {
--			case "aws":
--				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
--					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
--						VolumeID: diskName,
--					},
--				}
--			case "gce", "gke":
--				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
--					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
--						PDName: diskName,
--					},
--				}
--			}
--			pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
--			framework.ExpectNoError(err)
--
--			ginkgo.By("waiting for the PV to get Released")
--			err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout)
--			framework.ExpectNoError(err)
--
--			ginkgo.By("deleting the PD")
--			err = e2epv.DeletePVSource(&pv.Spec.PersistentVolumeSource)
--			framework.ExpectNoError(err)
--
--			ginkgo.By("changing the PV reclaim policy")
--			pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
--			framework.ExpectNoError(err)
--			pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete
--			pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})
--			framework.ExpectNoError(err)
--
--			ginkgo.By("waiting for the PV to get deleted")
--			err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout)
--			framework.ExpectNoError(err)
--		})
--	})
--
- 	ginkgo.Describe("DynamicProvisioner External", func() {
- 		ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() {
- 			// external dynamic provisioner pods need additional permissions provided by the
-diff --git a/test/e2e/storage/volume_provisioning_providers.go b/test/e2e/storage/volume_provisioning_providers.go
-new file mode 100644
-index 00000000000..932c644af7a
---- /dev/null
-+++ b/test/e2e/storage/volume_provisioning_providers.go
-@@ -0,0 +1,577 @@
-+// +build !providerless
-+
-+/*
-+Copyright 2016 The Kubernetes Authors.
-+
-+Licensed under the Apache License, Version 2.0 (the "License");
-+you may not use this file except in compliance with the License.
-+You may obtain a copy of the License at
-+
-+    http://www.apache.org/licenses/LICENSE-2.0
-+
-+Unless required by applicable law or agreed to in writing, software
-+distributed under the License is distributed on an "AS IS" BASIS,
-+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+See the License for the specific language governing permissions and
-+limitations under the License.
-+*/
-+
-+package storage
-+
-+import (
-+	"context"
-+	"fmt"
-+	"strings"
-+	"time"
-+
-+	"github.com/aws/aws-sdk-go/aws"
-+	"github.com/aws/aws-sdk-go/aws/session"
-+	"github.com/aws/aws-sdk-go/service/ec2"
-+	"github.com/onsi/ginkgo"
-+	"github.com/onsi/gomega"
-+
-+	v1 "k8s.io/api/core/v1"
-+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-+	"k8s.io/apimachinery/pkg/types"
-+	"k8s.io/apimachinery/pkg/util/sets"
-+	clientset "k8s.io/client-go/kubernetes"
-+	"k8s.io/kubernetes/test/e2e/framework"
-+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-+	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
-+	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
-+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-+	"k8s.io/kubernetes/test/e2e/storage/testsuites"
-+	"k8s.io/kubernetes/test/e2e/storage/utils"
-+)
-+
-+// checkAWSEBS checks properties of an AWS EBS volume. The test framework does
-+// not instantiate the full AWS provider, so we need to use the ec2 API directly.
-+func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
-+	diskName := volume.Spec.AWSElasticBlockStore.VolumeID
-+
-+	var client *ec2.EC2
-+
-+	tokens := strings.Split(diskName, "/")
-+	volumeID := tokens[len(tokens)-1]
-+
-+	zone := framework.TestContext.CloudConfig.Zone
-+
-+	awsSession, err := session.NewSession()
-+	if err != nil {
-+		return fmt.Errorf("error creating session: %v", err)
-+	}
-+
-+	if len(zone) > 0 {
-+		region := zone[:len(zone)-1]
-+		cfg := aws.Config{Region: &region}
-+		framework.Logf("using region %s", region)
-+		client = ec2.New(awsSession, &cfg)
-+	} else {
-+		framework.Logf("no region configured")
-+		client = ec2.New(awsSession)
-+	}
-+
-+	request := &ec2.DescribeVolumesInput{
-+		VolumeIds: []*string{&volumeID},
-+	}
-+	info, err := client.DescribeVolumes(request)
-+	if err != nil {
-+		return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err)
-+	}
-+	if len(info.Volumes) == 0 {
-+		return fmt.Errorf("no volumes found for volume %q", volumeID)
-+	}
-+	if len(info.Volumes) > 1 {
-+		return fmt.Errorf("multiple volumes found for volume %q", volumeID)
-+	}
-+
-+	awsVolume := info.Volumes[0]
-+	if awsVolume.VolumeType == nil {
-+		return fmt.Errorf("expected volume type %q, got nil", volumeType)
-+	}
-+	if *awsVolume.VolumeType != volumeType {
-+		return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType)
-+	}
-+	if encrypted && awsVolume.Encrypted == nil {
-+		return fmt.Errorf("expected encrypted volume, got no encryption")
-+	}
-+	if encrypted && !*awsVolume.Encrypted {
-+		return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted)
-+	}
-+	return nil
-+}
-+
-+func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
-+	cloud, err := gce.GetGCECloud()
-+	if err != nil {
-+		return err
-+	}
-+	diskName := volume.Spec.GCEPersistentDisk.PDName
-+	disk, err := cloud.GetDiskByNameUnknownZone(diskName)
-+	if err != nil {
-+		return err
-+	}
-+
-+	if !strings.HasSuffix(disk.Type, volumeType) {
-+		return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType)
-+	}
-+	return nil
-+}
-+
-+var _ = utils.SIGDescribe("Dynamic Provisioning with cloud providers", func() {
-+	f := framework.NewDefaultFramework("volume-provisioning")
-+
-+	// filled in BeforeEach
-+	var c clientset.Interface
-+	var ns string
-+
-+	ginkgo.BeforeEach(func() {
-+		c = f.ClientSet
-+		ns = f.Namespace.Name
-+	})
-+
-+	ginkgo.Describe("DynamicProvisioner [Slow]", func() {
-+		ginkgo.It("should provision storage with different parameters", func() {
-+
-+			// This test checks that dynamic provisioning can provision a volume
-+			// that can be used to persist data among pods.
-+			tests := []testsuites.StorageClassTest{
-+				// GCE/GKE
-+				{
-+					Name:           "SSD PD on GCE/GKE",
-+					CloudProviders: []string{"gce", "gke"},
-+					Provisioner:    "kubernetes.io/gce-pd",
-+					Parameters: map[string]string{
-+						"type": "pd-ssd",
-+						"zone": getRandomClusterZone(c),
-+					},
-+					ClaimSize:    "1.5Gi",
-+					ExpectedSize: "2Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
-+
-+						err := checkGCEPD(volume, "pd-ssd")
-+						framework.ExpectNoError(err, "checkGCEPD pd-ssd")
-+					},
-+				},
-+				{
-+					Name:           "HDD PD on GCE/GKE",
-+					CloudProviders: []string{"gce", "gke"},
-+					Provisioner:    "kubernetes.io/gce-pd",
-+					Parameters: map[string]string{
-+						"type": "pd-standard",
-+					},
-+					ClaimSize:    "1.5Gi",
-+					ExpectedSize: "2Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
-+
-+						err := checkGCEPD(volume, "pd-standard")
-+						framework.ExpectNoError(err, "checkGCEPD pd-standard")
-+					},
-+				},
-+				// AWS
-+				{
-+					Name:           "gp2 EBS on AWS",
-+					CloudProviders: []string{"aws"},
-+					Provisioner:    "kubernetes.io/aws-ebs",
-+					Parameters: map[string]string{
-+						"type": "gp2",
-+						"zone": getRandomClusterZone(c),
-+					},
-+					ClaimSize:    "1.5Gi",
-+					ExpectedSize: "2Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
-+
-+						err := checkAWSEBS(volume, "gp2", false)
-+						framework.ExpectNoError(err, "checkAWSEBS gp2")
-+					},
-+				},
-+				{
-+					Name:           "io1 EBS on AWS",
-+					CloudProviders: []string{"aws"},
-+					Provisioner:    "kubernetes.io/aws-ebs",
-+					Parameters: map[string]string{
-+						"type":      "io1",
-+						"iopsPerGB": "50",
-+					},
-+					ClaimSize:    "3.5Gi",
-+					ExpectedSize: "4Gi", // 4 GiB is minimum for io1
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
-+
-+						err := checkAWSEBS(volume, "io1", false)
-+						framework.ExpectNoError(err, "checkAWSEBS io1")
-+					},
-+				},
-+				{
-+					Name:           "sc1 EBS on AWS",
-+					CloudProviders: []string{"aws"},
-+					Provisioner:    "kubernetes.io/aws-ebs",
-+					Parameters: map[string]string{
-+						"type": "sc1",
-+					},
-+					ClaimSize:    "500Gi", // minimum for sc1
-+					ExpectedSize: "500Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
-+
-+						err := checkAWSEBS(volume, "sc1", false)
-+						framework.ExpectNoError(err, "checkAWSEBS sc1")
-+					},
-+				},
-+				{
-+					Name:           "st1 EBS on AWS",
-+					CloudProviders: []string{"aws"},
-+					Provisioner:    "kubernetes.io/aws-ebs",
-+					Parameters: map[string]string{
-+						"type": "st1",
-+					},
-+					ClaimSize:    "500Gi", // minimum for st1
-+					ExpectedSize: "500Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
-+
-+						err := checkAWSEBS(volume, "st1", false)
-+						framework.ExpectNoError(err, "checkAWSEBS st1")
-+					},
-+				},
-+				{
-+					Name:           "encrypted EBS on AWS",
-+					CloudProviders: []string{"aws"},
-+					Provisioner:    "kubernetes.io/aws-ebs",
-+					Parameters: map[string]string{
-+						"encrypted": "true",
-+					},
-+					ClaimSize:    "1Gi",
-+					ExpectedSize: "1Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+						gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
-+
-+						err := checkAWSEBS(volume, "gp2", true)
-+						framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted")
-+					},
-+				},
-+				// OpenStack generic tests (work on all OpenStack deployments)
-+				{
-+					Name:           "generic Cinder volume on OpenStack",
-+					CloudProviders: []string{"openstack"},
-+					Provisioner:    "kubernetes.io/cinder",
-+					Parameters:     map[string]string{},
-+					ClaimSize:      "1.5Gi",
-+					ExpectedSize:   "2Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+					},
-+				},
-+				{
-+					Name:           "Cinder volume with empty volume type and zone on OpenStack",
-+					CloudProviders: []string{"openstack"},
-+					Provisioner:    "kubernetes.io/cinder",
-+					Parameters: map[string]string{
-+						"type":         "",
-+						"availability": "",
-+					},
-+					ClaimSize:    "1.5Gi",
-+					ExpectedSize: "2Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+					},
-+				},
-+				// vSphere generic test
-+				{
-+					Name:           "generic vSphere volume",
-+					CloudProviders: []string{"vsphere"},
-+					Provisioner:    "kubernetes.io/vsphere-volume",
-+					Parameters:     map[string]string{},
-+					ClaimSize:      "1.5Gi",
-+					ExpectedSize:   "1.5Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+					},
-+				},
-+				// Azure
-+				{
-+					Name:           "Azure disk volume with empty sku and location",
-+					CloudProviders: []string{"azure"},
-+					Provisioner:    "kubernetes.io/azure-disk",
-+					Parameters:     map[string]string{},
-+					ClaimSize:      "1Gi",
-+					ExpectedSize:   "1Gi",
-+					PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+						testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+					},
-+				},
-+			}
-+
-+			var betaTest *testsuites.StorageClassTest
-+			for i, t := range tests {
-+				// Beware of closure capture: use local variables instead of
-+				// those from the outer scope
-+				test := t
-+
-+				if !framework.ProviderIs(test.CloudProviders...) {
-+					framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
-+					continue
-+				}
-+
-+				// Remember the last supported test for the subsequent beta-API test
-+				betaTest = &test
-+
-+				ginkgo.By("Testing " + test.Name)
-+				suffix := fmt.Sprintf("%d", i)
-+				test.Client = c
-+				test.Class = newStorageClass(test, ns, suffix)
-+				test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
-+					ClaimSize:        test.ClaimSize,
-+					StorageClassName: &test.Class.Name,
-+					VolumeMode:       &test.VolumeMode,
-+				}, ns)
-+				test.TestDynamicProvisioning()
-+			}
-+
-+			// Run the last test with storage.k8s.io/v1beta1 on pvc
-+			if betaTest != nil {
-+				ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning")
-+				class := newBetaStorageClass(*betaTest, "beta")
-+				// we need to create the class manually; testDynamicProvisioning does not accept a beta class
-+				class, err := c.StorageV1beta1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
-+				framework.ExpectNoError(err)
-+				defer deleteStorageClass(c, class.Name)
-+
-+				betaTest.Client = c
-+				betaTest.Class = nil
-+				betaTest.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
-+					ClaimSize:        betaTest.ClaimSize,
-+					StorageClassName: &class.Name,
-+					VolumeMode:       &betaTest.VolumeMode,
-+				}, ns)
-+				betaTest.Claim.Spec.StorageClassName = &(class.Name)
-+				(*betaTest).TestDynamicProvisioning()
-+			}
-+		})
-+
-+		ginkgo.It("should provision storage with non-default reclaim policy Retain", func() {
-+			e2eskipper.SkipUnlessProviderIs("gce", "gke")
-+
-+			test := testsuites.StorageClassTest{
-+				Client:         c,
-+				Name:           "HDD PD on GCE/GKE",
-+				CloudProviders: []string{"gce", "gke"},
-+				Provisioner:    "kubernetes.io/gce-pd",
-+				Parameters: map[string]string{
-+					"type": "pd-standard",
-+				},
-+				ClaimSize:    "1Gi",
-+				ExpectedSize: "1Gi",
-+				PvCheck: func(claim *v1.PersistentVolumeClaim) {
-+					volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
-+					gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
-+
-+					err := checkGCEPD(volume, "pd-standard")
-+					framework.ExpectNoError(err, "checkGCEPD")
-+				},
-+			}
-+			test.Class = newStorageClass(test, ns, "reclaimpolicy")
-+			retain := v1.PersistentVolumeReclaimRetain
-+			test.Class.ReclaimPolicy = &retain
-+			test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
-+				ClaimSize:        test.ClaimSize,
-+				StorageClassName: &test.Class.Name,
-+				VolumeMode:       &test.VolumeMode,
-+			}, ns)
-+			pv := test.TestDynamicProvisioning()
-+
-+			ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
-+			framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
-+
-+			ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name))
-+			framework.ExpectNoError(e2epv.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
-+
-+			ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name))
-+			framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
-+			framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
-+		})
-+
-+		ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() {
-+			e2eskipper.SkipUnlessProviderIs("gce", "gke")
-+			var suffix string = "unmanaged"
-+
-+			ginkgo.By("Discovering an unmanaged zone")
-+			allZones := sets.NewString() // all zones in the project
-+
-+			gceCloud, err := gce.GetGCECloud()
-+			framework.ExpectNoError(err)
-+
-+			// Get all k8s-managed zones (for this test, the same as zones with nodes in them)
-+			managedZones, err := gceCloud.GetAllZonesFromCloudProvider()
-+			framework.ExpectNoError(err)
-+
-+			// Get a list of all zones in the project
-+			zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
-+			framework.ExpectNoError(err)
-+			for _, z := range zones.Items {
-+				allZones.Insert(z.Name)
-+			}
-+
-+			// Get the subset of zones not managed by k8s
-+			var unmanagedZone string
-+			var popped bool
-+			unmanagedZones := allZones.Difference(managedZones)
-+			// And select one of them at random.
-+			if unmanagedZone, popped = unmanagedZones.PopAny(); !popped {
-+				e2eskipper.Skipf("No unmanaged zones found.")
-+			}
-+
-+			ginkgo.By("Creating a StorageClass for the unmanaged zone")
-+			test := testsuites.StorageClassTest{
-+				Name:        "unmanaged_zone",
-+				Provisioner: "kubernetes.io/gce-pd",
-+				Parameters:  map[string]string{"zone": unmanagedZone},
-+				ClaimSize:   "1Gi",
-+			}
-+			sc := newStorageClass(test, ns, suffix)
-+			sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
-+			framework.ExpectNoError(err)
-+			defer deleteStorageClass(c, sc.Name)
-+
-+			ginkgo.By("Creating a claim and expecting it to timeout")
-+			pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
-+				ClaimSize:        test.ClaimSize,
-+				StorageClassName: &sc.Name,
-+				VolumeMode:       &test.VolumeMode,
-+			}, ns)
-+			pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
-+			framework.ExpectNoError(err)
-+			defer func() {
-+				framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
-+			}()
-+
-+			// The claim should time out while still in phase Pending
-+			err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
-+			framework.ExpectError(err)
-+			framework.Logf(err.Error())
-+		})
-+
-+		ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() {
-+			// This case tests for the regressions of a bug fixed by PR #21268
-+			// REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV
-+			// not being deleted.
-+			// NOTE: Polls until no PVs are detected; times out at 5 minutes.
-+
-+			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
-+
-+			const raceAttempts int = 100
-+			var residualPVs []*v1.PersistentVolume
-+			ginkgo.By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts))
-+			test := testsuites.StorageClassTest{
-+				Name:        "deletion race",
-+				Provisioner: "", // Use a native one based on current cloud provider
-+				ClaimSize:   "1Gi",
-+			}
-+
-+			class := newStorageClass(test, ns, "race")
-+			class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
-+			framework.ExpectNoError(err)
-+			defer deleteStorageClass(c, class.Name)
-+
-+			// To increase the chance of detection, attempt multiple iterations
-+			for i := 0; i < raceAttempts; i++ {
-+				prefix := fmt.Sprintf("race-%d", i)
-+				claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
-+					NamePrefix:       prefix,
-+					ClaimSize:        test.ClaimSize,
-+					StorageClassName: &class.Name,
-+					VolumeMode:       &test.VolumeMode,
-+				}, ns)
-+				tmpClaim, err := e2epv.CreatePVC(c, ns, claim)
-+				framework.ExpectNoError(err)
-+				framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
-+			}
-+
-+			ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
-+			residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
-+			// Clean up the test resources before returning
-+			defer deleteProvisionedVolumesAndDisks(c, residualPVs)
-+			framework.ExpectNoError(err, "PersistentVolumes were not deleted as expected. %d remain", len(residualPVs))
-+
-+			framework.Logf("0 PersistentVolumes remain.")
-+		})
-+
-+		ginkgo.It("deletion should be idempotent", func() {
-+			// This test ensures that deletion of a volume is idempotent.
-+			// It creates a PV with Retain policy, deletes underlying AWS / GCE
-+			// volume and changes the reclaim policy to Delete.
-+			// PV controller should delete the PV even though the underlying volume
-+			// is already deleted.
-+			e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
-+			ginkgo.By("creating PD")
-+			diskName, err := e2epv.CreatePDWithRetry()
-+			framework.ExpectNoError(err)
-+
-+			ginkgo.By("creating PV")
-+			pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
-+				NamePrefix: "volume-idempotent-delete-",
-+				// Use Retain to keep the PV, the test will change it to Delete
-+				// when the time comes.
-+				ReclaimPolicy: v1.PersistentVolumeReclaimRetain,
-+				AccessModes: []v1.PersistentVolumeAccessMode{
-+					v1.ReadWriteOnce,
-+				},
-+				Capacity: "1Gi",
-+				// The PV is bound to a non-existent PVC, so its reclaim policy is
-+				// executed immediately
-+				Prebind: &v1.PersistentVolumeClaim{
-+					ObjectMeta: metav1.ObjectMeta{
-+						Name:      "dummy-claim-name",
-+						Namespace: ns,
-+						UID:       types.UID("01234567890"),
-+					},
-+				},
-+			})
-+			switch framework.TestContext.Provider {
-+			case "aws":
-+				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
-+					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
-+						VolumeID: diskName,
-+					},
-+				}
-+			case "gce", "gke":
-+				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
-+					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
-+						PDName: diskName,
-+					},
-+				}
-+			}
-+			pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
-+			framework.ExpectNoError(err)
-+
-+			ginkgo.By("waiting for the PV to get Released")
-+			err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout)
-+			framework.ExpectNoError(err)
-+
-+			ginkgo.By("deleting the PD")
-+			err = e2epv.DeletePVSource(&pv.Spec.PersistentVolumeSource)
-+			framework.ExpectNoError(err)
-+
-+			ginkgo.By("changing the PV reclaim policy")
-+			pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
-+			framework.ExpectNoError(err)
-+			pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete
-+			pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})
-+			framework.ExpectNoError(err)
-+
-+			ginkgo.By("waiting for the PV to get deleted")
-+			err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout)
-+			framework.ExpectNoError(err)
-+		})
-+	})
-+})
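The "test := t" copy at the top of the provisioning loops above (flagged by the closure-capture comment) works around Go's loop-variable semantics before Go 1.22: every closure created in a loop shares the single loop variable, so the PvCheck callbacks would otherwise all see the last test case. A standalone illustration of the pitfall and the fix:

    package main

    import "fmt"

    func main() {
        var printers []func()
        for _, t := range []string{"gce", "aws", "azure"} {
            t := t // shadow the loop variable, as the tests do with "test := t"
            printers = append(printers, func() { fmt.Println(t) })
        }
        for _, p := range printers {
            p() // prints gce, aws, azure; without the shadow copy (pre-Go 1.22),
                // every closure would print azure
        }
    }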
-diff --git a/test/e2e/upgrades/nvidia-gpu.go b/test/e2e/upgrades/nvidia-gpu.go
-index cf3b8c0cda3..30515197ef7 100644
---- a/test/e2e/upgrades/nvidia-gpu.go
-+++ b/test/e2e/upgrades/nvidia-gpu.go
-@@ -1,3 +1,5 @@
-+// +build !providerless
-+
- /*
- Copyright 2018 The Kubernetes Authors.
- 
--- 
-2.26.2
-
diff --git a/third_party/go/patches/k8s-fix-paths.patch b/third_party/go/patches/k8s-fix-paths.patch
deleted file mode 100644
index ba39a43..0000000
--- a/third_party/go/patches/k8s-fix-paths.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From db9ab831cd17f9be540917a77bbb3e0551f4fb4f Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@brun.one>
-Date: Mon, 25 Jan 2021 15:08:20 +0100
-Subject: [PATCH] Make DeviceManager socket relative to RootDir
-
----
- pkg/kubelet/cm/container_manager_linux.go | 2 +-
- pkg/kubelet/cm/devicemanager/manager.go   | 4 ++--
- 2 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
-index 57110ed7745..15cf010074e 100644
---- a/pkg/kubelet/cm/container_manager_linux.go
-+++ b/pkg/kubelet/cm/container_manager_linux.go
-@@ -315,7 +315,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
- 
- 	klog.Infof("Creating device plugin manager: %t", devicePluginEnabled)
- 	if devicePluginEnabled {
--		cm.deviceManager, err = devicemanager.NewManagerImpl(numaNodeInfo, cm.topologyManager)
-+		cm.deviceManager, err = devicemanager.NewManagerImpl(numaNodeInfo, cm.topologyManager, nodeConfig.KubeletRootDir)
- 		cm.topologyManager.AddHintProvider(cm.deviceManager)
- 	} else {
- 		cm.deviceManager, err = devicemanager.NewManagerStub()
-diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go
-index 5d1925f9458..bfff3c50fcc 100644
---- a/pkg/kubelet/cm/devicemanager/manager.go
-+++ b/pkg/kubelet/cm/devicemanager/manager.go
-@@ -124,8 +124,8 @@ func (s *sourcesReadyStub) AddSource(source string) {}
- func (s *sourcesReadyStub) AllReady() bool          { return true }
- 
- // NewManagerImpl creates a new manager.
--func NewManagerImpl(numaNodeInfo cputopology.NUMANodeInfo, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
--	return newManagerImpl(pluginapi.KubeletSocket, numaNodeInfo, topologyAffinityStore)
-+func NewManagerImpl(numaNodeInfo cputopology.NUMANodeInfo, topologyAffinityStore topologymanager.Store, kubeletRootDir string) (*ManagerImpl, error) {
-+	return newManagerImpl(filepath.Join(kubeletRootDir, "device-plugins/kubelet.sock"), numaNodeInfo, topologyAffinityStore)
- }
- 
- func newManagerImpl(socketPath string, numaNodeInfo cputopology.NUMANodeInfo, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
--- 
-2.25.1
-
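The deleted patch above threaded the kubelet root directory down to the device plugin manager so the registration socket was no longer hard-coded under /var/lib/kubelet. A minimal sketch of the path derivation it introduced (function and variable names are illustrative, not the kubelet's own):

package main

import (
	"fmt"
	"path/filepath"
)

// devicePluginSocket derives the device-plugin socket location from the
// configured kubelet root directory instead of the pluginapi.KubeletSocket
// constant, mirroring the intent of the removed patch.
func devicePluginSocket(kubeletRootDir string) string {
	return filepath.Join(kubeletRootDir, "device-plugins", "kubelet.sock")
}

func main() {
	fmt.Println(devicePluginSocket("/data/kubernetes/kubelet"))
	// /data/kubernetes/kubelet/device-plugins/kubelet.sock
}

This approach appears to be superseded in this CL by kubelet-fix-path.patch further down, which patches the DevicePluginPath constant directly instead of plumbing the root directory through.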
diff --git a/third_party/go/patches/k8s-infra-bzl4-compat.patch b/third_party/go/patches/k8s-infra-bzl4-compat.patch
deleted file mode 100644
index 4be0fe8..0000000
--- a/third_party/go/patches/k8s-infra-bzl4-compat.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From 6313aef65ed37aa971737058af391f5be1ae976c Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@nexantic.com>
-Date: Wed, 3 Feb 2021 18:11:03 +0100
-Subject: [PATCH] Don't use run_shell with list as cmd
-
-Going upstream at https://github.com/kubernetes/repo-infra/pull/225
----
- defs/go.bzl | 26 ++++++++++++++------------
- 1 file changed, 14 insertions(+), 12 deletions(-)
-
-diff --git a/defs/go.bzl b/defs/go.bzl
-index 21cffdd..f4617e1 100644
---- a/defs/go.bzl
-+++ b/defs/go.bzl
-@@ -63,16 +63,7 @@ def _go_genrule_impl(ctx):
- 
-     srcs = [src for srcs in ctx.attr.srcs for src in srcs.files.to_list()]
- 
--    deps = depset(
--        gopath_files + srcs,
--        transitive =
--            # tools
--            [dep.files for dep in ctx.attr.tools] +
--            # go toolchain
--            [depset(go.sdk.libs + go.sdk.srcs + go.sdk.tools + [go.sdk.go])],
--    )
--
--    _, cmd, _ = ctx.resolve_command(
-+    inputs, cmd, input_manifests = ctx.resolve_command(
-         command = ctx.attr.cmd,
-         attribute = "cmd",
-         expand_locations = True,
-@@ -83,6 +74,15 @@ def _go_genrule_impl(ctx):
-         tools = ctx.attr.tools,
-     )
- 
-+    deps = depset(
-+        gopath_files + srcs + inputs,
-+        transitive =
-+            # tools
-+            [dep.files for dep in ctx.attr.tools] +
-+            # go toolchain
-+            [depset(go.sdk.libs + go.sdk.srcs + go.sdk.tools + [go.sdk.go])],
-+    )
-+
-     env = dict()
-     env.update(ctx.configuration.default_shell_env)
-     env.update(go.env)
-@@ -92,11 +92,13 @@ def _go_genrule_impl(ctx):
-         "GOROOT": paths.dirname(go.sdk.root_file.path),
-     })
- 
--    ctx.actions.run_shell(
-+    ctx.actions.run(
-         inputs = deps,
-         outputs = ctx.outputs.outs,
-         env = env,
--        command = cmd,
-+        executable = cmd[0],
-+        arguments = cmd[1:],
-+        input_manifests = input_manifests,
-         progress_message = "%s %s" % (ctx.attr.message, ctx),
-         mnemonic = "GoGenrule",
-     )
--- 
-2.25.1
-
diff --git a/third_party/go/patches/k8s-infra-fix-go116.patch b/third_party/go/patches/k8s-infra-fix-go116.patch
deleted file mode 100644
index bf35938..0000000
--- a/third_party/go/patches/k8s-infra-fix-go116.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-From 5cf776eb8f872f78cf7325c39fbfe1777f702407 Mon Sep 17 00:00:00 2001
-From: Mike Danese <mikedanese@google.com>
-Date: Fri, 26 Feb 2021 13:39:16 -0800
-Subject: [PATCH] fix go_genrule for 1.16
-
----
- defs/go.bzl | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/defs/go.bzl b/defs/go.bzl
-index f4617e1..8873253 100644
---- a/defs/go.bzl
-+++ b/defs/go.bzl
-@@ -90,6 +90,9 @@ def _go_genrule_impl(ctx):
-         "PATH": ctx.configuration.host_path_separator.join(["/usr/local/bin", "/bin", "/usr/bin"]),
-         "GOPATH": paths.dirname(gopath_placeholder.path),
-         "GOROOT": paths.dirname(go.sdk.root_file.path),
-+        # hack to tie us over until we fix this to use modules or stop using
-+        # it.
-+        "GO111MODULE": "off",
-     })
- 
-     ctx.actions.run(
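The deleted hack above pinned GO111MODULE=off in the go_genrule environment so that Go 1.16+, where module mode became the default, keeps resolving packages via GOPATH. For reference, a hedged sketch of the same override when driving the toolchain from Go code rather than from a Bazel rule:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Force GOPATH mode for a single toolchain invocation, as the removed
	// go_genrule env block did for its generator actions.
	cmd := exec.Command("go", "env", "GO111MODULE")
	cmd.Env = append(os.Environ(), "GO111MODULE=off")
	out, err := cmd.Output()
	if err != nil {
		panic(err)
	}
	fmt.Printf("GO111MODULE=%s", out)
}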
diff --git a/third_party/go/patches/k8s-jose-semver-fix.patch b/third_party/go/patches/k8s-jose-semver-fix.patch
new file mode 100644
index 0000000..423485a
--- /dev/null
+++ b/third_party/go/patches/k8s-jose-semver-fix.patch
@@ -0,0 +1,34 @@
+From b0b42e86e834a1d02fe83f7be3663d19f6a1ee80 Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Thu, 17 Mar 2022 16:56:29 +0100
+Subject: [PATCH] Fix for semver breakage in go-jose
+
+---
+ pkg/serviceaccount/claims.go | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/pkg/serviceaccount/claims.go b/pkg/serviceaccount/claims.go
+index 1e1475c779f..06620f7a420 100644
+--- a/pkg/serviceaccount/claims.go
++++ b/pkg/serviceaccount/claims.go
+@@ -50,7 +50,7 @@ type kubernetes struct {
+ 	Svcacct   ref             `json:"serviceaccount,omitempty"`
+ 	Pod       *ref            `json:"pod,omitempty"`
+ 	Secret    *ref            `json:"secret,omitempty"`
+-	WarnAfter jwt.NumericDate `json:"warnafter,omitempty"`
++	WarnAfter *jwt.NumericDate `json:"warnafter,omitempty"`
+ }
+ 
+ type ref struct {
+@@ -198,7 +198,7 @@ func (v *validator) Validate(ctx context.Context, _ string, public *jwt.Claims,
+ 
+ 	// Check special 'warnafter' field for projected service account token transition.
+ 	warnafter := private.Kubernetes.WarnAfter
+-	if warnafter != 0 {
++	if warnafter != nil {
+ 		if nowTime.After(warnafter.Time()) {
+ 			secondsAfterWarn := nowTime.Unix() - warnafter.Time().Unix()
+ 			auditInfo := fmt.Sprintf("subject: %s, seconds after warning threshold: %d", public.Subject, secondsAfterWarn)
+-- 
+2.25.1
+
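The pointer change above is the crux of the fix: newer go-jose versions make the zero NumericDate meaningful, so "claim absent" can no longer be encoded as 0. With *jwt.NumericDate, absence decodes to nil, matching the updated `warnafter != nil` check. A minimal sketch with a stand-in type (the real one is go-jose's jwt.NumericDate):

package main

import (
	"encoding/json"
	"fmt"
)

// NumericDate stands in for go-jose's jwt.NumericDate (seconds since the
// Unix epoch).
type NumericDate int64

type kubernetesClaims struct {
	// A pointer distinguishes "claim absent" (nil) from "present with
	// value 0", which a bare NumericDate cannot express.
	WarnAfter *NumericDate `json:"warnafter,omitempty"`
}

func main() {
	var absent, present kubernetesClaims
	if err := json.Unmarshal([]byte(`{}`), &absent); err != nil {
		panic(err)
	}
	fmt.Println(absent.WarnAfter == nil) // true: no warnafter claim

	if err := json.Unmarshal([]byte(`{"warnafter":1647532589}`), &present); err != nil {
		panic(err)
	}
	fmt.Println(*present.WarnAfter) // 1647532589
}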
diff --git a/third_party/go/patches/k8s-kubernetes-build.patch b/third_party/go/patches/k8s-kubernetes-build.patch
deleted file mode 100644
index 7debfda..0000000
--- a/third_party/go/patches/k8s-kubernetes-build.patch
+++ /dev/null
@@ -1,108 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-This patch updates BUILD files to reflect changes in the Go sources. It only needs to be applied because Gazelle applies patches after BUILD file generation.
-
-diff -ur io_k8s_kubernetes.orig/cmd/kubelet/app/plugins.go io_k8s_kubernetes/cmd/kubelet/app/plugins.go
---- io_k8s_kubernetes.orig/cmd/kubelet/app/BUILD 13:43:57.827669732 +0200
-+++ io_k8s_kubernetes/cmd/kubelet/app/BUILD 15:12:22.682316924 +0200
-@@ -45,8 +45,6 @@
-         "//pkg/capabilities:go_default_library",
-         "//pkg/cloudprovider/providers:go_default_library",
-         "//pkg/credentialprovider:go_default_library",
--        "//pkg/credentialprovider/aws:go_default_library",
--        "//pkg/credentialprovider/azure:go_default_library",
-         "//pkg/credentialprovider/gcp:go_default_library",
-         "//pkg/features:go_default_library",
-         "//pkg/kubelet:go_default_library",
-@@ -78,25 +76,13 @@
-         "//pkg/util/oom:go_default_library",
-         "//pkg/util/rlimit:go_default_library",
-         "//pkg/volume:go_default_library",
--        "//pkg/volume/cephfs:go_default_library",
-         "//pkg/volume/configmap:go_default_library",
-         "//pkg/volume/csi:go_default_library",
-         "//pkg/volume/downwardapi:go_default_library",
-         "//pkg/volume/emptydir:go_default_library",
--        "//pkg/volume/fc:go_default_library",
-         "//pkg/volume/flexvolume:go_default_library",
--        "//pkg/volume/flocker:go_default_library",
--        "//pkg/volume/git_repo:go_default_library",
--        "//pkg/volume/glusterfs:go_default_library",
-         "//pkg/volume/hostpath:go_default_library",
--        "//pkg/volume/iscsi:go_default_library",
-         "//pkg/volume/local:go_default_library",
--        "//pkg/volume/nfs:go_default_library",
--        "//pkg/volume/portworx:go_default_library",
-         "//pkg/volume/projected:go_default_library",
--        "//pkg/volume/quobyte:go_default_library",
--        "//pkg/volume/rbd:go_default_library",
--        "//pkg/volume/scaleio:go_default_library",
-         "//pkg/volume/secret:go_default_library",
--        "//pkg/volume/storageos:go_default_library",
-         "//pkg/volume/util/hostutil:go_default_library",
---- io_k8s_kubernetes.orig/cmd/kubelet/app/options/BUILD 13:43:57.827669732 +0200
-+++ io_k8s_kubernetes/cmd/kubelet/app/options/BUILD 15:12:22.682316924 +0200
-@@ -20,8 +20,6 @@
-     importpath = "k8s.io/kubernetes/cmd/kubelet/app/options",
-     deps = [
-         "//pkg/apis/core:go_default_library",
--        "//pkg/credentialprovider/azure:go_default_library",
--        "//pkg/credentialprovider/gcp:go_default_library",
-         "//pkg/features:go_default_library",
-         "//pkg/kubelet/apis:go_default_library",
-         "//pkg/kubelet/apis/config:go_default_library",
---- io_k8s_kubernetes.orig/pkg/kubelet/cadvisor/BUILD 13:43:57.827669732 +0200
-+++ io_k8s_kubernetes/pkg/kubelet/cadvisor/BUILD 15:12:22.682316924 +0200
-@@ -37,8 +37,6 @@
-             "@com_github_google_cadvisor//container/systemd/install:go_default_library",
-             "@com_github_google_cadvisor//fs:go_default_library",
-             "@com_github_google_cadvisor//manager:go_default_library",
--            "@com_github_google_cadvisor//utils/cloudinfo/aws:go_default_library",
--            "@com_github_google_cadvisor//utils/cloudinfo/azure:go_default_library",
-             "@com_github_google_cadvisor//utils/cloudinfo/gce:go_default_library",
-             "@com_github_google_cadvisor//utils/sysfs:go_default_library",
-             "@io_k8s_klog//:go_default_library",
-@@ -52,8 +50,6 @@
-             "@com_github_google_cadvisor//container/systemd/install:go_default_library",
-             "@com_github_google_cadvisor//fs:go_default_library",
-             "@com_github_google_cadvisor//manager:go_default_library",
--            "@com_github_google_cadvisor//utils/cloudinfo/aws:go_default_library",
--            "@com_github_google_cadvisor//utils/cloudinfo/azure:go_default_library",
-             "@com_github_google_cadvisor//utils/cloudinfo/gce:go_default_library",
-             "@com_github_google_cadvisor//utils/sysfs:go_default_library",
-             "@io_k8s_klog//:go_default_library",
---- io_k8s_kubernetes.orig/cmd/kube-controller-manager/app/BUILD 13:43:57.827669732 +0200
-+++ io_k8s_kubernetes/cmd/kube-controller-manager/app/BUILD 15:12:22.682316924 +0200
-@@ -90,19 +90,9 @@
-         "//pkg/volume:go_default_library",
-         "//pkg/volume/csi:go_default_library",
-         "//pkg/volume/csimigration:go_default_library",
--        "//pkg/volume/fc:go_default_library",
-         "//pkg/volume/flexvolume:go_default_library",
--        "//pkg/volume/flocker:go_default_library",
--        "//pkg/volume/glusterfs:go_default_library",
-         "//pkg/volume/hostpath:go_default_library",
--        "//pkg/volume/iscsi:go_default_library",
-         "//pkg/volume/local:go_default_library",
--        "//pkg/volume/nfs:go_default_library",
--        "//pkg/volume/portworx:go_default_library",
--        "//pkg/volume/quobyte:go_default_library",
--        "//pkg/volume/rbd:go_default_library",
--        "//pkg/volume/scaleio:go_default_library",
--        "//pkg/volume/storageos:go_default_library",
-         "//pkg/volume/util:go_default_library",
-         "@com_github_spf13_cobra//:go_default_library",
-         "@io_k8s_api//core/v1:go_default_library",
diff --git a/third_party/go/patches/k8s-kubernetes.patch b/third_party/go/patches/k8s-kubernetes.patch
deleted file mode 100644
index 0efaa37..0000000
--- a/third_party/go/patches/k8s-kubernetes.patch
+++ /dev/null
@@ -1,399 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-This fixes OpenAPI codegen when Kubernetes is included from the Monogon workspace. It essentially undoes vendorification.
-
-diff -ur io_k8s_kubernetes.orig/build/code_generation.bzl io_k8s_kubernetes/build/code_generation.bzl
---- io_k8s_kubernetes.orig/build/code_generation.bzl	2021-01-26 12:10:52.593725692 +0100
-+++ io_k8s_kubernetes/build/code_generation.bzl	2021-01-26 12:11:04.571786562 +0100
-@@ -27,6 +27,12 @@
-         ...
-     )
-     """
-+    if pkg.startswith('staging/src/k8s.io/'):
-+        parts = pkg.split('/', 4)
-+        project = parts[3]
-+        project = project.replace('-', '_')
-+        path = parts[4]
-+        return "@io_k8s_%s//%s:go_default_library" % (project, path)
-     return "//%s:go_default_library" % pkg
- 
- def go_pkg(pkg):
-@@ -42,6 +48,8 @@
-         ...
-     )
-     """
-+    if pkg.startswith('staging/src/'):
-+        return pkg[len('staging/src/'):]
-     for prefix in ["staging/src", "vendor"]:
-         if pkg.startswith(prefix):
-             return paths.relativize(pkg, prefix)
-@@ -49,8 +57,8 @@
- 
- def openapi_deps():
-     deps = [
--        "//vendor/github.com/go-openapi/spec:go_default_library",
--        "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library",
-+        "@com_github_go_openapi_spec//:go_default_library",
-+        "@io_k8s_kube_openapi//pkg/common:go_default_library",
-     ]
-     deps.extend([bazel_go_library(pkg) for pkg in tags_values_pkgs["openapi-gen"]["true"]])
-     return deps
-@@ -76,7 +84,7 @@
-         # the generator must run from the repo root inside the generated GOPATH.
-         # All of bazel's $(location)s are relative to the original working directory, however.
-         cmd = " ".join([
--            "$(location //vendor/k8s.io/kube-openapi/cmd/openapi-gen)",
-+            "$(location @io_k8s_kube_openapi//cmd/openapi-gen)",
-             "--v 1",
-             "--logtostderr",
-             "--go-header-file $(location //" + openapi_vendor_prefix + "hack/boilerplate:boilerplate.generatego.txt)",
-@@ -88,6 +96,6 @@
-             "&& rm tmp_api_violations.report",
-         ]),
-         go_deps = openapi_deps(),
--        tools = ["//vendor/k8s.io/kube-openapi/cmd/openapi-gen"],
-+        tools = ["@io_k8s_kube_openapi//cmd/openapi-gen"],
-         message = "GenOpenAPI",
-     )
-diff -ur io_k8s_kubernetes.orig/cmd/kube-apiserver/app/options/globalflags.go io_k8s_kubernetes/cmd/kube-apiserver/app/options/globalflags.go
---- io_k8s_kubernetes.orig/cmd/kube-apiserver/app/options/globalflags.go	2021-01-26 12:10:52.605725751 +0100
-+++ io_k8s_kubernetes/cmd/kube-apiserver/app/options/globalflags.go	2021-01-26 12:11:04.572786567 +0100
-@@ -32,9 +32,6 @@
- func AddCustomGlobalFlags(fs *pflag.FlagSet) {
- 	// Lookup flags in global flag set and re-register the values with our flagset.
- 
--	// Adds flags from k8s.io/kubernetes/pkg/cloudprovider/providers.
--	registerLegacyGlobalFlags(fs)
--
- 	// Adds flags from k8s.io/apiserver/pkg/admission.
- 	globalflag.Register(fs, "default-not-ready-toleration-seconds")
- 	globalflag.Register(fs, "default-unreachable-toleration-seconds")
-diff -ur io_k8s_kubernetes.orig/cmd/kube-controller-manager/app/controllermanager.go io_k8s_kubernetes/cmd/kube-controller-manager/app/controllermanager.go
---- io_k8s_kubernetes.orig/cmd/kube-controller-manager/app/controllermanager.go	2021-01-26 12:10:52.605725751 +0100
-+++ io_k8s_kubernetes/cmd/kube-controller-manager/app/controllermanager.go	2021-01-26 12:11:04.572786567 +0100
-@@ -140,7 +140,6 @@
- 	namedFlagSets := s.Flags(KnownControllers(), ControllersDisabledByDefault.List())
- 	verflag.AddFlags(namedFlagSets.FlagSet("global"))
- 	globalflag.AddGlobalFlags(namedFlagSets.FlagSet("global"), cmd.Name())
--	registerLegacyGlobalFlags(namedFlagSets)
- 	for _, f := range namedFlagSets.FlagSets {
- 		fs.AddFlagSet(f)
- 	}
-Only in io_k8s_kubernetes/cmd/kube-controller-manager/app: controllermanager.go.orig
-diff -ur io_k8s_kubernetes.orig/cmd/kube-controller-manager/app/plugins.go io_k8s_kubernetes/cmd/kube-controller-manager/app/plugins.go
---- io_k8s_kubernetes.orig/cmd/kube-controller-manager/app/plugins.go	2021-01-26 12:10:52.606725757 +0100
-+++ io_k8s_kubernetes/cmd/kube-controller-manager/app/plugins.go	2021-01-26 12:11:04.572786567 +0100
-@@ -32,19 +32,9 @@
- 	// Volume plugins
- 	"k8s.io/kubernetes/pkg/volume"
- 	"k8s.io/kubernetes/pkg/volume/csi"
--	"k8s.io/kubernetes/pkg/volume/fc"
- 	"k8s.io/kubernetes/pkg/volume/flexvolume"
--	"k8s.io/kubernetes/pkg/volume/flocker"
--	"k8s.io/kubernetes/pkg/volume/glusterfs"
- 	"k8s.io/kubernetes/pkg/volume/hostpath"
--	"k8s.io/kubernetes/pkg/volume/iscsi"
- 	"k8s.io/kubernetes/pkg/volume/local"
--	"k8s.io/kubernetes/pkg/volume/nfs"
--	"k8s.io/kubernetes/pkg/volume/portworx"
--	"k8s.io/kubernetes/pkg/volume/quobyte"
--	"k8s.io/kubernetes/pkg/volume/rbd"
--	"k8s.io/kubernetes/pkg/volume/scaleio"
--	"k8s.io/kubernetes/pkg/volume/storageos"
- 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
- 
- 	utilfeature "k8s.io/apiserver/pkg/util/feature"
-@@ -58,18 +48,7 @@
- // The list of plugins is manually compiled. This code and the plugin
- // initialization code for kubelet really, really need a thorough refactor.
- func ProbeAttachableVolumePlugins() ([]volume.VolumePlugin, error) {
--	var err error
- 	allPlugins := []volume.VolumePlugin{}
--	allPlugins, err = appendAttachableLegacyProviderVolumes(allPlugins, utilfeature.DefaultFeatureGate)
--	if err != nil {
--		return allPlugins, err
--	}
--	allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
- 	allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
- 	return allPlugins, nil
- }
-@@ -83,18 +62,7 @@
- 
- // ProbeExpandableVolumePlugins returns volume plugins which are expandable
- func ProbeExpandableVolumePlugins(config persistentvolumeconfig.VolumeConfiguration) ([]volume.VolumePlugin, error) {
--	var err error
- 	allPlugins := []volume.VolumePlugin{}
--	allPlugins, err = appendExpandableLegacyProviderVolumes(allPlugins, utilfeature.DefaultFeatureGate)
--	if err != nil {
--		return allPlugins, err
--	}
--	allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
- 	return allPlugins, nil
- }
- 
-@@ -124,30 +92,7 @@
- 	}
- 	allPlugins = append(allPlugins, hostpath.ProbeVolumePlugins(hostPathConfig)...)
- 
--	nfsConfig := volume.VolumeConfig{
--		RecyclerMinimumTimeout:   int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS),
--		RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS),
--		RecyclerPodTemplate:      volume.NewPersistentVolumeRecyclerPodTemplate(),
--	}
--	if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {
--		klog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
--	}
--	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
--	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
--	// add rbd provisioner
--	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, quobyte.ProbeVolumePlugins()...)
--	var err error
--	allPlugins, err = appendExpandableLegacyProviderVolumes(allPlugins, utilfeature.DefaultFeatureGate)
--	if err != nil {
--		return allPlugins, err
--	}
--
--	allPlugins = append(allPlugins, flocker.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
- 	allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
- 
- 	if utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
- 		allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
-diff -ur io_k8s_kubernetes.orig/cmd/kubectl/BUILD io_k8s_kubernetes/cmd/kubectl/BUILD
---- io_k8s_kubernetes.orig/cmd/kubectl/BUILD	2021-01-26 12:10:52.616725807 +0100
-+++ io_k8s_kubernetes/cmd/kubectl/BUILD	2021-01-26 12:11:04.572786567 +0100
-@@ -3,7 +3,7 @@
-     "go_binary",
-     "go_library",
- )
--load("//staging/src/k8s.io/component-base/version:def.bzl", "version_x_defs")
-+load("@//third_party/go:kubernetes_version_def.bzl", "version_x_defs")
- 
- go_binary(
-     name = "kubectl",
-diff -ur io_k8s_kubernetes.orig/cmd/kubelet/app/options/globalflags.go io_k8s_kubernetes/cmd/kubelet/app/options/globalflags.go
---- io_k8s_kubernetes.orig/cmd/kubelet/app/options/globalflags.go	2021-01-26 12:10:52.617725812 +0100
-+++ io_k8s_kubernetes/cmd/kubelet/app/options/globalflags.go	2021-01-26 12:12:03.724087183 +0100
-@@ -28,10 +28,6 @@
- 	"k8s.io/component-base/logs"
- 	"k8s.io/component-base/version/verflag"
- 	"k8s.io/klog/v2"
--
--	// ensure libs have a chance to globally register their flags
--	_ "k8s.io/kubernetes/pkg/credentialprovider/azure"
--	_ "k8s.io/kubernetes/pkg/credentialprovider/gcp"
- )
- 
- // AddGlobalFlags explicitly registers flags that libraries (glog, verflag, etc.) register
-@@ -80,12 +76,8 @@
- 
- // addCredentialProviderFlags adds flags from k8s.io/kubernetes/pkg/credentialprovider
- func addCredentialProviderFlags(fs *pflag.FlagSet) {
--	// lookup flags in global flag set and re-register the values with our flagset
--	global := pflag.CommandLine
- 	local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
- 
--	addLegacyCloudProviderCredentialProviderFlags(global, local)
--
- 	fs.AddFlagSet(local)
- }
- 
-diff -ur io_k8s_kubernetes.orig/cmd/kubelet/app/plugins.go io_k8s_kubernetes/cmd/kubelet/app/plugins.go
---- io_k8s_kubernetes.orig/cmd/kubelet/app/plugins.go	2021-01-26 12:10:52.617725812 +0100
-+++ io_k8s_kubernetes/cmd/kubelet/app/plugins.go	2021-01-26 12:11:04.573786572 +0100
-@@ -19,8 +19,6 @@
- // This file exists to force the desired plugin implementations to be linked.
- import (
- 	// Credential providers
--	_ "k8s.io/kubernetes/pkg/credentialprovider/aws"
--	_ "k8s.io/kubernetes/pkg/credentialprovider/azure"
- 	_ "k8s.io/kubernetes/pkg/credentialprovider/gcp"
- 
- 	"k8s.io/component-base/featuregate"
-@@ -28,27 +26,15 @@
- 
- 	// Volume plugins
- 	"k8s.io/kubernetes/pkg/volume"
--	"k8s.io/kubernetes/pkg/volume/cephfs"
- 	"k8s.io/kubernetes/pkg/volume/configmap"
- 	"k8s.io/kubernetes/pkg/volume/csi"
- 	"k8s.io/kubernetes/pkg/volume/downwardapi"
- 	"k8s.io/kubernetes/pkg/volume/emptydir"
--	"k8s.io/kubernetes/pkg/volume/fc"
- 	"k8s.io/kubernetes/pkg/volume/flexvolume"
--	"k8s.io/kubernetes/pkg/volume/flocker"
--	"k8s.io/kubernetes/pkg/volume/git_repo"
--	"k8s.io/kubernetes/pkg/volume/glusterfs"
- 	"k8s.io/kubernetes/pkg/volume/hostpath"
--	"k8s.io/kubernetes/pkg/volume/iscsi"
- 	"k8s.io/kubernetes/pkg/volume/local"
--	"k8s.io/kubernetes/pkg/volume/nfs"
--	"k8s.io/kubernetes/pkg/volume/portworx"
- 	"k8s.io/kubernetes/pkg/volume/projected"
--	"k8s.io/kubernetes/pkg/volume/quobyte"
--	"k8s.io/kubernetes/pkg/volume/rbd"
--	"k8s.io/kubernetes/pkg/volume/scaleio"
- 	"k8s.io/kubernetes/pkg/volume/secret"
--	"k8s.io/kubernetes/pkg/volume/storageos"
- 
- 	// Cloud providers
- 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers"
-@@ -64,30 +50,13 @@
- 	//
- 	// Kubelet does not currently need to configure volume plugins.
- 	// If/when it does, see kube-controller-manager/app/plugins.go for example of using volume.VolumeConfig
--	var err error
--	allPlugins, err = appendLegacyProviderVolumes(allPlugins, featureGate)
--	if err != nil {
--		return allPlugins, err
--	}
- 	allPlugins = append(allPlugins, emptydir.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...)
- 	allPlugins = append(allPlugins, hostpath.ProbeVolumePlugins(volume.VolumeConfig{})...)
--	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
- 	allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, quobyte.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
- 	allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, flocker.ProbeVolumePlugins()...)
- 	allPlugins = append(allPlugins, configmap.ProbeVolumePlugins()...)
- 	allPlugins = append(allPlugins, projected.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
- 	allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
--	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
- 	allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
- 	return allPlugins, nil
- }
-diff -ur io_k8s_kubernetes.orig/cmd/kubelet/BUILD io_k8s_kubernetes/cmd/kubelet/BUILD
---- io_k8s_kubernetes.orig/cmd/kubelet/BUILD	2021-01-26 12:10:52.616725807 +0100
-+++ io_k8s_kubernetes/cmd/kubelet/BUILD	2021-01-26 12:11:04.573786572 +0100
-@@ -5,7 +5,7 @@
-     go_binary = "go_binary_conditional_pure",
- )
- load("@io_bazel_rules_go//go:def.bzl", "go_library")
--load("//staging/src/k8s.io/component-base/version:def.bzl", "version_x_defs")
-+load("@//third_party/go:kubernetes_version_def.bzl", "version_x_defs")
- 
- go_binary(
-     name = "kubelet",
-diff -ur io_k8s_kubernetes.orig/pkg/controller/nodeipam/ipam/cidr_allocator.go io_k8s_kubernetes/pkg/controller/nodeipam/ipam/cidr_allocator.go
---- io_k8s_kubernetes.orig/pkg/controller/nodeipam/ipam/cidr_allocator.go	2021-01-26 12:10:52.664726051 +0100
-+++ io_k8s_kubernetes/pkg/controller/nodeipam/ipam/cidr_allocator.go	2021-01-26 12:11:04.573786572 +0100
-@@ -112,8 +112,6 @@
- 	switch allocatorType {
- 	case RangeAllocatorType:
- 		return NewCIDRRangeAllocator(kubeClient, nodeInformer, allocatorParams, nodeList)
--	case CloudAllocatorType:
--		return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
- 	default:
- 		return nil, fmt.Errorf("invalid CIDR allocator type: %v", allocatorType)
- 	}
-Only in io_k8s_kubernetes/pkg/controller/nodeipam/ipam: cidr_allocator.go.orig
-diff -ur io_k8s_kubernetes.orig/pkg/controller/nodeipam/nolegacyprovider.go io_k8s_kubernetes/pkg/controller/nodeipam/nolegacyprovider.go
---- io_k8s_kubernetes.orig/pkg/controller/nodeipam/nolegacyprovider.go	2021-01-26 12:10:52.665726056 +0100
-+++ io_k8s_kubernetes/pkg/controller/nodeipam/nolegacyprovider.go	2021-01-26 12:11:04.573786572 +0100
-@@ -1,5 +1,3 @@
--// +build providerless
--
- /*
- Copyright 2019 The Kubernetes Authors.
- 
-diff -ur io_k8s_kubernetes.orig/pkg/kubelet/cadvisor/cadvisor_linux.go io_k8s_kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go
---- io_k8s_kubernetes.orig/pkg/kubelet/cadvisor/cadvisor_linux.go	2021-01-26 12:10:52.676726112 +0100
-+++ io_k8s_kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go	2021-01-26 12:11:04.573786572 +0100
-@@ -33,8 +33,6 @@
- 
- 	// Register cloud info providers.
- 	// TODO(#68522): Remove this in 1.20+ once the cAdvisor endpoints are removed.
--	_ "github.com/google/cadvisor/utils/cloudinfo/aws"
--	_ "github.com/google/cadvisor/utils/cloudinfo/azure"
- 	_ "github.com/google/cadvisor/utils/cloudinfo/gce"
- 
- 	"github.com/google/cadvisor/cache/memory"
-Only in io_k8s_kubernetes/pkg/kubelet/cadvisor: cadvisor_linux.go.orig
-diff -ur io_k8s_kubernetes.orig/test/e2e/BUILD io_k8s_kubernetes/test/e2e/BUILD
---- io_k8s_kubernetes.orig/test/e2e/BUILD	2021-01-26 12:10:52.736726417 +0100
-+++ io_k8s_kubernetes/test/e2e/BUILD	2021-01-26 12:11:04.573786572 +0100
-@@ -5,7 +5,7 @@
-     go_test = "go_test_conditional_pure",
- )
- load("@io_bazel_rules_go//go:def.bzl", "go_library")
--load("//staging/src/k8s.io/component-base/version:def.bzl", "version_x_defs")
-+load("@//third_party/go:kubernetes_version_def.bzl", "version_x_defs")
- 
- go_test(
-     name = "go_default_test",
-diff -ur io_k8s_kubernetes.orig/test/e2e/generated/BUILD io_k8s_kubernetes/test/e2e/generated/BUILD
---- io_k8s_kubernetes.orig/test/e2e/generated/BUILD	2021-01-26 12:10:52.743726453 +0100
-+++ io_k8s_kubernetes/test/e2e/generated/BUILD	2021-01-26 12:11:04.573786572 +0100
-@@ -4,23 +4,24 @@
-     "@io_bazel_rules_go//go:def.bzl",
-     "go_library",
- )
--load("//build:bindata.bzl", "go_bindata")
-+load("@dev_source_monogon//build/bindata:bindata.bzl", "bindata")
- 
- go_library(
-     name = "go_default_library",
-     srcs = [
--        "bindata.go",
-         "gobindata_util.go",
-         "main.go",
-     ],
-+    embed = [
-+        ":bindata",
-+    ],
-     importpath = "k8s.io/kubernetes/test/e2e/generated",
-     deps = [
-         "@io_k8s_klog_v2//:go_default_library",
-     ],
- )
- 
--# IMPORTANT: if you make any changes here, you must also update hack/generate-bindata.sh.
--go_bindata(
-+bindata(
-     name = "bindata",
-     srcs = [
-         "//test/conformance/testdata:all-srcs",
-@@ -29,9 +30,7 @@
-         "//test/fixtures:all-srcs",
-         "//test/images:all-srcs",
-     ],
--    outs = ["bindata.go"],
--    compress = True,
--    include_metadata = False,
-+    package = "generated",
- )
- 
- filegroup(
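The first hunk of the deleted patch above rewrote staging-tree packages to external Bazel repositories instead of vendored paths. To make that transformation concrete, here is a Go re-sketch of the Starlark bazel_go_library helper from build/code_generation.bzl (a port for illustration, not code from either tree):

package main

import (
	"fmt"
	"strings"
)

// bazelGoLibrary mirrors the deleted Starlark helper: packages under
// staging/src/k8s.io/<project>/... are redirected to their external
// @io_k8s_<project> repository; everything else stays a local label.
func bazelGoLibrary(pkg string) string {
	const prefix = "staging/src/k8s.io/"
	if strings.HasPrefix(pkg, prefix) {
		parts := strings.SplitN(strings.TrimPrefix(pkg, prefix), "/", 2)
		project := strings.ReplaceAll(parts[0], "-", "_")
		return fmt.Sprintf("@io_k8s_%s//%s:go_default_library", project, parts[1])
	}
	return fmt.Sprintf("//%s:go_default_library", pkg)
}

func main() {
	fmt.Println(bazelGoLibrary("staging/src/k8s.io/component-base/version"))
	// @io_k8s_component_base//version:go_default_library
	fmt.Println(bazelGoLibrary("pkg/kubelet"))
	// //pkg/kubelet:go_default_library
}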
diff --git a/third_party/go/patches/k8s-native-metrics.patch b/third_party/go/patches/k8s-native-metrics.patch
index 859ee74..2edc60d 100644
--- a/third_party/go/patches/k8s-native-metrics.patch
+++ b/third_party/go/patches/k8s-native-metrics.patch
@@ -1,59 +1,15 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From b16e57cc52a437465bbd12c24fb05fe5790afe1d Mon Sep 17 00:00:00 2001
+From e3b5a31bff00c89fc95f85212bf0943d46692616 Mon Sep 17 00:00:00 2001
 From: Lorenz Brun <lorenz@brun.one>
 Date: Tue, 17 Mar 2020 21:41:08 +0100
-Subject: [PATCH 2/3] Add a native volume metrics implementation
+Subject: [PATCH 2/2] Add a native volume metrics implementation
 
 ---
- pkg/volume/BUILD                  |   3 +
  pkg/volume/metrics_native.go      | 101 +++++++++++++++++++++++++++++
  pkg/volume/metrics_native_test.go | 102 ++++++++++++++++++++++++++++++
- 3 files changed, 206 insertions(+)
+ 2 files changed, 203 insertions(+)
  create mode 100644 pkg/volume/metrics_native.go
  create mode 100644 pkg/volume/metrics_native_test.go
 
-diff --git a/pkg/volume/BUILD b/pkg/volume/BUILD
-index 720b13406dc..b6e4b7e6d6f 100644
---- a/pkg/volume/BUILD
-+++ b/pkg/volume/BUILD
-@@ -7,6 +7,7 @@ go_library(
-         "metrics_cached.go",
-         "metrics_du.go",
-         "metrics_errors.go",
-+        "metrics_native.go",
-         "metrics_nil.go",
-         "metrics_statfs.go",
-         "noop_expandable_plugin.go",
-@@ -35,6 +36,7 @@ go_library(
-         "@io_k8s_client_go//tools/cache:go_default_library",
-         "@io_k8s_client_go//tools/record:go_default_library",
-         "@io_k8s_cloud_provider//:go_default_library",
-+        "@org_golang_x_sys//unix:go_default_library",
-         "@io_k8s_klog_v2//:go_default_library",
-         "@io_k8s_utils//exec:go_default_library",
-         "@io_k8s_utils//mount:go_default_library",
-@@ -55,6 +57,7 @@ go_test(
-     name = "go_default_test",
-     srcs = [
-         "metrics_du_test.go",
-+        "metrics_native_test.go",
-         "metrics_nil_test.go",
-         "metrics_statfs_test.go",
-         "plugins_test.go",
 diff --git a/pkg/volume/metrics_native.go b/pkg/volume/metrics_native.go
 new file mode 100644
 index 00000000000..3934b946f2e
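The rebased patch above still adds metrics_native.go, a native replacement for du-based volume metrics; the BUILD hunk disappears, presumably because Gazelle now regenerates the build files. The implementation itself is not visible in this diff; as a loose sketch of the technique, assuming it walks the volume via x/sys/unix instead of exec'ing `du`:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// diskUsage sums block usage and inode count by walking the tree with
// lstat instead of shelling out to `du`. A real implementation would also
// deduplicate hard links and tolerate files vanishing mid-walk.
func diskUsage(root string) (bytes, inodes int64, err error) {
	err = filepath.WalkDir(root, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		var st unix.Stat_t
		if err := unix.Lstat(path, &st); err != nil {
			return err
		}
		bytes += st.Blocks * 512 // st_blocks is in 512-byte units
		inodes++
		return nil
	})
	return bytes, inodes, err
}

func main() {
	b, n, err := diskUsage("/tmp")
	if err != nil {
		panic(err)
	}
	fmt.Printf("used=%d bytes across %d inodes\n", b, n)
}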
diff --git a/third_party/go/patches/k8s-native-mounter.patch b/third_party/go/patches/k8s-native-mounter.patch
index 2f754b6..a5ced4d 100644
--- a/third_party/go/patches/k8s-native-mounter.patch
+++ b/third_party/go/patches/k8s-native-mounter.patch
@@ -1,69 +1,33 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From 8335005ed1983ca5ac036af15dd04b8717898c35 Mon Sep 17 00:00:00 2001
+From 6c346b4fbfd800af47ffa2ec24456f9f58a1b0f2 Mon Sep 17 00:00:00 2001
 From: Lorenz Brun <lorenz@brun.one>
 Date: Mon, 16 Mar 2020 22:13:08 +0100
-Subject: [PATCH 1/3] Provide native mounter implementation for Linux
+Subject: [PATCH 1/7] Provide native mounter implementation for Linux
 
 ---
- BUILD.bazel          |   2 +
- mount/mount_linux.go | 141 ++++++++++++++++++++++-
- 2 files changed, 139 insertions(+), 4 deletions(-)
+ mount_linux.go | 148 +++++++++++++++++-
+ 1 file changed, 144 insertions(+), 4 deletions(-)
 
-diff --git a/mount/BUILD b/mount/BUILD.bazel
-index bef3ec2cf55..6f997103dac 100644
---- a/mount/BUILD.bazel
-+++ b/mount/BUILD.bazel
-@@ -21,6 +21,7 @@ go_library(
-         "//exec:go_default_library",
-     ] + select({
-         "@io_bazel_rules_go//go/platform:android": [
-+            "@org_golang_x_sys//unix:go_default_library",
-             "//io:go_default_library",
-         ],
-         "@io_bazel_rules_go//go/platform:darwin": [
-@@ -36,6 +37,7 @@ go_library(
-             "//io:go_default_library",
-         ],
-         "@io_bazel_rules_go//go/platform:linux": [
-+            "@org_golang_x_sys//unix:go_default_library",
-             "//io:go_default_library",
-         ],
-         "@io_bazel_rules_go//go/platform:nacl": [
-diff --git a/mount/mount_linux.go b/mount/mount_linux.go
-index 41f69efe3f0..01182684653 100644
---- a/mount/mount_linux.go
-+++ b/mount/mount_linux.go
-@@ -20,6 +20,7 @@ package mount
- 
+diff --git a/mount_linux.go b/mount_linux.go
+index aaa592161d4..517bf0b2541 100644
+--- a/mount_linux.go
++++ b/mount_linux.go
+@@ -22,6 +22,7 @@ package mount
  import (
+ 	"context"
  	"fmt"
 +	"io/ioutil"
  	"os"
  	"os/exec"
  	"path/filepath"
-@@ -27,6 +28,7 @@ import (
- 	"strings"
+@@ -30,6 +31,7 @@ import (
  	"syscall"
+ 	"time"
  
 +	"golang.org/x/sys/unix"
  	"k8s.io/klog/v2"
  	utilexec "k8s.io/utils/exec"
  	utilio "k8s.io/utils/io"
-@@ -49,8 +51,10 @@ const (
+@@ -54,8 +56,10 @@ const (
  // for the linux platform.  This implementation assumes that the
  // kubelet is running in the host's root mount namespace.
  type Mounter struct {
@@ -75,8 +39,8 @@
 +	nativeSupportedFstypes map[string]struct{}
  }
  
- // New returns a mount.Interface for the current system.
-@@ -58,8 +62,10 @@ type Mounter struct {
+ var _ MounterForceUnmounter = &Mounter{}
+@@ -65,11 +69,36 @@ var _ MounterForceUnmounter = &Mounter{}
  // mounterPath allows using an alternative to `/bin/mount` for mounting.
  func New(mounterPath string) Interface {
  	return &Mounter{
@@ -89,38 +53,55 @@
  	}
  }
  
-@@ -78,6 +84,29 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
++func (mounter *Mounter) mountNative(source string, target string, fstype string, options []string, sensitiveOptions []string) error {
++	flags, pflags, fsoptions := parseMountOptions(options)
++	if len(pflags) > 0 {
++		return fmt.Errorf("the native mounter is active and does not support mount propagation at the moment")
++	}
++
++	if !mounter.nativeSupportsFstype(fstype) && flags&unix.MS_BIND == 0 {
++		return fmt.Errorf("the native mounter is active and cannot mount filesystems of type \"%v\"", fstype)
++	}
++
++	if flags&unix.MS_BIND != 0 && flags & ^uintptr(unix.MS_BIND) != 0 {
++		if err := unix.Mount(source, target, "", unix.MS_BIND, ""); err != nil {
++			return fmt.Errorf("bind pre-mount failed: %w", err)
++		}
++		flags |= unix.MS_REMOUNT
++	}
++
++	if err := unix.Mount(source, target, fstype, flags, fsoptions); err != nil {
++		return fmt.Errorf("failed to mount filesystem: %w", err)
++	}
++	return nil
++}
++
+ // Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
+ // be an empty string in case it's not required, e.g. for remount, or for auto filesystem
+ // type, where kernel handles fstype for you. The mount 'options' is a list of options,
+@@ -85,6 +114,10 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
  // method should be used by callers that pass sensitive material (like
  // passwords) as mount options.
  func (mounter *Mounter) MountSensitive(source string, target string, fstype string, options []string, sensitiveOptions []string) error {
 +	if !mounter.withLinuxUtils {
-+		flags, pflags, fsoptions := parseMountOptions(options)
-+		if len(pflags) > 0 {
-+			return fmt.Errorf("the native mounter is active and does not support mount propagation at the moment")
-+		}
-+
-+		if !mounter.nativeSupportsFstype(fstype) && flags&unix.MS_BIND == 0 {
-+			return fmt.Errorf("the native mounter is active and cannot mount filesystems of type \"%v\"", fstype)
-+		}
-+
-+		if flags&unix.MS_BIND != 0 && flags & ^uintptr(unix.MS_BIND) != 0 {
-+			if err := unix.Mount(source, target, "", unix.MS_BIND, ""); err != nil {
-+				return fmt.Errorf("bind pre-mount failed: %w", err)
-+			}
-+			flags |= unix.MS_REMOUNT
-+		}
-+
-+		if err := unix.Mount(source, target, fstype, flags, fsoptions); err != nil {
-+			return fmt.Errorf("failed to mount filesystem: %w", err)
-+		}
-+		return nil
++		return mounter.mountNative(source, target, fstype, options, sensitiveOptions)
 +	}
 +
  	// Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty.
 	// All Linux distros are expected to be shipped with a mount utility that supports bind mounts.
  	mounterPath := ""
-@@ -102,6 +131,80 @@ func (mounter *Mounter) MountSensitive(source string, target string, fstype stri
- 	return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions)
+@@ -116,6 +149,9 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin
+ 
+ // MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd with additional mount flags.
+ func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error {
++	if !mounter.withLinuxUtils {
++		return mounter.mountNative(source, target, fstype, options, sensitiveOptions)
++	}
+ 	mounterPath := ""
+ 	bind, bindOpts, bindRemountOpts, bindRemountOptsSensitive := MakeBindOptsSensitive(options, sensitiveOptions)
+ 	if bind {
+@@ -138,6 +174,80 @@ func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string
+ 	return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, mountFlags, false)
  }
  
 +// nativeSupportsFstype checks if the native mounter can mount the given fstype
@@ -198,9 +179,9 @@
 +}
 +
  // doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used.
- // sensitiveOptions is an extention of options except they will not be logged (because they may contain sensitive material)
- func (mounter *Mounter) doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string, sensitiveOptions []string) error {
-@@ -179,6 +282,30 @@ func detectSystemd() bool {
+ // sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material)
+ // systemdMountRequired is an extension of option to decide whether uses systemd mount.
+@@ -223,6 +333,30 @@ func detectSystemd() bool {
  	return true
  }
  
@@ -231,7 +212,7 @@
  // MakeMountArgs makes the arguments to the mount(8) command.
  // options MUST not contain sensitive material (like passwords).
  func MakeMountArgs(source, target, fstype string, options []string) (mountArgs []string) {
-@@ -236,6 +363,12 @@ func AddSystemdScopeSensitive(systemdRunPath, mountName, command string, args []
+@@ -292,6 +426,12 @@ func AddSystemdScopeSensitive(systemdRunPath, mountName, command string, args []
  // Unmount unmounts the target.
  func (mounter *Mounter) Unmount(target string) error {
  	klog.V(4).Infof("Unmounting %s", target)
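The refactored hunks above route both MountSensitive and the new MountSensitiveWithoutSystemdWithMountFlags through a shared mountNative helper. mountNative relies on a parseMountOptions helper whose body is not part of the visible context; a hedged sketch of what such a helper does (the real option table is longer):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
)

// parseMountOptions splits a mount option list into mount(2) flags,
// propagation flags (which mountNative rejects), and the remaining
// comma-joined data string passed to the kernel. Illustrative subset only.
func parseMountOptions(options []string) (flags, pflags uintptr, fsoptions string) {
	flagTable := map[string]uintptr{
		"ro":      unix.MS_RDONLY,
		"nosuid":  unix.MS_NOSUID,
		"nodev":   unix.MS_NODEV,
		"noexec":  unix.MS_NOEXEC,
		"bind":    unix.MS_BIND,
		"remount": unix.MS_REMOUNT,
	}
	propagationTable := map[string]uintptr{
		"shared":  unix.MS_SHARED,
		"rshared": unix.MS_SHARED | unix.MS_REC,
		"slave":   unix.MS_SLAVE,
		"private": unix.MS_PRIVATE,
	}
	var data []string
	for _, opt := range options {
		switch {
		case flagTable[opt] != 0:
			flags |= flagTable[opt]
		case propagationTable[opt] != 0:
			pflags |= propagationTable[opt]
		default:
			data = append(data, opt)
		}
	}
	return flags, pflags, strings.Join(data, ",")
}

func main() {
	flags, pflags, data := parseMountOptions([]string{"ro", "nosuid", "discard"})
	fmt.Printf("flags=%#x pflags=%#x data=%q\n", flags, pflags, data)
}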
diff --git a/third_party/go/patches/k8s-revert-seccomp-runtime-default.patch b/third_party/go/patches/k8s-revert-seccomp-runtime-default.patch
deleted file mode 100644
index d8377b5..0000000
--- a/third_party/go/patches/k8s-revert-seccomp-runtime-default.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From 2adf4ec9796839014a708761b8fb6ad815306def Mon Sep 17 00:00:00 2001
-From: Serge Bazanski <serge@nexantic.com>
-Date: Tue, 26 Jan 2021 11:37:01 +0100
-Subject: [PATCH] Manually revert 
- https://github.com/kubernetes/kubernetes/pull/90949
-
-This reverts PR 90949, which breaks runc within Metropolis. See T916.
-
----
- pkg/kubelet/kuberuntime/kuberuntime_sandbox.go | 7 ++-----
- 1 file changed, 2 insertions(+), 5 deletions(-)
-
-diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
-index 0978044f753..c46436f2a41 100644
---- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
-+++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
-@@ -148,11 +148,8 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (
- 	lc := &runtimeapi.LinuxPodSandboxConfig{
- 		CgroupParent: cgroupParent,
- 		SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
--			Privileged: kubecontainer.HasPrivilegedContainer(pod),
--
--			// Forcing sandbox to run as `runtime/default` allow users to
--			// use least privileged seccomp profiles at pod level. Issue #84623
--			SeccompProfilePath: v1.SeccompProfileRuntimeDefault,
-+			Privileged:         kubecontainer.HasPrivilegedContainer(pod),
-+			SeccompProfilePath: m.getSeccompProfile(pod.Annotations, "", pod.Spec.SecurityContext, nil),
- 		},
- 	}
- 
--- 
-2.26.2
-
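For context on the (now dropped) revert above: instead of pinning the sandbox to runtime/default, the patched kubelet resolved the profile from pod metadata. A loose sketch of that precedence — not the actual kubelet code; the alpha annotation key is the one kubelets of that era honored:

package main

import "fmt"

// seccompProfileForPod resolves a pod-level seccomp profile the way the
// reverted code path did: annotation first, then the pod security
// context, otherwise unconfined (empty string).
func seccompProfileForPod(annotations map[string]string, securityContextProfile string) string {
	if p, ok := annotations["seccomp.security.alpha.kubernetes.io/pod"]; ok {
		return p
	}
	if securityContextProfile != "" {
		return securityContextProfile
	}
	return "" // unconfined
}

func main() {
	fmt.Println(seccompProfileForPod(nil, "") == "") // true: unconfined
	fmt.Println(seccompProfileForPod(map[string]string{
		"seccomp.security.alpha.kubernetes.io/pod": "runtime/default",
	}, ""))
}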
diff --git a/third_party/go/patches/k8s-use-native.patch b/third_party/go/patches/k8s-use-native.patch
deleted file mode 100644
index 61001da..0000000
--- a/third_party/go/patches/k8s-use-native.patch
+++ /dev/null
@@ -1,153 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-From ee4a7df588550ee5cbc3b8419e1ce185a8abb302 Mon Sep 17 00:00:00 2001
-From: Lorenz Brun <lorenz@brun.one>
-Date: Tue, 17 Mar 2020 22:07:24 +0100
-Subject: [PATCH 3/3] Use StatFS and Native volume metrics instead of du
-
----
- pkg/kubelet/stats/log_metrics_provider.go | 2 +-
- pkg/volume/configmap/configmap.go         | 4 ++--
- pkg/volume/downwardapi/downwardapi.go     | 4 ++--
- pkg/volume/emptydir/empty_dir.go          | 4 ++--
- pkg/volume/projected/projected.go         | 4 ++--
- pkg/volume/secret/secret.go               | 4 ++--
- 6 files changed, 11 insertions(+), 11 deletions(-)
-
-diff --git a/pkg/kubelet/stats/log_metrics_provider.go b/pkg/kubelet/stats/log_metrics_provider.go
-index 4a53eef74a3..ff87fec5ec3 100644
---- a/pkg/kubelet/stats/log_metrics_provider.go
-+++ b/pkg/kubelet/stats/log_metrics_provider.go
-@@ -33,5 +33,5 @@ func NewLogMetricsService() LogMetricsService {
- }
- 
- func (l logMetrics) createLogMetricsProvider(path string) volume.MetricsProvider {
--	return volume.NewMetricsDu(path)
-+	return volume.NewMetricsNative(path)
- }
-diff --git a/pkg/volume/configmap/configmap.go b/pkg/volume/configmap/configmap.go
-index 0e74dd0a1d8..430d739aab7 100644
---- a/pkg/volume/configmap/configmap.go
-+++ b/pkg/volume/configmap/configmap.go
-@@ -97,7 +97,7 @@ func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts v
- 			pod.UID,
- 			plugin,
- 			plugin.host.GetMounter(plugin.GetPluginName()),
--			volume.NewCachedMetrics(volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host))),
-+			volume.NewCachedMetrics(volume.NewMetricsNative(getPath(pod.UID, spec.Name(), plugin.host))),
- 		},
- 		source:       *spec.Volume.ConfigMap,
- 		pod:          *pod,
-@@ -113,7 +113,7 @@ func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (v
- 			podUID,
- 			plugin,
- 			plugin.host.GetMounter(plugin.GetPluginName()),
--			volume.NewCachedMetrics(volume.NewMetricsDu(getPath(podUID, volName, plugin.host))),
-+			volume.NewCachedMetrics(volume.NewMetricsNative(getPath(podUID, volName, plugin.host))),
- 		},
- 	}, nil
- }
-diff --git a/pkg/volume/downwardapi/downwardapi.go b/pkg/volume/downwardapi/downwardapi.go
-index a1779c0dac9..f0a0f99b318 100644
---- a/pkg/volume/downwardapi/downwardapi.go
-+++ b/pkg/volume/downwardapi/downwardapi.go
-@@ -99,7 +99,7 @@ func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts
- 		pod:             pod,
- 		podUID:          pod.UID,
- 		plugin:          plugin,
--		MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host))),
-+		MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsStatFS(getPath(pod.UID, spec.Name(), plugin.host))),
- 	}
- 	return &downwardAPIVolumeMounter{
- 		downwardAPIVolume: v,
-@@ -114,7 +114,7 @@ func (plugin *downwardAPIPlugin) NewUnmounter(volName string, podUID types.UID)
- 			volName:         volName,
- 			podUID:          podUID,
- 			plugin:          plugin,
--			MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsDu(getPath(podUID, volName, plugin.host))),
-+			MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host))),
- 		},
- 	}, nil
- }
-diff --git a/pkg/volume/emptydir/empty_dir.go b/pkg/volume/emptydir/empty_dir.go
-index 0a25d2b684c..5dc83b90c5b 100644
---- a/pkg/volume/emptydir/empty_dir.go
-+++ b/pkg/volume/emptydir/empty_dir.go
-@@ -121,7 +121,7 @@ func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod,
- 		mounter:         mounter,
- 		mountDetector:   mountDetector,
- 		plugin:          plugin,
--		MetricsProvider: volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host)),
-+		MetricsProvider: volume.NewMetricsNative(getPath(pod.UID, spec.Name(), plugin.host)),
- 	}, nil
- }
- 
-@@ -138,7 +138,7 @@ func (plugin *emptyDirPlugin) newUnmounterInternal(volName string, podUID types.
- 		mounter:         mounter,
- 		mountDetector:   mountDetector,
- 		plugin:          plugin,
--		MetricsProvider: volume.NewMetricsDu(getPath(podUID, volName, plugin.host)),
-+		MetricsProvider: volume.NewMetricsNative(getPath(podUID, volName, plugin.host)),
- 	}
- 	return ed, nil
- }
-diff --git a/pkg/volume/projected/projected.go b/pkg/volume/projected/projected.go
-index 0f65a97610c..890f9c1c7bc 100644
---- a/pkg/volume/projected/projected.go
-+++ b/pkg/volume/projected/projected.go
-@@ -114,7 +114,7 @@ func (plugin *projectedPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts v
- 			sources:         spec.Volume.Projected.Sources,
- 			podUID:          pod.UID,
- 			plugin:          plugin,
--			MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host))),
-+			MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsStatFS(getPath(pod.UID, spec.Name(), plugin.host))),
- 		},
- 		source: *spec.Volume.Projected,
- 		pod:    pod,
-@@ -128,7 +128,7 @@ func (plugin *projectedPlugin) NewUnmounter(volName string, podUID types.UID) (v
- 			volName:         volName,
- 			podUID:          podUID,
- 			plugin:          plugin,
--			MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsDu(getPath(podUID, volName, plugin.host))),
-+			MetricsProvider: volume.NewCachedMetrics(volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host))),
- 		},
- 	}, nil
- }
-diff --git a/pkg/volume/secret/secret.go b/pkg/volume/secret/secret.go
-index a195c59ddd8..4c290cb8f24 100644
---- a/pkg/volume/secret/secret.go
-+++ b/pkg/volume/secret/secret.go
-@@ -100,7 +100,7 @@ func (plugin *secretPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volu
- 			pod.UID,
- 			plugin,
- 			plugin.host.GetMounter(plugin.GetPluginName()),
--			volume.NewCachedMetrics(volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host))),
-+			volume.NewCachedMetrics(volume.NewMetricsStatFS(getPath(pod.UID, spec.Name(), plugin.host))),
- 		},
- 		source:    *spec.Volume.Secret,
- 		pod:       *pod,
-@@ -116,7 +116,7 @@ func (plugin *secretPlugin) NewUnmounter(volName string, podUID types.UID) (volu
- 			podUID,
- 			plugin,
- 			plugin.host.GetMounter(plugin.GetPluginName()),
--			volume.NewCachedMetrics(volume.NewMetricsDu(getPath(podUID, volName, plugin.host))),
-+			volume.NewCachedMetrics(volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host))),
- 		},
- 	}, nil
- }
--- 
-2.25.1
-
diff --git a/third_party/go/patches/kubelet-fix-path.patch b/third_party/go/patches/kubelet-fix-path.patch
new file mode 100644
index 0000000..069dece
--- /dev/null
+++ b/third_party/go/patches/kubelet-fix-path.patch
@@ -0,0 +1,25 @@
+From 500aace7a0bf5904c740e14ad18cd073df080d37 Mon Sep 17 00:00:00 2001
+From: Lorenz Brun <lorenz@monogon.tech>
+Date: Wed, 16 Mar 2022 18:05:54 +0100
+Subject: [PATCH] Change device plugin path
+
+---
+ pkg/apis/deviceplugin/v1beta1/constants.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/pkg/apis/deviceplugin/v1beta1/constants.go b/pkg/apis/deviceplugin/v1beta1/constants.go
+index 3bed214..30f06fe 100644
+--- a/pkg/apis/deviceplugin/v1beta1/constants.go
++++ b/pkg/apis/deviceplugin/v1beta1/constants.go
+@@ -27,7 +27,7 @@ const (
+ 	// DevicePluginPath is the folder the Device Plugin is expecting sockets to be on
+ 	// Only privileged pods have access to this path
+ 	// Note: Placeholder until we find a "standard path"
+-	DevicePluginPath = "/var/lib/kubelet/device-plugins/"
++	DevicePluginPath = "/data/kubernetes/kubelet/device-plugins/"
+ 	// KubeletSocket is the path of the Kubelet registry socket
+ 	KubeletSocket = DevicePluginPath + "kubelet.sock"
+ 
+-- 
+2.25.1
+
diff --git a/third_party/go/patches/libseccomp.patch b/third_party/go/patches/libseccomp.patch
new file mode 100644
index 0000000..0778f33
--- /dev/null
+++ b/third_party/go/patches/libseccomp.patch
@@ -0,0 +1,12 @@
+--- a/BUILD.bazel  2022-03-21 17:04:56.160536936 +0100
++++ b/BUILD.bazel   2022-03-21 17:05:52.439616250 +0100
+@@ -6,6 +6,9 @@
+         "seccomp.go",
+         "seccomp_internal.go",
+     ],
++    cdeps = [
++        "@seccomp",
++    ],
+     cgo = True,
+     importpath = "github.com/seccomp/libseccomp-golang",
+     visibility = ["//visibility:public"],
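rules_go builds cgo code hermetically, so libseccomp-golang cannot pick libseccomp off the host; the cdeps attribute above links the Bazel-built @seccomp target into the cgo compile instead (runc-add-cdeps.patch below applies the same fix to runc's patchbpf package). Outside Bazel the equivalent is a plain #cgo directive; a minimal sketch, assuming libseccomp headers are installed:

package main

// Under rules_go, the @seccomp cdep replaces this LDFLAGS line; the
// #include is what makes the cdep necessary in the first place.

// #cgo LDFLAGS: -lseccomp
// #include <seccomp.h>
import "C"

import "fmt"

func main() {
	v := C.seccomp_version()
	fmt.Printf("libseccomp %d.%d.%d\n", v.major, v.minor, v.micro)
}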
diff --git a/third_party/go/patches/nfproxy.patch b/third_party/go/patches/nfproxy.patch
deleted file mode 100644
index 307d3d4..0000000
--- a/third_party/go/patches/nfproxy.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-Copyright 2020 The Monogon Project Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-Fix nfproxy compatibility with our version of Kubernetes/utilproxy
---- com_github_sbezverk_nfproxy.orig/pkg/proxy/proxy_service.go	2020-07-16 14:24:06.901176302 +0200
-+++ com_github_sbezverk_nfproxy/pkg/proxy/proxy_service.go	2020-07-16 14:08:34.118927035 +0200
-@@ -22,7 +22,6 @@
- 	utilnftables "github.com/google/nftables"
- 	"github.com/sbezverk/nfproxy/pkg/nftables"
- 	v1 "k8s.io/api/core/v1"
--	"k8s.io/apimachinery/pkg/types"
- 	"k8s.io/klog"
- 	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
- 	utilnet "k8s.io/utils/net"
-@@ -44,8 +43,7 @@
- 		stickySeconds := int(*svc.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
- 		klog.V(5).Infof("Service %s/%s has SessionAffinity set for %d seconds", svc.Namespace, svc.Name, stickySeconds)
- 	}
--	svcName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}
--	if utilproxy.ShouldSkipService(svcName, svc) {
-+	if utilproxy.ShouldSkipService(svc) {
- 		return
- 	}
- 	for i := range svc.Spec.Ports {
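The deleted compatibility patch above tracked an upstream utilproxy change: ShouldSkipService dropped its NamespacedName parameter. For orientation, a sketch of what that predicate decides, based on upstream behavior rather than code in this diff:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// shouldSkipService mirrors utilproxy.ShouldSkipService's post-change
// signature: headless and ExternalName services get no proxy rules.
func shouldSkipService(svc *v1.Service) bool {
	if svc.Spec.ClusterIP == v1.ClusterIPNone {
		return true // headless: resolved via DNS, nothing to load-balance
	}
	return svc.Spec.Type == v1.ServiceTypeExternalName
}

func main() {
	headless := &v1.Service{Spec: v1.ServiceSpec{ClusterIP: v1.ClusterIPNone}}
	fmt.Println(shouldSkipService(headless)) // true
}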
diff --git a/third_party/go/patches/runc-add-cdeps.patch b/third_party/go/patches/runc-add-cdeps.patch
new file mode 100644
index 0000000..fd15c15
--- /dev/null
+++ b/third_party/go/patches/runc-add-cdeps.patch
@@ -0,0 +1,10 @@
+--- a/libcontainer/seccomp/patchbpf/BUILD.bazel	2022-03-24 20:12:34.325421847 +0100
++++ b/libcontainer/seccomp/patchbpf/BUILD.bazel	2022-03-24 20:12:59.777602881 +0100
+@@ -7,6 +7,7 @@
+         "enosys_unsupported.go",
+     ],
+     cgo = True,
++    cdeps = ["@seccomp"],
+     importpath = "github.com/opencontainers/runc/libcontainer/seccomp/patchbpf",
+     visibility = ["//visibility:public"],
+     deps = select({