Add all dependencies for Kubernetes worker

Adds the Kubelet, CNI plugins, containerd, runc and gVisor using a
pre-baked list of dependencies generated by scripts/gazelle-deps.sh.

This moves all dependencies of gVisor, Kubernetes, runc, etc. into the
same 'namespace' of Bazel external repositories, which makes it easy to
access their code as libraries and simplifies version auditing.
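
For example, a go_library elsewhere in the workspace can now depend on
these packages directly via their external repository labels. The
target below is purely illustrative; only the dependency labels are
taken from the patched Kubernetes build files:

    load("@io_bazel_rules_go//go:def.bzl", "go_library")

    go_library(
        name = "openapi_example",  # hypothetical target
        srcs = ["openapi_example.go"],  # hypothetical source
        importpath = "example.com/openapi_example",  # placeholder
        deps = [
            # External repository labels instead of //vendor/... paths:
            "@io_k8s_kube_openapi//pkg/common:go_default_library",
            "@com_github_go_openapi_spec//:go_default_library",
        ],
    )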

The gazelle-deps.sh script is a temporary solution that will be
replaced ASAP (see T725).

This unblocks T486.

This is an alternative to D389.

Test Plan: `bazel build //core:image` runs and picks up the new binaries

X-Origin-Diff: phab/D487
GitOrigin-RevId: a28a25071fa2ae76b272d237ce9af777485065ff
diff --git a/third_party/go/patches/k8s-kubernetes.patch b/third_party/go/patches/k8s-kubernetes.patch
new file mode 100644
index 0000000..6d4663e
--- /dev/null
+++ b/third_party/go/patches/k8s-kubernetes.patch
@@ -0,0 +1,386 @@
+Copyright 2020 The Monogon Project Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+This fixes OpenAPI codegen when Kubernetes is included from the Smalltown workspace. It essentially undoes vendoring: labels that point into //vendor/... are rewritten to the corresponding external repositories (e.g. @io_k8s_kube_openapi).
+
+diff -ur io_k8s_kubernetes.orig/build/code_generation.bzl io_k8s_kubernetes/build/code_generation.bzl
+--- io_k8s_kubernetes.orig/build/code_generation.bzl	2020-04-15 13:43:57.785669620 +0200
++++ io_k8s_kubernetes/build/code_generation.bzl	2020-04-16 18:19:44.297531873 +0200
+@@ -27,6 +27,12 @@
+         ...
+     )
+     """
++    if pkg.startswith('staging/src/k8s.io/'):
++        parts = pkg.split('/', 4)
++        project = parts[3]
++        project = project.replace('-', '_')
++        path = parts[4]
++        return "@io_k8s_%s//%s:go_default_library" % (project, path)
+     return "//%s:go_default_library" % pkg
+
+ def go_pkg(pkg):
+@@ -42,6 +48,8 @@
+         ...
+     )
+     """
++    if pkg.startswith('staging/src/'):
++        return pkg[len('staging/src/'):]
+     for prefix in ["staging/src", "vendor"]:
+         if pkg.startswith(prefix):
+             return paths.relativize(pkg, prefix)
+@@ -49,8 +57,8 @@
+
+ def openapi_deps():
+     deps = [
+-        "//vendor/github.com/go-openapi/spec:go_default_library",
+-        "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library",
++        "@com_github_go_openapi_spec//:go_default_library",
++        "@io_k8s_kube_openapi//pkg/common:go_default_library",
+     ]
+     deps.extend([bazel_go_library(pkg) for pkg in tags_values_pkgs["openapi-gen"]["true"]])
+     return deps
+@@ -76,7 +84,7 @@
+         # the generator must run from the repo root inside the generated GOPATH.
+         # All of bazel's $(location)s are relative to the original working directory, however.
+         cmd = " ".join([
+-            "$(location //vendor/k8s.io/kube-openapi/cmd/openapi-gen)",
++            "$(location @io_k8s_kube_openapi//cmd/openapi-gen)",
+             "--v 1",
+             "--logtostderr",
+             "--go-header-file $(location //" + openapi_vendor_prefix + "hack/boilerplate:boilerplate.generatego.txt)",
+@@ -88,6 +96,6 @@
+             "&& rm tmp_api_violations.report",
+         ]),
+         go_deps = openapi_deps(),
+-        tools = ["//vendor/k8s.io/kube-openapi/cmd/openapi-gen"],
++        tools = ["@io_k8s_kube_openapi//cmd/openapi-gen"],
+         message = "GenOpenAPI",
+     )
+
+The rest rips out a bunch of volume providers. We're only interested in CSI and hostpath/local.
+
+diff -ur io_k8s_kubernetes.orig/cmd/kube-apiserver/app/options/globalflags.go io_k8s_kubernetes/cmd/kube-apiserver/app/options/globalflags.go
+--- io_k8s_kubernetes.orig/cmd/kube-apiserver/app/options/globalflags.go	2020-04-15 13:43:57.811669689 +0200
++++ io_k8s_kubernetes/cmd/kube-apiserver/app/options/globalflags.go	2020-04-17 13:29:41.578264893 +0200
+@@ -32,9 +32,6 @@
+ func AddCustomGlobalFlags(fs *pflag.FlagSet) {
+ 	// Lookup flags in global flag set and re-register the values with our flagset.
+ 
+-	// Adds flags from k8s.io/kubernetes/pkg/cloudprovider/providers.
+-	registerLegacyGlobalFlags(fs)
+-
+ 	// Adds flags from k8s.io/apiserver/pkg/admission.
+ 	globalflag.Register(fs, "default-not-ready-toleration-seconds")
+ 	globalflag.Register(fs, "default-unreachable-toleration-seconds")
+diff -ur io_k8s_kubernetes.orig/cmd/kube-controller-manager/app/controllermanager.go io_k8s_kubernetes/cmd/kube-controller-manager/app/controllermanager.go
+--- io_k8s_kubernetes.orig/cmd/kube-controller-manager/app/controllermanager.go	2020-04-15 13:43:57.812669692 +0200
++++ io_k8s_kubernetes/cmd/kube-controller-manager/app/controllermanager.go	2020-04-17 13:30:39.975397151 +0200
+@@ -126,7 +126,6 @@
+ 	namedFlagSets := s.Flags(KnownControllers(), ControllersDisabledByDefault.List())
+ 	verflag.AddFlags(namedFlagSets.FlagSet("global"))
+ 	globalflag.AddGlobalFlags(namedFlagSets.FlagSet("global"), cmd.Name())
+-	registerLegacyGlobalFlags(namedFlagSets)
+ 	for _, f := range namedFlagSets.FlagSets {
+ 		fs.AddFlagSet(f)
+ 	}
+diff -ur io_k8s_kubernetes.orig/cmd/kube-controller-manager/app/plugins.go io_k8s_kubernetes/cmd/kube-controller-manager/app/plugins.go
+--- io_k8s_kubernetes.orig/cmd/kube-controller-manager/app/plugins.go	2020-04-15 13:43:57.813669694 +0200
++++ io_k8s_kubernetes/cmd/kube-controller-manager/app/plugins.go	2020-04-17 13:34:19.393894095 +0200
+@@ -32,19 +32,9 @@
+ 	// Volume plugins
+ 	"k8s.io/kubernetes/pkg/volume"
+ 	"k8s.io/kubernetes/pkg/volume/csi"
+-	"k8s.io/kubernetes/pkg/volume/fc"
+ 	"k8s.io/kubernetes/pkg/volume/flexvolume"
+-	"k8s.io/kubernetes/pkg/volume/flocker"
+-	"k8s.io/kubernetes/pkg/volume/glusterfs"
+ 	"k8s.io/kubernetes/pkg/volume/hostpath"
+-	"k8s.io/kubernetes/pkg/volume/iscsi"
+ 	"k8s.io/kubernetes/pkg/volume/local"
+-	"k8s.io/kubernetes/pkg/volume/nfs"
+-	"k8s.io/kubernetes/pkg/volume/portworx"
+-	"k8s.io/kubernetes/pkg/volume/quobyte"
+-	"k8s.io/kubernetes/pkg/volume/rbd"
+-	"k8s.io/kubernetes/pkg/volume/scaleio"
+-	"k8s.io/kubernetes/pkg/volume/storageos"
+ 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+ 
+ 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+@@ -58,18 +48,7 @@
+ // The list of plugins is manually compiled. This code and the plugin
+ // initialization code for kubelet really, really need a through refactor.
+ func ProbeAttachableVolumePlugins() ([]volume.VolumePlugin, error) {
+-	var err error
+ 	allPlugins := []volume.VolumePlugin{}
+-	allPlugins, err = appendAttachableLegacyProviderVolumes(allPlugins, utilfeature.DefaultFeatureGate)
+-	if err != nil {
+-		return allPlugins, err
+-	}
+-	allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
+ 	return allPlugins, nil
+ }
+@@ -83,18 +62,7 @@
+ 
+ // ProbeExpandableVolumePlugins returns volume plugins which are expandable
+ func ProbeExpandableVolumePlugins(config persistentvolumeconfig.VolumeConfiguration) ([]volume.VolumePlugin, error) {
+-	var err error
+ 	allPlugins := []volume.VolumePlugin{}
+-	allPlugins, err = appendExpandableLegacyProviderVolumes(allPlugins, utilfeature.DefaultFeatureGate)
+-	if err != nil {
+-		return allPlugins, err
+-	}
+-	allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
+ 	return allPlugins, nil
+ }
+ 
+@@ -124,30 +92,7 @@
+ 	}
+ 	allPlugins = append(allPlugins, hostpath.ProbeVolumePlugins(hostPathConfig)...)
+ 
+-	nfsConfig := volume.VolumeConfig{
+-		RecyclerMinimumTimeout:   int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS),
+-		RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS),
+-		RecyclerPodTemplate:      volume.NewPersistentVolumeRecyclerPodTemplate(),
+-	}
+-	if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {
+-		klog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
+-	}
+-	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
+-	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
+-	// add rbd provisioner
+-	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, quobyte.ProbeVolumePlugins()...)
+-	var err error
+-	allPlugins, err = appendExpandableLegacyProviderVolumes(allPlugins, utilfeature.DefaultFeatureGate)
+-	if err != nil {
+-		return allPlugins, err
+-	}
+-
+-	allPlugins = append(allPlugins, flocker.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
+ 
+ 	if utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
+ 		allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
+diff -ur io_k8s_kubernetes.orig/cmd/kubectl/BUILD io_k8s_kubernetes/cmd/kubectl/BUILD
+--- io_k8s_kubernetes.orig/cmd/kubectl/BUILD	2020-04-20 14:58:52.573455879 +0200
++++ io_k8s_kubernetes/cmd/kubectl/BUILD	2020-04-20 14:56:41.199032687 +0200
+@@ -3,7 +3,7 @@
+     "go_binary",
+     "go_library",
+ )
+-load("//staging/src/k8s.io/component-base/version:def.bzl", "version_x_defs")
++load("@//third_party/go:kubernetes_version_def.bzl", "version_x_defs")
+ 
+ go_binary(
+     name = "kubectl",
+
+
+We also take the opportunity to remove azure/gcp auth.
+
+diff -ur io_k8s_kubernetes.orig/cmd/kubelet/app/options/globalflags.go io_k8s_kubernetes/cmd/kubelet/app/options/globalflags.go
+--- io_k8s_kubernetes.orig/cmd/kubelet/app/options/globalflags.go	2020-04-15 13:43:57.827669732 +0200
++++ io_k8s_kubernetes/cmd/kubelet/app/options/globalflags.go	2020-04-16 15:58:30.964945445 +0200
+@@ -28,10 +28,6 @@
+ 	"k8s.io/component-base/logs"
+ 	"k8s.io/component-base/version/verflag"
+ 	"k8s.io/klog"
+-
+-	// ensure libs have a chance to globally register their flags
+-	_ "k8s.io/kubernetes/pkg/credentialprovider/azure"
+-	_ "k8s.io/kubernetes/pkg/credentialprovider/gcp"
+ )
+ 
+ // AddGlobalFlags explicitly registers flags that libraries (glog, verflag, etc.) register
+@@ -80,14 +76,8 @@
+ 
+ // addCredentialProviderFlags adds flags from k8s.io/kubernetes/pkg/credentialprovider
+ func addCredentialProviderFlags(fs *pflag.FlagSet) {
+-	// lookup flags in global flag set and re-register the values with our flagset
+-	global := pflag.CommandLine
+ 	local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
+ 
+-	// TODO(#58034): This is not a static file, so it's not quite as straightforward as --google-json-key.
+-	// We need to figure out how ACR users can dynamically provide pull credentials before we can deprecate this.
+-	pflagRegister(global, local, "azure-container-registry-config")
+-
+ 	fs.AddFlagSet(local)
+ }
+ 
+diff -ur io_k8s_kubernetes.orig/cmd/kubelet/app/plugins.go io_k8s_kubernetes/cmd/kubelet/app/plugins.go
+--- io_k8s_kubernetes.orig/cmd/kubelet/app/plugins.go	2020-04-15 13:43:57.827669732 +0200
++++ io_k8s_kubernetes/cmd/kubelet/app/plugins.go	2020-04-16 16:10:13.366081373 +0200
+@@ -19,8 +19,6 @@
+ // This file exists to force the desired plugin implementations to be linked.
+ import (
+ 	// Credential providers
+-	_ "k8s.io/kubernetes/pkg/credentialprovider/aws"
+-	_ "k8s.io/kubernetes/pkg/credentialprovider/azure"
+ 	_ "k8s.io/kubernetes/pkg/credentialprovider/gcp"
+ 
+ 	"k8s.io/component-base/featuregate"
+@@ -28,27 +26,13 @@
+ 
+ 	// Volume plugins
+ 	"k8s.io/kubernetes/pkg/volume"
+-	"k8s.io/kubernetes/pkg/volume/cephfs"
+ 	"k8s.io/kubernetes/pkg/volume/configmap"
+ 	"k8s.io/kubernetes/pkg/volume/csi"
+-	"k8s.io/kubernetes/pkg/volume/downwardapi"
+ 	"k8s.io/kubernetes/pkg/volume/emptydir"
+-	"k8s.io/kubernetes/pkg/volume/fc"
+ 	"k8s.io/kubernetes/pkg/volume/flexvolume"
+-	"k8s.io/kubernetes/pkg/volume/flocker"
+-	"k8s.io/kubernetes/pkg/volume/git_repo"
+-	"k8s.io/kubernetes/pkg/volume/glusterfs"
+ 	"k8s.io/kubernetes/pkg/volume/hostpath"
+-	"k8s.io/kubernetes/pkg/volume/iscsi"
+ 	"k8s.io/kubernetes/pkg/volume/local"
+-	"k8s.io/kubernetes/pkg/volume/nfs"
+-	"k8s.io/kubernetes/pkg/volume/portworx"
+-	"k8s.io/kubernetes/pkg/volume/projected"
+-	"k8s.io/kubernetes/pkg/volume/quobyte"
+-	"k8s.io/kubernetes/pkg/volume/rbd"
+-	"k8s.io/kubernetes/pkg/volume/scaleio"
+ 	"k8s.io/kubernetes/pkg/volume/secret"
+-	"k8s.io/kubernetes/pkg/volume/storageos"
+ 
+ 	// Cloud providers
+ 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers"
+@@ -64,30 +48,11 @@
+ 	//
+ 	// Kubelet does not currently need to configure volume plugins.
+ 	// If/when it does, see kube-controller-manager/app/plugins.go for example of using volume.VolumeConfig
+-	var err error
+-	allPlugins, err = appendLegacyProviderVolumes(allPlugins, featureGate)
+-	if err != nil {
+-		return allPlugins, err
+-	}
+ 	allPlugins = append(allPlugins, emptydir.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, hostpath.ProbeVolumePlugins(volume.VolumeConfig{})...)
+-	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
+ 	allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, quobyte.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, flocker.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, configmap.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, projected.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
+-	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
+ 	return allPlugins, nil
+ }
+diff -ur io_k8s_kubernetes.orig/cmd/kubelet/app/plugins_providers.go io_k8s_kubernetes/cmd/kubelet/app/plugins_providers.go
+--- io_k8s_kubernetes.orig/cmd/kubelet/app/plugins_providers.go	2020-04-15 13:43:57.827669732 +0200
++++ io_k8s_kubernetes/cmd/kubelet/app/plugins_providers.go	2020-04-15 16:29:49.402465237 +0200
+@@ -24,13 +24,7 @@
+ 	"k8s.io/klog"
+ 	"k8s.io/kubernetes/pkg/features"
+ 	"k8s.io/kubernetes/pkg/volume"
+-	"k8s.io/kubernetes/pkg/volume/awsebs"
+-	"k8s.io/kubernetes/pkg/volume/azure_dd"
+-	"k8s.io/kubernetes/pkg/volume/azure_file"
+-	"k8s.io/kubernetes/pkg/volume/cinder"
+ 	"k8s.io/kubernetes/pkg/volume/csimigration"
+-	"k8s.io/kubernetes/pkg/volume/gcepd"
+-	"k8s.io/kubernetes/pkg/volume/vsphere_volume"
+ )
+ 
+ type probeFn func() []volume.VolumePlugin
+@@ -60,11 +54,6 @@
+ 
+ func appendLegacyProviderVolumes(allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) {
+ 	pluginMigrationStatus := make(map[string]pluginInfo)
+-	pluginMigrationStatus[plugins.AWSEBSInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAWS, pluginMigrationCompleteFeature: features.CSIMigrationAWSComplete, pluginProbeFunction: awsebs.ProbeVolumePlugins}
+-	pluginMigrationStatus[plugins.GCEPDInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationGCE, pluginMigrationCompleteFeature: features.CSIMigrationGCEComplete, pluginProbeFunction: gcepd.ProbeVolumePlugins}
+-	pluginMigrationStatus[plugins.CinderInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationOpenStack, pluginMigrationCompleteFeature: features.CSIMigrationOpenStackComplete, pluginProbeFunction: cinder.ProbeVolumePlugins}
+-	pluginMigrationStatus[plugins.AzureDiskInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAzureDisk, pluginMigrationCompleteFeature: features.CSIMigrationAzureDiskComplete, pluginProbeFunction: azure_dd.ProbeVolumePlugins}
+-	pluginMigrationStatus[plugins.AzureFileInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAzureFile, pluginMigrationCompleteFeature: features.CSIMigrationAzureFileComplete, pluginProbeFunction: azure_file.ProbeVolumePlugins}
+ 
+ 	var err error
+ 	for pluginName, pluginInfo := range pluginMigrationStatus {
+@@ -74,6 +63,5 @@
+ 		}
+ 	}
+ 
+-	allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
+ 	return allPlugins, nil
+ }
+diff -ur io_k8s_kubernetes.orig/cmd/kubelet/BUILD io_k8s_kubernetes/cmd/kubelet/BUILD
+--- io_k8s_kubernetes.orig/cmd/kubelet/BUILD	2020-04-15 13:43:57.827669732 +0200
++++ io_k8s_kubernetes/cmd/kubelet/BUILD	2020-04-20 14:56:20.446965836 +0200
+@@ -5,7 +5,7 @@
+     go_binary = "go_binary_conditional_pure",
+ )
+ load("@io_bazel_rules_go//go:def.bzl", "go_library")
+-load("//staging/src/k8s.io/component-base/version:def.bzl", "version_x_defs")
++load("@//third_party/go:kubernetes_version_def.bzl", "version_x_defs")
+ 
+ go_binary(
+     name = "kubelet",
+diff -ur io_k8s_kubernetes.orig/pkg/controller/nodeipam/ipam/cidr_allocator.go io_k8s_kubernetes/pkg/controller/nodeipam/ipam/cidr_allocator.go
+--- io_k8s_kubernetes.orig/pkg/controller/nodeipam/ipam/cidr_allocator.go	2020-04-15 13:43:57.860669820 +0200
++++ io_k8s_kubernetes/pkg/controller/nodeipam/ipam/cidr_allocator.go	2020-04-17 13:18:36.157842990 +0200
+@@ -111,8 +111,6 @@
+ 	switch allocatorType {
+ 	case RangeAllocatorType:
+ 		return NewCIDRRangeAllocator(kubeClient, nodeInformer, allocatorParams, nodeList)
+-	case CloudAllocatorType:
+-		return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
+ 	default:
+ 		return nil, fmt.Errorf("invalid CIDR allocator type: %v", allocatorType)
+ 	}
+diff -ur io_k8s_kubernetes.orig/pkg/controller/nodeipam/nolegacyprovider.go io_k8s_kubernetes/pkg/controller/nodeipam/nolegacyprovider.go
+--- io_k8s_kubernetes.orig/pkg/controller/nodeipam/nolegacyprovider.go	2020-04-15 13:43:57.860669820 +0200
++++ io_k8s_kubernetes/pkg/controller/nodeipam/nolegacyprovider.go	2020-04-17 13:27:12.440927122 +0200
+@@ -1,5 +1,3 @@
+-// +build providerless
+-
+ /*
+ Copyright 2019 The Kubernetes Authors.
+ 
+diff -ur io_k8s_kubernetes.orig/pkg/kubelet/cadvisor/cadvisor_linux.go io_k8s_kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go
+--- io_k8s_kubernetes.orig/pkg/kubelet/cadvisor/cadvisor_linux.go	2020-04-15 13:43:57.875669859 +0200
++++ io_k8s_kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go	2020-04-15 18:24:28.683551309 +0200
+@@ -34,8 +34,6 @@
+
+ 	// Register cloud info providers.
+ 	// TODO(#68522): Remove this in 1.20+ once the cAdvisor endpoints are removed.
+-	_ "github.com/google/cadvisor/utils/cloudinfo/aws"
+-	_ "github.com/google/cadvisor/utils/cloudinfo/azure"
+ 	_ "github.com/google/cadvisor/utils/cloudinfo/gce"
+
+ 	"github.com/google/cadvisor/cache/memory"