treewide: bump to k8s v1.33.2
Update Kubernetes to 1.33 which is already at patch version 2. As part
of K8s gomod dependencies containerd was bumped a minor release to 2.1.3.
The UserNamespacesSupport feature gate is now default-on and was thus
dropped. The netlink patches were upstreamed and can now be dropped as
part of the dependency update. A new klog sink adapter for our logging
interface was introduced as the client-go MutationCache now requires a
logger.
containerd abuses gRPC interfaces for mocking, thus they are not
forward-compatible and need a new patch to be compatible with the
CRI version now being used.
Change-Id: I4feb2ab3bcfca5b83c7ea38ed444b14ade1e9bf0
Reviewed-on: https://review.monogon.dev/c/monogon/+/4433
Tested-by: Jenkins CI
Reviewed-by: Lorenz Brun <lorenz@monogon.tech>
diff --git a/metropolis/node/kubernetes/BUILD.bazel b/metropolis/node/kubernetes/BUILD.bazel
index d2c3065..530723f 100644
--- a/metropolis/node/kubernetes/BUILD.bazel
+++ b/metropolis/node/kubernetes/BUILD.bazel
@@ -43,6 +43,7 @@
"//osbase/event/memory",
"//osbase/fileargs",
"//osbase/fsquota",
+ "//osbase/logtree",
"//osbase/loop",
"//osbase/net/dns/kubernetes",
"//osbase/supervisor",
diff --git a/metropolis/node/kubernetes/feature_gates.go b/metropolis/node/kubernetes/feature_gates.go
index 5821101..58340e3 100644
--- a/metropolis/node/kubernetes/feature_gates.go
+++ b/metropolis/node/kubernetes/feature_gates.go
@@ -43,6 +43,5 @@
}
var extraFeatureGates = featureGates{
- features.UserNamespacesSupport: true,
features.UserNamespacesPodSecurityStandards: true,
}
diff --git a/metropolis/node/kubernetes/provisioner.go b/metropolis/node/kubernetes/provisioner.go
index cd2ed45..9b5a31c 100644
--- a/metropolis/node/kubernetes/provisioner.go
+++ b/metropolis/node/kubernetes/provisioner.go
@@ -38,6 +38,7 @@
"source.monogon.dev/go/logging"
"source.monogon.dev/metropolis/node/core/localstorage"
"source.monogon.dev/osbase/fsquota"
+ "source.monogon.dev/osbase/logtree"
"source.monogon.dev/osbase/supervisor"
)
@@ -85,6 +86,8 @@
// queue. Queues are being worked on by only one worker to limit load and avoid
// complicated locking infrastructure. Failed items are requeued.
func (p *csiProvisionerServer) Run(ctx context.Context) error {
+ p.logger = supervisor.Logger(ctx)
+
// The recorder is used to log Kubernetes events for successful or failed
// volume provisions. These events then show up in `kubectl describe pvc`
// and can be used by admins to debug issues with this provisioner.
@@ -95,16 +98,16 @@
p.pvcInformer = p.InformerFactory.Core().V1().PersistentVolumeClaims()
p.pvInformer = p.InformerFactory.Core().V1().PersistentVolumes()
p.storageClassInformer = p.InformerFactory.Storage().V1().StorageClasses()
- p.pvcMutationCache = cache.NewIntegerResourceVersionMutationCache(p.pvcInformer.Informer().GetStore(), nil, time.Minute, false)
- p.pvMutationCache = cache.NewIntegerResourceVersionMutationCache(p.pvInformer.Informer().GetStore(), nil, time.Minute, false)
+
+ klogger := logtree.NewKlogLogger(p.logger)
+ p.pvcMutationCache = cache.NewIntegerResourceVersionMutationCache(klogger, p.pvcInformer.Informer().GetStore(), nil, time.Minute, false)
+ p.pvMutationCache = cache.NewIntegerResourceVersionMutationCache(klogger, p.pvInformer.Informer().GetStore(), nil, time.Minute, false)
p.claimQueue = workqueue.NewTypedDelayingQueue[string]()
p.claimRateLimiter = workqueue.NewTypedItemExponentialFailureRateLimiter[string](time.Second, 5*time.Minute)
p.claimNextTry = make(map[string]time.Time)
p.pvQueue = workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]())
- p.logger = supervisor.Logger(ctx)
-
p.pvcInformer.Informer().SetWatchErrorHandler(func(_ *cache.Reflector, err error) {
p.logger.Errorf("pvcInformer watch error: %v", err)
})