blob: 38de7df5b18ea82c8a021727a48d68e67fbbf499 [file] [log] [blame]
Lorenz Brunb15abad2020-04-16 11:17:12 +02001// Copyright 2020 The Monogon Project Authors.
2//
3// SPDX-License-Identifier: Apache-2.0
4//
5// Licensed under the Apache License, Version 2.0 (the "License");
6// you may not use this file except in compliance with the License.
7// You may obtain a copy of the License at
8//
9// http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing, software
12// distributed under the License is distributed on an "AS IS" BASIS,
13// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14// See the License for the specific language governing permissions and
15// limitations under the License.
16
17package kubernetes
18
19import (
20 "context"
21 "errors"
22 "fmt"
Lorenz Brunb15abad2020-04-16 11:17:12 +020023 "os"
24 "path/filepath"
25
Lorenz Brun37050122021-03-30 14:00:27 +020026 "golang.org/x/sys/unix"
Lorenz Brunb15abad2020-04-16 11:17:12 +020027 v1 "k8s.io/api/core/v1"
28 storagev1 "k8s.io/api/storage/v1"
29 apierrs "k8s.io/apimachinery/pkg/api/errors"
30 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
31 "k8s.io/client-go/informers"
32 coreinformers "k8s.io/client-go/informers/core/v1"
33 storageinformers "k8s.io/client-go/informers/storage/v1"
34 "k8s.io/client-go/kubernetes"
35 "k8s.io/client-go/kubernetes/scheme"
36 typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
37 "k8s.io/client-go/tools/cache"
38 "k8s.io/client-go/tools/record"
39 ref "k8s.io/client-go/tools/reference"
40 "k8s.io/client-go/util/workqueue"
Serge Bazanskic2c7ad92020-07-13 17:20:09 +020041
Serge Bazanski31370b02021-01-07 16:31:14 +010042 "source.monogon.dev/metropolis/node/core/localstorage"
Serge Bazanski31370b02021-01-07 16:31:14 +010043 "source.monogon.dev/metropolis/pkg/fsquota"
Lorenz Brun37050122021-03-30 14:00:27 +020044 "source.monogon.dev/metropolis/pkg/logtree"
Serge Bazanski31370b02021-01-07 16:31:14 +010045 "source.monogon.dev/metropolis/pkg/supervisor"
Lorenz Brunb15abad2020-04-16 11:17:12 +020046)
47
// ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to
// match the csiProvisionerServerName declared there.
const csiProvisionerServerName = "dev.monogon.metropolis.vfs"
Lorenz Brunb15abad2020-04-16 11:17:12 +020051
// csiProvisionerServer is responsible for the provisioning and deprovisioning
// of CSI-based container volumes. It runs on all nodes and watches PVCs for
// ones assigned to the node it's running on and fulfills the provisioning
// request by creating a directory, applying a quota and creating the
// corresponding PV. When the PV is released and its retention policy is
// Delete, the directory and the PV resource are deleted.
type csiProvisionerServer struct {
	// NodeName is the Kubernetes node name this provisioner runs on; used to
	// select only PVCs/PVs scheduled onto this node.
	NodeName string
	// Kubernetes is the API client used to create and delete PV objects.
	Kubernetes kubernetes.Interface
	// InformerFactory supplies the shared PVC, PV and StorageClass informers.
	InformerFactory informers.SharedInformerFactory
	// VolumesDirectory is the local directory under which volume data lives.
	VolumesDirectory *localstorage.DataVolumesDirectory

	// claimQueue and pvQueue are rate-limited work queues fed by the informers;
	// each is drained by a single worker goroutine (see Run).
	claimQueue workqueue.RateLimitingInterface
	pvQueue    workqueue.RateLimitingInterface
	// recorder emits Kubernetes events attached to PVCs/PVs for operators.
	recorder record.EventRecorder
	// Informers, initialized in Run from InformerFactory.
	pvcInformer          coreinformers.PersistentVolumeClaimInformer
	pvInformer           coreinformers.PersistentVolumeInformer
	storageClassInformer storageinformers.StorageClassInformer
	logger               logtree.LeveledLogger
}
72
// Run runs the main provisioning machinery. It consists of a bunch of
// informers which keep track of the events happening on the Kubernetes
// control plane and inform us when something happens. If anything happens to
// PVCs or PVs, we enqueue the identifier of that resource in a work queue.
// Queues are being worked on by only one worker to limit load and avoid
// complicated locking infrastructure. Failed items are requeued.
func (p *csiProvisionerServer) Run(ctx context.Context) error {
	// The recorder is used to log Kubernetes events for successful or failed
	// volume provisions. These events then show up in `kubectl describe pvc`
	// and can be used by admins to debug issues with this provisioner.
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: p.Kubernetes.CoreV1().Events("")})
	p.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: csiProvisionerServerName, Host: p.NodeName})

	p.pvInformer = p.InformerFactory.Core().V1().PersistentVolumes()
	p.pvcInformer = p.InformerFactory.Core().V1().PersistentVolumeClaims()
	p.storageClassInformer = p.InformerFactory.Storage().V1().StorageClasses()

	// Rate-limited queues decouple the informer callbacks (which must return
	// quickly) from the actual, I/O-heavy provisioning work.
	p.claimQueue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	p.pvQueue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

	p.logger = supervisor.Logger(ctx)

	// Watch errors are only logged; the reflector retries by itself.
	p.pvcInformer.Informer().SetWatchErrorHandler(func(_ *cache.Reflector, err error) {
		p.logger.Errorf("pvcInformer watch error: %v", err)
	})
	p.pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: p.enqueueClaim,
		UpdateFunc: func(old, new interface{}) {
			p.enqueueClaim(new)
		},
	})
	p.pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: p.enqueuePV,
		UpdateFunc: func(old, new interface{}) {
			p.enqueuePV(new)
		},
	})
	p.pvInformer.Informer().SetWatchErrorHandler(func(_ *cache.Reflector, err error) {
		p.logger.Errorf("pvInformer watch error: %v", err)
	})

	p.storageClassInformer.Informer().SetWatchErrorHandler(func(_ *cache.Reflector, err error) {
		p.logger.Errorf("storageClassInformer watch error: %v", err)
	})

	// Informer run loops terminate when ctx is done.
	go p.pvcInformer.Informer().Run(ctx.Done())
	go p.pvInformer.Informer().Run(ctx.Done())
	go p.storageClassInformer.Informer().Run(ctx.Done())

	// These will self-terminate once the queues are shut down
	go p.processQueueItems(p.claimQueue, func(key string) error {
		return p.processPVC(key)
	})
	go p.processQueueItems(p.pvQueue, func(key string) error {
		return p.processPV(key)
	})

	supervisor.Signal(ctx, supervisor.SignalHealthy)
	<-ctx.Done()
	// Shutting down the queues stops the two worker goroutines above.
	p.claimQueue.ShutDown()
	p.pvQueue.ShutDown()
	return nil
}
137
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200138// isOurPVC checks if the given PVC is is to be provisioned by this provisioner
139// and has been scheduled onto this node
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200140func (p *csiProvisionerServer) isOurPVC(pvc *v1.PersistentVolumeClaim) bool {
141 if pvc.ObjectMeta.Annotations["volume.beta.kubernetes.io/storage-provisioner"] != csiProvisionerServerName {
142 return false
143 }
144 if pvc.ObjectMeta.Annotations["volume.kubernetes.io/selected-node"] != p.NodeName {
145 return false
146 }
147 return true
Lorenz Brunb15abad2020-04-16 11:17:12 +0200148}
149
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200150// isOurPV checks if the given PV has been provisioned by this provisioner and
151// has been scheduled onto this node
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200152func (p *csiProvisionerServer) isOurPV(pv *v1.PersistentVolume) bool {
153 if pv.ObjectMeta.Annotations["pv.kubernetes.io/provisioned-by"] != csiProvisionerServerName {
154 return false
155 }
156 if pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values[0] != p.NodeName {
157 return false
158 }
159 return true
Lorenz Brunb15abad2020-04-16 11:17:12 +0200160}
161
162// enqueueClaim adds an added/changed PVC to the work queue
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200163func (p *csiProvisionerServer) enqueueClaim(obj interface{}) {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200164 key, err := cache.MetaNamespaceKeyFunc(obj)
165 if err != nil {
Serge Bazanskic7359672020-10-30 16:38:57 +0100166 p.logger.Errorf("Not queuing PVC because key could not be derived: %v", err)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200167 return
168 }
169 p.claimQueue.Add(key)
170}
171
172// enqueuePV adds an added/changed PV to the work queue
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200173func (p *csiProvisionerServer) enqueuePV(obj interface{}) {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200174 key, err := cache.MetaNamespaceKeyFunc(obj)
175 if err != nil {
Serge Bazanskic7359672020-10-30 16:38:57 +0100176 p.logger.Errorf("Not queuing PV because key could not be derived: %v", err)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200177 return
178 }
179 p.pvQueue.Add(key)
180}
181
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200182// processQueueItems gets items from the given work queue and calls the process
183// function for each of them. It self- terminates once the queue is shut down.
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200184func (p *csiProvisionerServer) processQueueItems(queue workqueue.RateLimitingInterface, process func(key string) error) {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200185 for {
186 obj, shutdown := queue.Get()
187 if shutdown {
188 return
189 }
190
191 func(obj interface{}) {
192 defer queue.Done(obj)
193 key, ok := obj.(string)
194 if !ok {
195 queue.Forget(obj)
Serge Bazanskic7359672020-10-30 16:38:57 +0100196 p.logger.Errorf("Expected string in workqueue, got %+v", obj)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200197 return
198 }
199
200 if err := process(key); err != nil {
Serge Bazanskic7359672020-10-30 16:38:57 +0100201 p.logger.Warningf("Failed processing item %q, requeueing (numrequeues: %d): %v", key, queue.NumRequeues(obj), err)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200202 queue.AddRateLimited(obj)
203 }
204
205 queue.Forget(obj)
206 }(obj)
207 }
208}
209
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200210// volumePath gets the path where the volume is stored.
211func (p *csiProvisionerServer) volumePath(volumeID string) string {
212 return filepath.Join(p.VolumesDirectory.FullPath(), volumeID)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200213}
214
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200215// processPVC looks at a single PVC item from the queue, determines if it needs
216// to be provisioned and logs the provisioning result to the recorder
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200217func (p *csiProvisionerServer) processPVC(key string) error {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200218 namespace, name, err := cache.SplitMetaNamespaceKey(key)
219 if err != nil {
220 return fmt.Errorf("invalid resource key: %s", key)
221 }
222 pvc, err := p.pvcInformer.Lister().PersistentVolumeClaims(namespace).Get(name)
223 if apierrs.IsNotFound(err) {
224 return nil // nothing to do, no error
225 } else if err != nil {
226 return fmt.Errorf("failed to get PVC for processing: %w", err)
227 }
228
229 if !p.isOurPVC(pvc) {
230 return nil
231 }
232
233 if pvc.Status.Phase != "Pending" {
234 // If the PVC is not pending, we don't need to provision anything
235 return nil
236 }
237
238 storageClass, err := p.storageClassInformer.Lister().Get(*pvc.Spec.StorageClassName)
239 if err != nil {
Serge Bazanskice19acc2023-03-21 16:28:07 +0100240 return fmt.Errorf("could not get storage class: %w", err)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200241 }
242
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200243 if storageClass.Provisioner != csiProvisionerServerName {
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200244 // We're not responsible for this PVC. Can only happen if
245 // controller-manager makes a mistake setting the annotations, but
246 // we're bailing here anyways for safety.
Lorenz Brunb15abad2020-04-16 11:17:12 +0200247 return nil
248 }
249
250 err = p.provisionPVC(pvc, storageClass)
251
252 if err != nil {
253 p.recorder.Eventf(pvc, v1.EventTypeWarning, "ProvisioningFailed", "Failed to provision PV: %v", err)
254 return err
255 }
256 p.recorder.Eventf(pvc, v1.EventTypeNormal, "Provisioned", "Successfully provisioned PV")
257
258 return nil
259}
260
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200261// provisionPVC creates the directory where the volume lives, sets a quota for
262// the requested amount of storage and creates the PV object representing this
263// new volume
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200264func (p *csiProvisionerServer) provisionPVC(pvc *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass) error {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200265 claimRef, err := ref.GetReference(scheme.Scheme, pvc)
266 if err != nil {
267 return fmt.Errorf("failed to get reference to PVC: %w", err)
268 }
269
270 storageReq := pvc.Spec.Resources.Requests[v1.ResourceStorage]
271 if storageReq.IsZero() {
272 return fmt.Errorf("PVC is not requesting any storage, this is not supported")
273 }
274 capacity, ok := storageReq.AsInt64()
275 if !ok {
276 return fmt.Errorf("PVC requesting more than 2^63 bytes of storage, this is not supported")
277 }
278
Lorenz Brunb15abad2020-04-16 11:17:12 +0200279 volumeID := "pvc-" + string(pvc.ObjectMeta.UID)
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200280 volumePath := p.volumePath(volumeID)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200281
Serge Bazanskic7359672020-10-30 16:38:57 +0100282 p.logger.Infof("Creating local PV %s", volumeID)
Lorenz Brun37050122021-03-30 14:00:27 +0200283
284 switch *pvc.Spec.VolumeMode {
285 case "", v1.PersistentVolumeFilesystem:
286 if err := os.Mkdir(volumePath, 0644); err != nil && !os.IsExist(err) {
287 return fmt.Errorf("failed to create volume directory: %w", err)
288 }
Lorenz Brun764a2de2021-11-22 16:26:36 +0100289 files, err := os.ReadDir(volumePath)
Lorenz Brun37050122021-03-30 14:00:27 +0200290 if err != nil {
291 return fmt.Errorf("failed to list files in newly-created volume: %w", err)
292 }
293 if len(files) > 0 {
294 return errors.New("newly-created volume already contains data, bailing")
295 }
296 if err := fsquota.SetQuota(volumePath, uint64(capacity), 100000); err != nil {
Serge Bazanskice19acc2023-03-21 16:28:07 +0100297 return fmt.Errorf("failed to update quota: %w", err)
Lorenz Brun37050122021-03-30 14:00:27 +0200298 }
299 case v1.PersistentVolumeBlock:
300 imageFile, err := os.OpenFile(volumePath, os.O_CREATE|os.O_RDWR, 0644)
301 if err != nil {
302 return fmt.Errorf("failed to create volume image: %w", err)
303 }
304 defer imageFile.Close()
305 if err := unix.Fallocate(int(imageFile.Fd()), 0, 0, capacity); err != nil {
306 return fmt.Errorf("failed to fallocate() volume image: %w", err)
307 }
308 default:
309 return fmt.Errorf("VolumeMode \"%s\" is unsupported", *pvc.Spec.VolumeMode)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200310 }
311
312 vol := &v1.PersistentVolume{
313 ObjectMeta: metav1.ObjectMeta{
314 Name: volumeID,
315 Annotations: map[string]string{
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200316 "pv.kubernetes.io/provisioned-by": csiProvisionerServerName},
Lorenz Brunb15abad2020-04-16 11:17:12 +0200317 },
318 Spec: v1.PersistentVolumeSpec{
319 AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
320 Capacity: v1.ResourceList{
321 v1.ResourceStorage: storageReq, // We're always giving the exact amount
322 },
323 PersistentVolumeSource: v1.PersistentVolumeSource{
324 CSI: &v1.CSIPersistentVolumeSource{
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200325 Driver: csiProvisionerServerName,
Lorenz Brunb15abad2020-04-16 11:17:12 +0200326 VolumeHandle: volumeID,
327 },
328 },
Lorenz Brun37050122021-03-30 14:00:27 +0200329 ClaimRef: claimRef,
330 VolumeMode: pvc.Spec.VolumeMode,
Lorenz Brunb15abad2020-04-16 11:17:12 +0200331 NodeAffinity: &v1.VolumeNodeAffinity{
332 Required: &v1.NodeSelector{
333 NodeSelectorTerms: []v1.NodeSelectorTerm{
334 {
335 MatchExpressions: []v1.NodeSelectorRequirement{
336 {
337 Key: "kubernetes.io/hostname",
338 Operator: v1.NodeSelectorOpIn,
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200339 Values: []string{p.NodeName},
Lorenz Brunb15abad2020-04-16 11:17:12 +0200340 },
341 },
342 },
343 },
344 },
345 },
346 StorageClassName: *pvc.Spec.StorageClassName,
347 PersistentVolumeReclaimPolicy: *storageClass.ReclaimPolicy,
348 },
349 }
350
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200351 _, err = p.Kubernetes.CoreV1().PersistentVolumes().Create(context.Background(), vol, metav1.CreateOptions{})
352 if err != nil && !apierrs.IsAlreadyExists(err) {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200353 return fmt.Errorf("failed to create PV object: %w", err)
354 }
355 return nil
356}
357
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200358// processPV looks at a single PV item from the queue and checks if it has been
359// released and needs to be deleted. If yes it deletes the associated quota,
360// directory and the PV object and logs the result to the recorder.
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200361func (p *csiProvisionerServer) processPV(key string) error {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200362 _, name, err := cache.SplitMetaNamespaceKey(key)
363 if err != nil {
364 return fmt.Errorf("invalid resource key: %s", key)
365 }
366 pv, err := p.pvInformer.Lister().Get(name)
367 if apierrs.IsNotFound(err) {
368 return nil // nothing to do, no error
369 } else if err != nil {
370 return fmt.Errorf("failed to get PV for processing: %w", err)
371 }
372
373 if !p.isOurPV(pv) {
374 return nil
375 }
376 if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete || pv.Status.Phase != "Released" {
377 return nil
378 }
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200379 volumePath := p.volumePath(pv.Spec.CSI.VolumeHandle)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200380
381 // Log deletes for auditing purposes
Serge Bazanskic7359672020-10-30 16:38:57 +0100382 p.logger.Infof("Deleting persistent volume %s", pv.Spec.CSI.VolumeHandle)
Lorenz Brun37050122021-03-30 14:00:27 +0200383 switch *pv.Spec.VolumeMode {
384 case "", v1.PersistentVolumeFilesystem:
385 if err := fsquota.SetQuota(volumePath, 0, 0); err != nil {
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200386 // We record these here manually since a successful deletion
387 // removes the PV we'd be attaching them to.
Lorenz Brun37050122021-03-30 14:00:27 +0200388 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to remove quota: %v", err)
389 return fmt.Errorf("failed to remove quota: %w", err)
390 }
391 if err := os.RemoveAll(volumePath); err != nil && !os.IsNotExist(err) {
392 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to delete volume: %v", err)
393 return fmt.Errorf("failed to delete volume: %w", err)
394 }
395 case v1.PersistentVolumeBlock:
396 if err := os.Remove(volumePath); err != nil && !os.IsNotExist(err) {
397 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to delete volume: %v", err)
398 return fmt.Errorf("failed to delete volume: %w", err)
399 }
400 default:
401 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Invalid volume mode \"%v\"", *pv.Spec.VolumeMode)
402 return fmt.Errorf("invalid volume mode \"%v\"", *pv.Spec.VolumeMode)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200403 }
404
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200405 err = p.Kubernetes.CoreV1().PersistentVolumes().Delete(context.Background(), pv.Name, metav1.DeleteOptions{})
Lorenz Brunb15abad2020-04-16 11:17:12 +0200406 if err != nil && !apierrs.IsNotFound(err) {
407 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to delete PV object from K8s API: %v", err)
408 return fmt.Errorf("failed to delete PV object: %w", err)
409 }
410 return nil
411}