blob: 92caa592d8e5420616a16384402456d307cd96da [file] [log] [blame]
Tim Windelschmidt6d33a432025-02-04 14:34:25 +01001// Copyright The Monogon Project Authors.
Lorenz Brunb15abad2020-04-16 11:17:12 +02002// SPDX-License-Identifier: Apache-2.0
Lorenz Brunb15abad2020-04-16 11:17:12 +02003
4package kubernetes
5
6import (
7 "context"
8 "errors"
9 "fmt"
Lorenz Brunb15abad2020-04-16 11:17:12 +020010 "os"
11 "path/filepath"
12
Lorenz Brun37050122021-03-30 14:00:27 +020013 "golang.org/x/sys/unix"
Lorenz Brunb15abad2020-04-16 11:17:12 +020014 v1 "k8s.io/api/core/v1"
15 storagev1 "k8s.io/api/storage/v1"
16 apierrs "k8s.io/apimachinery/pkg/api/errors"
17 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
18 "k8s.io/client-go/informers"
19 coreinformers "k8s.io/client-go/informers/core/v1"
20 storageinformers "k8s.io/client-go/informers/storage/v1"
21 "k8s.io/client-go/kubernetes"
22 "k8s.io/client-go/kubernetes/scheme"
23 typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
24 "k8s.io/client-go/tools/cache"
25 "k8s.io/client-go/tools/record"
26 ref "k8s.io/client-go/tools/reference"
27 "k8s.io/client-go/util/workqueue"
Serge Bazanskic2c7ad92020-07-13 17:20:09 +020028
Serge Bazanski3c5d0632024-09-12 10:49:12 +000029 "source.monogon.dev/go/logging"
Serge Bazanski31370b02021-01-07 16:31:14 +010030 "source.monogon.dev/metropolis/node/core/localstorage"
Tim Windelschmidt9f21f532024-05-07 15:14:20 +020031 "source.monogon.dev/osbase/fsquota"
Tim Windelschmidt9f21f532024-05-07 15:14:20 +020032 "source.monogon.dev/osbase/supervisor"
Lorenz Brunb15abad2020-04-16 11:17:12 +020033)
34
// inodeCapacityRatio describes the ratio between the byte capacity of a volume
// and its inode capacity. One inode on XFS is 512 bytes and by default 25%
// (1/4) of capacity can be used for metadata, hence the inode limit is derived
// as capacity / (4 * 512).
const inodeCapacityRatio = 4 * 512
39
// csiProvisionerServerName is the identity of this provisioner. It is used as
// the StorageClass provisioner name, in provisioner annotations on PVCs/PVs,
// and as the CSI driver name on provisioned PVs.
//
// ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to
// match csiProvisionerServerName declared.
const csiProvisionerServerName = "dev.monogon.metropolis.vfs"
Lorenz Brunb15abad2020-04-16 11:17:12 +020043
// csiProvisionerServer is responsible for the provisioning and deprovisioning
// of CSI-based container volumes. It runs on all nodes and watches PVCs for
// ones assigned to the node it's running on and fulfills the provisioning
// request by creating a directory, applying a quota and creating the
// corresponding PV. When the PV is released and its retention policy is
// Delete, the directory and the PV resource are deleted.
type csiProvisionerServer struct {
	// NodeName is the Kubernetes name of the node this provisioner runs on.
	// Only PVCs scheduled onto this node (and PVs pinned to it) are handled.
	NodeName string
	// Kubernetes is the API client used to create/delete PV objects and to
	// emit events.
	Kubernetes kubernetes.Interface
	// InformerFactory is the shared factory from which the informers below
	// are created in Run.
	InformerFactory informers.SharedInformerFactory
	// VolumesDirectory is the local directory under which all volume data
	// (one directory or image file per volume) lives.
	VolumesDirectory *localstorage.DataVolumesDirectory

	// claimQueue and pvQueue carry namespace/name keys of PVCs and PVs to
	// process; each is drained by a single worker to avoid locking.
	claimQueue workqueue.TypedRateLimitingInterface[string]
	pvQueue    workqueue.TypedRateLimitingInterface[string]
	// recorder emits Kubernetes events attached to PVCs/PVs for debugging
	// via `kubectl describe`.
	recorder record.EventRecorder
	// Informers over the resources this provisioner reacts to; set up in Run.
	pvcInformer          coreinformers.PersistentVolumeClaimInformer
	pvInformer           coreinformers.PersistentVolumeInformer
	storageClassInformer storageinformers.StorageClassInformer
	logger               logging.Leveled
}
64
// runCSIProvisioner runs the main provisioning machinery. It consists of a
// bunch of informers which keep track of the events happening on the
// Kubernetes control plane and informs us when something happens. If anything
// happens to PVCs or PVs, we enqueue the identifier of that resource in a work
// queue. Queues are being worked on by only one worker to limit load and avoid
// complicated locking infrastructure. Failed items are requeued.
func (p *csiProvisionerServer) Run(ctx context.Context) error {
	// The recorder is used to log Kubernetes events for successful or failed
	// volume provisions. These events then show up in `kubectl describe pvc`
	// and can be used by admins to debug issues with this provisioner.
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: p.Kubernetes.CoreV1().Events("")})
	p.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: csiProvisionerServerName, Host: p.NodeName})

	p.pvInformer = p.InformerFactory.Core().V1().PersistentVolumes()
	p.pvcInformer = p.InformerFactory.Core().V1().PersistentVolumeClaims()
	p.storageClassInformer = p.InformerFactory.Storage().V1().StorageClasses()

	p.claimQueue = workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]())
	p.pvQueue = workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]())

	p.logger = supervisor.Logger(ctx)

	// Watch errors are only logged; the reflector retries internally, so no
	// further action is taken here.
	p.pvcInformer.Informer().SetWatchErrorHandler(func(_ *cache.Reflector, err error) {
		p.logger.Errorf("pvcInformer watch error: %v", err)
	})
	// Adds and updates of PVCs/PVs simply (re-)enqueue the resource key; the
	// process functions are written to be safe against redundant enqueues.
	p.pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: p.enqueueClaim,
		UpdateFunc: func(old, new interface{}) {
			p.enqueueClaim(new)
		},
	})
	p.pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: p.enqueuePV,
		UpdateFunc: func(old, new interface{}) {
			p.enqueuePV(new)
		},
	})
	p.pvInformer.Informer().SetWatchErrorHandler(func(_ *cache.Reflector, err error) {
		p.logger.Errorf("pvInformer watch error: %v", err)
	})

	p.storageClassInformer.Informer().SetWatchErrorHandler(func(_ *cache.Reflector, err error) {
		p.logger.Errorf("storageClassInformer watch error: %v", err)
	})

	// Informer goroutines run until ctx is canceled (Done channel closes).
	go p.pvcInformer.Informer().Run(ctx.Done())
	go p.pvInformer.Informer().Run(ctx.Done())
	go p.storageClassInformer.Informer().Run(ctx.Done())

	// These will self-terminate once the queues are shut down
	go p.processQueueItems(p.claimQueue, func(key string) error {
		return p.processPVC(key)
	})
	go p.processQueueItems(p.pvQueue, func(key string) error {
		return p.processPV(key)
	})

	supervisor.Signal(ctx, supervisor.SignalHealthy)
	<-ctx.Done()
	// Shutting down the queues unblocks Get() in processQueueItems, letting
	// the worker goroutines exit.
	p.claimQueue.ShutDown()
	p.pvQueue.ShutDown()
	return nil
}
129
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200130// isOurPVC checks if the given PVC is is to be provisioned by this provisioner
131// and has been scheduled onto this node
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200132func (p *csiProvisionerServer) isOurPVC(pvc *v1.PersistentVolumeClaim) bool {
133 if pvc.ObjectMeta.Annotations["volume.beta.kubernetes.io/storage-provisioner"] != csiProvisionerServerName {
134 return false
135 }
136 if pvc.ObjectMeta.Annotations["volume.kubernetes.io/selected-node"] != p.NodeName {
137 return false
138 }
139 return true
Lorenz Brunb15abad2020-04-16 11:17:12 +0200140}
141
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200142// isOurPV checks if the given PV has been provisioned by this provisioner and
143// has been scheduled onto this node
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200144func (p *csiProvisionerServer) isOurPV(pv *v1.PersistentVolume) bool {
145 if pv.ObjectMeta.Annotations["pv.kubernetes.io/provisioned-by"] != csiProvisionerServerName {
146 return false
147 }
148 if pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values[0] != p.NodeName {
149 return false
150 }
151 return true
Lorenz Brunb15abad2020-04-16 11:17:12 +0200152}
153
154// enqueueClaim adds an added/changed PVC to the work queue
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200155func (p *csiProvisionerServer) enqueueClaim(obj interface{}) {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200156 key, err := cache.MetaNamespaceKeyFunc(obj)
157 if err != nil {
Serge Bazanskic7359672020-10-30 16:38:57 +0100158 p.logger.Errorf("Not queuing PVC because key could not be derived: %v", err)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200159 return
160 }
161 p.claimQueue.Add(key)
162}
163
164// enqueuePV adds an added/changed PV to the work queue
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200165func (p *csiProvisionerServer) enqueuePV(obj interface{}) {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200166 key, err := cache.MetaNamespaceKeyFunc(obj)
167 if err != nil {
Serge Bazanskic7359672020-10-30 16:38:57 +0100168 p.logger.Errorf("Not queuing PV because key could not be derived: %v", err)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200169 return
170 }
171 p.pvQueue.Add(key)
172}
173
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200174// processQueueItems gets items from the given work queue and calls the process
175// function for each of them. It self- terminates once the queue is shut down.
Jan Schär896b1382025-01-15 13:54:26 +0100176func (p *csiProvisionerServer) processQueueItems(queue workqueue.TypedRateLimitingInterface[string], process func(key string) error) {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200177 for {
178 obj, shutdown := queue.Get()
179 if shutdown {
180 return
181 }
182
Jan Schär896b1382025-01-15 13:54:26 +0100183 func(obj string) {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200184 defer queue.Done(obj)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200185
Jan Schär896b1382025-01-15 13:54:26 +0100186 if err := process(obj); err != nil {
187 p.logger.Warningf("Failed processing item %q, requeueing (numrequeues: %d): %v", obj, queue.NumRequeues(obj), err)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200188 queue.AddRateLimited(obj)
Jan Schär1947e9b2025-01-16 16:45:03 +0100189 } else {
190 queue.Forget(obj)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200191 }
Lorenz Brunb15abad2020-04-16 11:17:12 +0200192 }(obj)
193 }
194}
195
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200196// volumePath gets the path where the volume is stored.
197func (p *csiProvisionerServer) volumePath(volumeID string) string {
198 return filepath.Join(p.VolumesDirectory.FullPath(), volumeID)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200199}
200
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200201// processPVC looks at a single PVC item from the queue, determines if it needs
202// to be provisioned and logs the provisioning result to the recorder
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200203func (p *csiProvisionerServer) processPVC(key string) error {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200204 namespace, name, err := cache.SplitMetaNamespaceKey(key)
205 if err != nil {
206 return fmt.Errorf("invalid resource key: %s", key)
207 }
208 pvc, err := p.pvcInformer.Lister().PersistentVolumeClaims(namespace).Get(name)
209 if apierrs.IsNotFound(err) {
210 return nil // nothing to do, no error
211 } else if err != nil {
212 return fmt.Errorf("failed to get PVC for processing: %w", err)
213 }
214
215 if !p.isOurPVC(pvc) {
216 return nil
217 }
218
219 if pvc.Status.Phase != "Pending" {
220 // If the PVC is not pending, we don't need to provision anything
221 return nil
222 }
223
224 storageClass, err := p.storageClassInformer.Lister().Get(*pvc.Spec.StorageClassName)
225 if err != nil {
Serge Bazanskice19acc2023-03-21 16:28:07 +0100226 return fmt.Errorf("could not get storage class: %w", err)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200227 }
228
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200229 if storageClass.Provisioner != csiProvisionerServerName {
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200230 // We're not responsible for this PVC. Can only happen if
231 // controller-manager makes a mistake setting the annotations, but
232 // we're bailing here anyways for safety.
Lorenz Brunb15abad2020-04-16 11:17:12 +0200233 return nil
234 }
235
236 err = p.provisionPVC(pvc, storageClass)
237
238 if err != nil {
239 p.recorder.Eventf(pvc, v1.EventTypeWarning, "ProvisioningFailed", "Failed to provision PV: %v", err)
240 return err
241 }
242 p.recorder.Eventf(pvc, v1.EventTypeNormal, "Provisioned", "Successfully provisioned PV")
243
244 return nil
245}
246
// provisionPVC creates the directory where the volume lives, sets a quota for
// the requested amount of storage and creates the PV object representing this
// new volume
func (p *csiProvisionerServer) provisionPVC(pvc *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass) error {
	claimRef, err := ref.GetReference(scheme.Scheme, pvc)
	if err != nil {
		return fmt.Errorf("failed to get reference to PVC: %w", err)
	}

	storageReq := pvc.Spec.Resources.Requests[v1.ResourceStorage]
	if storageReq.IsZero() {
		return fmt.Errorf("PVC is not requesting any storage, this is not supported")
	}
	// AsInt64 fails if the quantity does not fit in an int64; capacity is
	// needed as a plain integer for quota and fallocate below.
	capacity, ok := storageReq.AsInt64()
	if !ok {
		return fmt.Errorf("PVC requesting more than 2^63 bytes of storage, this is not supported")
	}

	// The volume ID is derived from the PVC's UID, which is unique within
	// the cluster, so IDs cannot collide in the volumes directory.
	volumeID := "pvc-" + string(pvc.ObjectMeta.UID)
	volumePath := p.volumePath(volumeID)

	p.logger.Infof("Creating local PV %s", volumeID)

	// NOTE(review): dereferencing pvc.Spec.VolumeMode assumes the API server
	// has defaulted the field to a non-nil value — TODO confirm.
	switch *pvc.Spec.VolumeMode {
	case "", v1.PersistentVolumeFilesystem:
		// NOTE(review): 0644 on a directory lacks the execute (search) bit;
		// presumably access happens as root where this doesn't matter —
		// confirm, otherwise 0755 would be expected here.
		if err := os.Mkdir(volumePath, 0644); err != nil && !os.IsExist(err) {
			return fmt.Errorf("failed to create volume directory: %w", err)
		}
		// If the directory already existed, refuse to reuse it unless it is
		// empty — otherwise stale data would leak into the new volume.
		files, err := os.ReadDir(volumePath)
		if err != nil {
			return fmt.Errorf("failed to list files in newly-created volume: %w", err)
		}
		if len(files) > 0 {
			return errors.New("newly-created volume already contains data, bailing")
		}
		// Cap both byte and inode usage; the inode limit is derived from the
		// byte capacity via inodeCapacityRatio.
		if err := fsquota.SetQuota(volumePath, uint64(capacity), uint64(capacity)/inodeCapacityRatio); err != nil {
			return fmt.Errorf("failed to update quota: %w", err)
		}
	case v1.PersistentVolumeBlock:
		// Block volumes are backed by a regular file; fallocate reserves the
		// full requested capacity up front.
		imageFile, err := os.OpenFile(volumePath, os.O_CREATE|os.O_RDWR, 0644)
		if err != nil {
			return fmt.Errorf("failed to create volume image: %w", err)
		}
		defer imageFile.Close()
		if err := unix.Fallocate(int(imageFile.Fd()), 0, 0, capacity); err != nil {
			return fmt.Errorf("failed to fallocate() volume image: %w", err)
		}
	default:
		return fmt.Errorf("VolumeMode \"%s\" is unsupported", *pvc.Spec.VolumeMode)
	}

	// Build the PV object mirroring the claim: exact requested capacity, CSI
	// source pointing at this driver/volume, and node affinity pinning the
	// volume to this node (it only exists on local storage).
	vol := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: volumeID,
			Annotations: map[string]string{
				"pv.kubernetes.io/provisioned-by": csiProvisionerServerName},
		},
		Spec: v1.PersistentVolumeSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Capacity: v1.ResourceList{
				v1.ResourceStorage: storageReq, // We're always giving the exact amount
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				CSI: &v1.CSIPersistentVolumeSource{
					Driver:       csiProvisionerServerName,
					VolumeHandle: volumeID,
				},
			},
			ClaimRef:   claimRef,
			VolumeMode: pvc.Spec.VolumeMode,
			NodeAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      "kubernetes.io/hostname",
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{p.NodeName},
								},
							},
						},
					},
				},
			},
			StorageClassName:              *pvc.Spec.StorageClassName,
			PersistentVolumeReclaimPolicy: *storageClass.ReclaimPolicy,
		},
	}

	// AlreadyExists is tolerated so that requeued/duplicate work items are
	// idempotent.
	_, err = p.Kubernetes.CoreV1().PersistentVolumes().Create(context.Background(), vol, metav1.CreateOptions{})
	if err != nil && !apierrs.IsAlreadyExists(err) {
		return fmt.Errorf("failed to create PV object: %w", err)
	}
	return nil
}
343
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200344// processPV looks at a single PV item from the queue and checks if it has been
345// released and needs to be deleted. If yes it deletes the associated quota,
346// directory and the PV object and logs the result to the recorder.
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200347func (p *csiProvisionerServer) processPV(key string) error {
Lorenz Brunb15abad2020-04-16 11:17:12 +0200348 _, name, err := cache.SplitMetaNamespaceKey(key)
349 if err != nil {
350 return fmt.Errorf("invalid resource key: %s", key)
351 }
352 pv, err := p.pvInformer.Lister().Get(name)
353 if apierrs.IsNotFound(err) {
354 return nil // nothing to do, no error
355 } else if err != nil {
356 return fmt.Errorf("failed to get PV for processing: %w", err)
357 }
358
359 if !p.isOurPV(pv) {
360 return nil
361 }
362 if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete || pv.Status.Phase != "Released" {
363 return nil
364 }
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200365 volumePath := p.volumePath(pv.Spec.CSI.VolumeHandle)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200366
367 // Log deletes for auditing purposes
Serge Bazanskic7359672020-10-30 16:38:57 +0100368 p.logger.Infof("Deleting persistent volume %s", pv.Spec.CSI.VolumeHandle)
Lorenz Brun37050122021-03-30 14:00:27 +0200369 switch *pv.Spec.VolumeMode {
370 case "", v1.PersistentVolumeFilesystem:
371 if err := fsquota.SetQuota(volumePath, 0, 0); err != nil {
Serge Bazanski216fe7b2021-05-21 18:36:16 +0200372 // We record these here manually since a successful deletion
373 // removes the PV we'd be attaching them to.
Lorenz Brun37050122021-03-30 14:00:27 +0200374 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to remove quota: %v", err)
375 return fmt.Errorf("failed to remove quota: %w", err)
376 }
377 if err := os.RemoveAll(volumePath); err != nil && !os.IsNotExist(err) {
378 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to delete volume: %v", err)
379 return fmt.Errorf("failed to delete volume: %w", err)
380 }
381 case v1.PersistentVolumeBlock:
382 if err := os.Remove(volumePath); err != nil && !os.IsNotExist(err) {
383 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to delete volume: %v", err)
384 return fmt.Errorf("failed to delete volume: %w", err)
385 }
386 default:
387 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Invalid volume mode \"%v\"", *pv.Spec.VolumeMode)
388 return fmt.Errorf("invalid volume mode \"%v\"", *pv.Spec.VolumeMode)
Lorenz Brunb15abad2020-04-16 11:17:12 +0200389 }
390
Serge Bazanskic2c7ad92020-07-13 17:20:09 +0200391 err = p.Kubernetes.CoreV1().PersistentVolumes().Delete(context.Background(), pv.Name, metav1.DeleteOptions{})
Lorenz Brunb15abad2020-04-16 11:17:12 +0200392 if err != nil && !apierrs.IsNotFound(err) {
393 p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to delete PV object from K8s API: %v", err)
394 return fmt.Errorf("failed to delete PV object: %w", err)
395 }
396 return nil
397}