// Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kubernetes

import (
20 "context"
21 "errors"
22 "fmt"
23 "io/ioutil"
24 "os"
25 "path/filepath"
26
Lorenz Brunb15abad2020-04-16 11:17:12 +020027 v1 "k8s.io/api/core/v1"
28 storagev1 "k8s.io/api/storage/v1"
29 apierrs "k8s.io/apimachinery/pkg/api/errors"
30 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
31 "k8s.io/client-go/informers"
32 coreinformers "k8s.io/client-go/informers/core/v1"
33 storageinformers "k8s.io/client-go/informers/storage/v1"
34 "k8s.io/client-go/kubernetes"
35 "k8s.io/client-go/kubernetes/scheme"
36 typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
37 "k8s.io/client-go/tools/cache"
38 "k8s.io/client-go/tools/record"
39 ref "k8s.io/client-go/tools/reference"
40 "k8s.io/client-go/util/workqueue"
Serge Bazanskic2c7ad92020-07-13 17:20:09 +020041
Serge Bazanski77cb6c52020-12-19 00:09:22 +010042 "git.monogon.dev/source/nexantic.git/metropolis/node/common/fsquota"
43 "git.monogon.dev/source/nexantic.git/metropolis/node/common/supervisor"
44 "git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage"
45 "git.monogon.dev/source/nexantic.git/metropolis/node/core/logtree"
Lorenz Brunb15abad2020-04-16 11:17:12 +020046)

// ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to match csiProvisionerServerName declared there.
const csiProvisionerServerName = "dev.monogon.metropolis.vfs"

// csiProvisionerServer is responsible for the provisioning and deprovisioning of CSI-based container volumes. It runs
// on all nodes and watches PVCs for ones assigned to the node it's running on and fulfills the provisioning request by
// creating a directory, applying a quota and creating the corresponding PV. When the PV is released and its reclaim
// policy is Delete, the directory and the PV resource are deleted.
type csiProvisionerServer struct {
	NodeName         string
	Kubernetes       kubernetes.Interface
	InformerFactory  informers.SharedInformerFactory
	VolumesDirectory *localstorage.DataVolumesDirectory

	claimQueue           workqueue.RateLimitingInterface
	pvQueue              workqueue.RateLimitingInterface
	recorder             record.EventRecorder
	pvcInformer          coreinformers.PersistentVolumeClaimInformer
	pvInformer           coreinformers.PersistentVolumeInformer
	storageClassInformer storageinformers.StorageClassInformer
	logger               logtree.LeveledLogger
}

// Run runs the main provisioning machinery. It consists of a bunch of informers which keep track of the events
// happening on the Kubernetes control plane and inform us when something happens. If anything happens to PVCs or
// PVs, we enqueue the identifier of that resource in a work queue. Queues are worked on by only one worker to limit
// load and avoid complicated locking infrastructure. Failed items are requeued.
func (p *csiProvisionerServer) Run(ctx context.Context) error {
	// The recorder is used to log Kubernetes events for successful or failed volume provisions. These events then
	// show up in `kubectl describe pvc` and can be used by admins to debug issues with this provisioner.
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: p.Kubernetes.CoreV1().Events("")})
	p.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: csiProvisionerServerName, Host: p.NodeName})

	p.pvInformer = p.InformerFactory.Core().V1().PersistentVolumes()
	p.pvcInformer = p.InformerFactory.Core().V1().PersistentVolumeClaims()
	p.storageClassInformer = p.InformerFactory.Storage().V1().StorageClasses()

	p.claimQueue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	p.pvQueue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

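	// Enqueue on adds and updates alike: a PVC or PV often only becomes actionable (Pending, Released) on an
	// update, and informer resyncs are also delivered as updates.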
	p.pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: p.enqueueClaim,
		UpdateFunc: func(old, new interface{}) {
			p.enqueueClaim(new)
		},
	})
	p.pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: p.enqueuePV,
		UpdateFunc: func(old, new interface{}) {
			p.enqueuePV(new)
		},
	})
	p.logger = supervisor.Logger(ctx)

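	// The informer goroutines stop on their own once ctx is cancelled.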
	go p.pvcInformer.Informer().Run(ctx.Done())
	go p.pvInformer.Informer().Run(ctx.Done())
	go p.storageClassInformer.Informer().Run(ctx.Done())

	// These will self-terminate once the queues are shut down
	go p.processQueueItems(p.claimQueue, func(key string) error {
		return p.processPVC(key)
	})
	go p.processQueueItems(p.pvQueue, func(key string) error {
		return p.processPV(key)
	})

	supervisor.Signal(ctx, supervisor.SignalHealthy)
	<-ctx.Done()
	p.claimQueue.ShutDown()
	p.pvQueue.ShutDown()
	return nil
}

// isOurPVC checks if the given PVC is to be provisioned by this provisioner and has been scheduled onto this node
func (p *csiProvisionerServer) isOurPVC(pvc *v1.PersistentVolumeClaim) bool {
	if pvc.ObjectMeta.Annotations["volume.beta.kubernetes.io/storage-provisioner"] != csiProvisionerServerName {
		return false
	}
	if pvc.ObjectMeta.Annotations["volume.kubernetes.io/selected-node"] != p.NodeName {
		return false
	}
	return true
}

// isOurPV checks if the given PV has been provisioned by this provisioner and has been scheduled onto this node
func (p *csiProvisionerServer) isOurPV(pv *v1.PersistentVolume) bool {
	if pv.ObjectMeta.Annotations["pv.kubernetes.io/provisioned-by"] != csiProvisionerServerName {
		return false
	}
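	// Indexing into the node affinity is safe here: this function only runs on PVs carrying our provisioned-by
	// annotation, and provisionPVC always creates those with exactly one node selector term and match expression.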
	if pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values[0] != p.NodeName {
		return false
	}
	return true
}

// enqueueClaim adds an added/changed PVC to the work queue
func (p *csiProvisionerServer) enqueueClaim(obj interface{}) {
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		p.logger.Errorf("Not queuing PVC because key could not be derived: %v", err)
		return
	}
	p.claimQueue.Add(key)
}

// enqueuePV adds an added/changed PV to the work queue
func (p *csiProvisionerServer) enqueuePV(obj interface{}) {
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		p.logger.Errorf("Not queuing PV because key could not be derived: %v", err)
		return
	}
	p.pvQueue.Add(key)
}

// processQueueItems gets items from the given work queue and calls the process function for each of them. It
// self-terminates once the queue is shut down.
func (p *csiProvisionerServer) processQueueItems(queue workqueue.RateLimitingInterface, process func(key string) error) {
	for {
		obj, shutdown := queue.Get()
		if shutdown {
			return
		}

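		// Each item is processed inside a closure so that the deferred queue.Done always marks the item as
		// handled, whichever return path is taken.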
		func(obj interface{}) {
			defer queue.Done(obj)
			key, ok := obj.(string)
			if !ok {
				queue.Forget(obj)
				p.logger.Errorf("Expected string in workqueue, got %+v", obj)
				return
			}

			if err := process(key); err != nil {
				p.logger.Warningf("Failed processing item %q, requeueing (numrequeues: %d): %v", key, queue.NumRequeues(obj), err)
				queue.AddRateLimited(obj)
				// Skip the Forget below: forgetting a requeued item would reset its rate-limiting backoff counter.
				return
			}

			queue.Forget(obj)
		}(obj)
	}
}

// volumePath gets the path where the volume is stored.
func (p *csiProvisionerServer) volumePath(volumeID string) string {
	return filepath.Join(p.VolumesDirectory.FullPath(), volumeID)
}

// processPVC looks at a single PVC item from the queue, determines if it needs to be provisioned and logs the
// provisioning result to the recorder
func (p *csiProvisionerServer) processPVC(key string) error {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return fmt.Errorf("invalid resource key: %s", key)
	}
	pvc, err := p.pvcInformer.Lister().PersistentVolumeClaims(namespace).Get(name)
	if apierrs.IsNotFound(err) {
		return nil // nothing to do, no error
	} else if err != nil {
		return fmt.Errorf("failed to get PVC for processing: %w", err)
	}

	if !p.isOurPVC(pvc) {
		return nil
	}

	if pvc.Status.Phase != "Pending" {
		// If the PVC is not pending, we don't need to provision anything
		return nil
	}

	storageClass, err := p.storageClassInformer.Lister().Get(*pvc.Spec.StorageClassName)
	if err != nil {
		return fmt.Errorf("failed to get StorageClass for PVC: %w", err)
	}

	if storageClass.Provisioner != csiProvisionerServerName {
		// We're not responsible for this PVC. Can only happen if controller-manager makes a mistake
		// setting the annotations, but we're bailing here anyways for safety.
		return nil
	}

	err = p.provisionPVC(pvc, storageClass)

	if err != nil {
		p.recorder.Eventf(pvc, v1.EventTypeWarning, "ProvisioningFailed", "Failed to provision PV: %v", err)
		return err
	}
	p.recorder.Eventf(pvc, v1.EventTypeNormal, "Provisioned", "Successfully provisioned PV")

	return nil
}

// provisionPVC creates the directory where the volume lives, sets a quota for the requested amount of storage and
// creates the PV object representing this new volume
func (p *csiProvisionerServer) provisionPVC(pvc *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass) error {
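	// Build a reference to the claim so the new PV can be pre-bound to exactly this PVC via ClaimRef.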
	claimRef, err := ref.GetReference(scheme.Scheme, pvc)
	if err != nil {
		return fmt.Errorf("failed to get reference to PVC: %w", err)
	}

	storageReq := pvc.Spec.Resources.Requests[v1.ResourceStorage]
	if storageReq.IsZero() {
		return fmt.Errorf("PVC is not requesting any storage, this is not supported")
	}
	capacity, ok := storageReq.AsInt64()
	if !ok {
		return fmt.Errorf("PVC requesting more than 2^63 bytes of storage, this is not supported")
	}

	if *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
		return fmt.Errorf("block PVCs are currently not supported by Metropolis")
	}

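	// Name the volume after the claim's UID, following the pvc-<uid> convention used by upstream Kubernetes
	// provisioners. UIDs are unique across time, so a new volume can never collide with an old one's name.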
	volumeID := "pvc-" + string(pvc.ObjectMeta.UID)
	volumePath := p.volumePath(volumeID)

	p.logger.Infof("Creating local PV %s", volumeID)
	// 0755: the directory needs the execute bit so it can be traversed once bind-mounted into a pod.
	if err := os.Mkdir(volumePath, 0755); err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create volume directory: %w", err)
	}
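	// Mkdir tolerates a pre-existing directory (so requeues stay idempotent); double-check that a leftover
	// directory doesn't already contain data before handing it out as a fresh volume.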
	files, err := ioutil.ReadDir(volumePath)
	if err != nil {
		return fmt.Errorf("failed to list files in newly-created volume: %w", err)
	}
	if len(files) > 0 {
		return errors.New("newly-created volume already contains data, bailing")
	}
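	// Enforce the requested capacity via a filesystem quota. The inode limit of 100000 is a fixed value, since
	// PVCs have no way of requesting a specific number of inodes.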
	if err := fsquota.SetQuota(volumePath, uint64(capacity), 100000); err != nil {
		return fmt.Errorf("failed to update quota: %w", err)
	}

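	// Construct the PV backing this volume. ClaimRef pre-binds it to the PVC, and the required node affinity pins
	// it to this node, since the data only exists on the local filesystem here.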
	vol := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: volumeID,
			Annotations: map[string]string{
				"pv.kubernetes.io/provisioned-by": csiProvisionerServerName},
		},
		Spec: v1.PersistentVolumeSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Capacity: v1.ResourceList{
				v1.ResourceStorage: storageReq, // We're always giving the exact amount
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				CSI: &v1.CSIPersistentVolumeSource{
					Driver:       csiProvisionerServerName,
					VolumeHandle: volumeID,
				},
			},
			ClaimRef: claimRef,
			NodeAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      "kubernetes.io/hostname",
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{p.NodeName},
								},
							},
						},
					},
				},
			},
			StorageClassName:              *pvc.Spec.StorageClassName,
			PersistentVolumeReclaimPolicy: *storageClass.ReclaimPolicy,
		},
	}

	_, err = p.Kubernetes.CoreV1().PersistentVolumes().Create(context.Background(), vol, metav1.CreateOptions{})
	if err != nil && !apierrs.IsAlreadyExists(err) {
		return fmt.Errorf("failed to create PV object: %w", err)
	}
	return nil
}

// processPV looks at a single PV item from the queue and checks if it has been released and needs to be deleted. If
// so, it deletes the associated quota, directory and PV object and logs the result to the recorder.
func (p *csiProvisionerServer) processPV(key string) error {
	_, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return fmt.Errorf("invalid resource key: %s", key)
	}
	pv, err := p.pvInformer.Lister().Get(name)
	if apierrs.IsNotFound(err) {
		return nil // nothing to do, no error
	} else if err != nil {
		return fmt.Errorf("failed to get PV for processing: %w", err)
	}

	if !p.isOurPV(pv) {
		return nil
	}
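	// Only volumes that have been Released and whose reclaim policy is Delete get cleaned up; Retain volumes and
	// still-bound volumes are left alone.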
	if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete || pv.Status.Phase != "Released" {
		return nil
	}
	volumePath := p.volumePath(pv.Spec.CSI.VolumeHandle)

	// Log deletes for auditing purposes
	p.logger.Infof("Deleting persistent volume %s", pv.Spec.CSI.VolumeHandle)
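	// Clear the quota before deleting the directory; zero limits remove the quota from the directory.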
	if err := fsquota.SetQuota(volumePath, 0, 0); err != nil {
		// We record these here manually since a successful deletion removes the PV we'd be attaching them to
		p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to remove quota: %v", err)
		return fmt.Errorf("failed to remove quota: %w", err)
	}
	err = os.RemoveAll(volumePath)
	if os.IsNotExist(err) {
		return nil
	} else if err != nil {
		p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to delete volume: %v", err)
		return fmt.Errorf("failed to delete volume: %w", err)
	}

	err = p.Kubernetes.CoreV1().PersistentVolumes().Delete(context.Background(), pv.Name, metav1.DeleteOptions{})
	if err != nil && !apierrs.IsNotFound(err) {
		p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to delete PV object from K8s API: %v", err)
		return fmt.Errorf("failed to delete PV object: %w", err)
	}
	return nil
}