*: reflow comments to 80 characters

This reformats the entire Metropolis codebase to have comments no longer
than 80 characters, implementing CR/66.

This has been done half manually, as we don't have a good integration
between commentwrap and Bazel, but that can be implemented if we decide
to go for this tool/limit.

Change-Id: If1fff0b093ef806f5dc00551c11506e8290379d0
diff --git a/metropolis/node/kubernetes/clusternet/clusternet.go b/metropolis/node/kubernetes/clusternet/clusternet.go
index 74fe1ba..85a78a1 100644
--- a/metropolis/node/kubernetes/clusternet/clusternet.go
+++ b/metropolis/node/kubernetes/clusternet/clusternet.go
@@ -14,15 +14,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package clusternet implements a WireGuard-based overlay network for Kubernetes. It relies on controller-manager's
-// IPAM to assign IP ranges to nodes and on Kubernetes' Node objects to distribute the Node IPs and public keys.
+// Package clusternet implements a WireGuard-based overlay network for
+// Kubernetes. It relies on controller-manager's IPAM to assign IP ranges to
+// nodes and on Kubernetes' Node objects to distribute the Node IPs and public
+// keys.
 //
-// It sets up a single WireGuard network interface and routes the entire ClusterCIDR into that network interface,
-// relying on WireGuard's AllowedIPs mechanism to look up the correct peer node to send the traffic to. This means
-// that the routing table doesn't change and doesn't have to be separately managed. When clusternet is started
-// it annotates its WireGuard public key onto its node object.
-// For each node object that's created or updated on the K8s apiserver it checks if a public key annotation is set and
-// if yes a peer with that public key, its InternalIP as endpoint and the CIDR for that node as AllowedIPs is created.
+// It sets up a single WireGuard network interface and routes the entire
+// ClusterCIDR into that network interface, relying on WireGuard's AllowedIPs
+// mechanism to look up the correct peer node to send the traffic to. This
+// means that the routing table doesn't change and doesn't have to be
+// separately managed. When clusternet is started it annotates its WireGuard
+// public key onto its node object.
+// For each node object that's created or updated on the K8s apiserver it
+// checks if a public key annotation is set and, if so, creates a peer with
+// that public key, the node's InternalIP as endpoint, and the node's CIDR as
+// AllowedIPs.
 package clusternet
 
 import (
@@ -45,8 +51,8 @@
 
 	common "source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/localstorage"
-	"source.monogon.dev/metropolis/pkg/logtree"
 	"source.monogon.dev/metropolis/pkg/jsonpatch"
+	"source.monogon.dev/metropolis/pkg/logtree"
 	"source.monogon.dev/metropolis/pkg/supervisor"
 )
 
@@ -67,7 +73,8 @@
 	logger   logtree.LeveledLogger
 }
 
-// ensureNode creates/updates the corresponding WireGuard peer entry for the given node objet
+// ensureNode creates/updates the corresponding WireGuard peer entry for the
+// given node object
 func (s *Service) ensureNode(newNode *corev1.Node) error {
 	if newNode.Name == s.NodeName {
 		// Node doesn't need to connect to itself
@@ -108,8 +115,8 @@
 	}
 	allowedIPs = append(allowedIPs, net.IPNet{IP: internalIP, Mask: net.CIDRMask(32, 32)})
 	s.logger.V(1).Infof("Adding/Updating WireGuard peer node %s, endpoint %s, allowedIPs %+v", newNode.Name, internalIP.String(), allowedIPs)
-	// WireGuard's kernel side has create/update semantics on peers by default. So we can just add the peer multiple
-	// times to update it.
+	// WireGuard's kernel side has create/update semantics on peers by default.
+	// So we can just add the peer multiple times to update it.
 	err = s.wgClient.ConfigureDevice(clusterNetDeviceName, wgtypes.Config{
 		Peers: []wgtypes.PeerConfig{{
 			PublicKey:         pubKey,
@@ -124,7 +131,8 @@
 	return nil
 }
 
-// removeNode removes the corresponding WireGuard peer entry for the given node object
+// removeNode removes the corresponding WireGuard peer entry for the given node
+// object
 func (s *Service) removeNode(oldNode *corev1.Node) error {
 	if oldNode.Name == s.NodeName {
 		// Node doesn't need to connect to itself
@@ -150,7 +158,8 @@
 	return nil
 }
 
-// ensureOnDiskKey loads the private key from disk or (if none exists) generates one and persists it.
+// ensureOnDiskKey loads the private key from disk or (if none exists)
+// generates one and persists it.
 func (s *Service) ensureOnDiskKey() error {
 	keyRaw, err := s.DataDirectory.Key.Read()
 	if os.IsNotExist(err) {
@@ -176,7 +185,8 @@
 	return nil
 }
 
-// annotateThisNode annotates the node (as defined by NodeName) with the wireguard public key of this node.
+// annotateThisNode annotates the node (as defined by NodeName) with the
+// WireGuard public key of this node.
 func (s *Service) annotateThisNode(ctx context.Context) error {
 	patch := []jsonpatch.JsonPatchOp{{
 		Operation: "add",
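
A minimal sketch of the create/update peer semantics described above. The
interface name "clusternet0", port and addresses below are made up for
illustration; only the wgctrl calls mirror what ensureNode does.

package main

import (
	"log"
	"net"

	"golang.zx2c4.com/wireguard/wgctrl"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func main() {
	wg, err := wgctrl.New()
	if err != nil {
		log.Fatal(err)
	}
	defer wg.Close()

	// Stand-in for the public key read from the peer node's annotation.
	peerPriv, _ := wgtypes.GeneratePrivateKey()
	peerPub := peerPriv.PublicKey()

	_, podCIDR, _ := net.ParseCIDR("10.8.3.0/24") // peer node's pod CIDR
	nodeIP := net.ParseIP("192.0.2.10")           // peer node's InternalIP

	// The kernel keys peers by public key with create/update semantics:
	// configuring the same peer again simply updates it, so no separate
	// "does this peer exist yet" bookkeeping is needed.
	err = wg.ConfigureDevice("clusternet0", wgtypes.Config{
		Peers: []wgtypes.PeerConfig{{
			PublicKey:         peerPub,
			Endpoint:          &net.UDPAddr{IP: nodeIP, Port: 51820},
			ReplaceAllowedIPs: true,
			AllowedIPs: []net.IPNet{
				*podCIDR,
				{IP: nodeIP, Mask: net.CIDRMask(32, 32)},
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}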
diff --git a/metropolis/node/kubernetes/containerd/main.go b/metropolis/node/kubernetes/containerd/main.go
index 6b99081..c3dd4a0 100644
--- a/metropolis/node/kubernetes/containerd/main.go
+++ b/metropolis/node/kubernetes/containerd/main.go
@@ -76,10 +76,12 @@
 
 			n, err := io.Copy(supervisor.RawLogger(ctx), fifo)
 			if n == 0 && err == nil {
-				// Hack because pipes/FIFOs can return zero reads when nobody is writing. To avoid busy-looping,
-				// sleep a bit before retrying. This does not loose data since the FIFO internal buffer will
-				// stall writes when it becomes full. 10ms maximum stall in a non-latency critical process (reading
-				// debug logs) is not an issue for us.
+				// Hack because pipes/FIFOs can return zero reads when nobody
+				// is writing. To avoid busy-looping, sleep a bit before
+				// retrying. This does not lose data since the FIFO internal
+				// buffer will stall writes when it becomes full. 10ms maximum
+				// stall in a non-latency critical process (reading debug logs)
+				// is not an issue for us.
 				time.Sleep(10 * time.Millisecond)
 			} else if err != nil {
 				return fmt.Errorf("log pump failed: %v", err)
@@ -88,14 +90,18 @@
 	}
 }
 
-// runPreseed loads OCI bundles in tar form from preseedNamespacesDir into containerd at startup.
-// This can be run multiple times, containerd will automatically dedup the layers.
-// containerd uses namespaces to keep images (and everything else) separate so to define where the images will be loaded
-// to they need to be in a folder named after the namespace they should be loaded into.
-// containerd's CRI plugin (which is built as part of containerd) uses a hardcoded namespace ("k8s.io") for everything
-// accessed through CRI, so if an image should be available on K8s it needs to be in that namespace.
-// As an example if image helloworld should be loaded for use with Kubernetes, the OCI bundle needs to be at
-// <preseedNamespacesDir>/k8s.io/helloworld.tar. No tagging beyond what's in the bundle is performed.
+// runPreseed loads OCI bundles in tar form from preseedNamespacesDir into
+// containerd at startup. It can be run multiple times; containerd will
+// automatically dedup the layers. containerd uses namespaces to keep images
+// (and everything else) separate, so to define which namespace the images
+// will be loaded into, they need to be in a folder named after that
+// namespace. containerd's CRI plugin (which is built as part of containerd)
+// uses a hardcoded namespace ("k8s.io") for everything accessed through CRI,
+// so if an image should be available on K8s it needs to be in that
+// namespace. As an example, if image helloworld should be loaded for use
+// with Kubernetes, the OCI bundle needs to be at
+// <preseedNamespacesDir>/k8s.io/helloworld.tar. No tagging beyond what's in
+// the bundle is performed.
 func (s *Service) runPreseed(ctx context.Context) error {
 	client, err := ctr.New(s.EphemeralVolume.ClientSocket.FullPath())
 	if err != nil {
@@ -126,8 +132,9 @@
 			if err != nil {
 				return fmt.Errorf("failed to open preseed image \"%v\": %w", image.Name(), err)
 			}
-			// defer in this loop is fine since we're never going to preseed more than ~1M images which is where our
-			// file descriptor limit is.
+			// defer in this loop is fine since we're never going to preseed
+			// more than ~1M images which is where our file descriptor limit
+			// is.
 			defer imageFile.Close()
 			importedImages, err := client.Import(ctxWithNS, imageFile)
 			if err != nil {
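
A minimal sketch of the namespaced import described above. The socket and
bundle paths are illustrative; the real code derives both from the service's
ephemeral volume and walks every namespace directory.

package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Images meant for Kubernetes must land in the CRI plugin's hardcoded
	// namespace, "k8s.io".
	ctx := namespaces.WithNamespace(context.Background(), "k8s.io")

	f, err := os.Open("/containerd/preseed/k8s.io/helloworld.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Importing the same bundle twice is fine: containerd dedups the layers.
	imgs, err := client.Import(ctx, f)
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgs {
		log.Printf("imported %s", img.Name)
	}
}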
diff --git a/metropolis/node/kubernetes/csi.go b/metropolis/node/kubernetes/csi.go
index efd8af4..4893254 100644
--- a/metropolis/node/kubernetes/csi.go
+++ b/metropolis/node/kubernetes/csi.go
@@ -39,9 +39,10 @@
 	"source.monogon.dev/metropolis/pkg/supervisor"
 )
 
-// Derived from K8s spec for acceptable names, but shortened to 130 characters to avoid issues with
-// maximum path length. We don't provision longer names so this applies only if you manually create
-// a volume with a name of more than 130 characters.
+// Derived from K8s spec for acceptable names, but shortened to 130 characters
+// to avoid issues with maximum path length. We don't provision longer names so
+// this applies only if you manually create a volume with a name of more than
+// 130 characters.
 var acceptableNames = regexp.MustCompile("^[a-z][a-z0-9-.]{0,128}[a-z0-9]$")
 
 type csiPluginServer struct {
@@ -64,8 +65,8 @@
 	pluginServer := grpc.NewServer()
 	csi.RegisterIdentityServer(pluginServer, s)
 	csi.RegisterNodeServer(pluginServer, s)
-	// Enable graceful shutdown since we don't have long-running RPCs and most of them shouldn't and can't be
-	// cancelled anyways.
+	// Enable graceful shutdown since we don't have long-running RPCs and most
+	// of them shouldn't and can't be cancelled anyways.
 	if err := supervisor.Run(ctx, "csi-node", supervisor.GRPCServer(pluginServer, pluginListener, true)); err != nil {
 		return err
 	}
diff --git a/metropolis/node/kubernetes/hyperkube/main.go b/metropolis/node/kubernetes/hyperkube/main.go
index 3b4ac08..10c7a2d 100644
--- a/metropolis/node/kubernetes/hyperkube/main.go
+++ b/metropolis/node/kubernetes/hyperkube/main.go
@@ -56,9 +56,9 @@
 
 	hyperkubeCommand, allCommandFns := NewHyperKubeCommand()
 
-	// TODO: once we switch everything over to Cobra commands, we can go back to calling
-	// cliflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
-	// normalize func and add the go flag set by hand.
+	// TODO: once we switch everything over to Cobra commands, we can go back
+	// to calling cliflag.InitFlags() (by removing its pflag.Parse() call). For
+	// now, we have to set the normalize func and add the go flag set by hand.
 	pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
 	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
 	// cliflag.InitFlags()
@@ -89,8 +89,8 @@
 
 // NewHyperKubeCommand is the entry point for hyperkube
 func NewHyperKubeCommand() (*cobra.Command, []func() *cobra.Command) {
-	// these have to be functions since the command is polymorphic. Cobra wants you to be top level
-	// command to get executed
+	// these have to be functions since the command is polymorphic. Cobra wants
+	// you to be the top level command to get executed
 	apiserver := func() *cobra.Command { return kubeapiserver.NewAPIServerCommand() }
 	controller := func() *cobra.Command { return kubecontrollermanager.NewControllerManagerCommand() }
 	scheduler := func() *cobra.Command { return kubescheduler.NewSchedulerCommand() }
diff --git a/metropolis/node/kubernetes/kubelet.go b/metropolis/node/kubernetes/kubelet.go
index 953a201..d966e5d 100644
--- a/metropolis/node/kubernetes/kubelet.go
+++ b/metropolis/node/kubernetes/kubelet.go
@@ -102,8 +102,8 @@
 			"memory": "300Mi",
 		},
 
-		// We're not going to use this, but let's make it point to a known-empty directory in case anybody manages to
-		// trigger it.
+		// We're not going to use this, but let's make it point to a
+		// known-empty directory in case anybody manages to trigger it.
 		VolumePluginDir: s.EphemeralDirectory.FlexvolumePlugins.FullPath(),
 	}
 }
diff --git a/metropolis/node/kubernetes/nfproxy/nfproxy.go b/metropolis/node/kubernetes/nfproxy/nfproxy.go
index ac13af1..5fcc5b5 100644
--- a/metropolis/node/kubernetes/nfproxy/nfproxy.go
+++ b/metropolis/node/kubernetes/nfproxy/nfproxy.go
@@ -14,8 +14,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package nfproxy is a Kubernetes Service IP proxy based exclusively on the Linux nftables interface.
-// It uses netfilter's NAT capabilities to accept traffic on service IPs and DNAT it to the respective endpoint.
+// Package nfproxy is a Kubernetes Service IP proxy based exclusively on the
+// Linux nftables interface. It uses netfilter's NAT capabilities to accept
+// traffic on service IPs and DNAT it to the respective endpoint.
 package nfproxy
 
 import (
@@ -42,7 +43,8 @@
 )
 
 type Service struct {
-	// Traffic in ClusterCIDR is assumed to be originated inside the cluster and will not be SNATed
+	// Traffic in ClusterCIDR is assumed to originate inside the cluster
+	// and will not be SNATed
 	ClusterCIDR net.IPNet
 	// A Kubernetes ClientSet with read access to endpoints and services
 	ClientSet kubernetes.Interface
diff --git a/metropolis/node/kubernetes/pki/kubernetes.go b/metropolis/node/kubernetes/pki/kubernetes.go
index 467f718..0e59306 100644
--- a/metropolis/node/kubernetes/pki/kubernetes.go
+++ b/metropolis/node/kubernetes/pki/kubernetes.go
@@ -56,9 +56,11 @@
 	// APIServer client certificate used to authenticate to kubelets.
 	APIServerKubeletClient KubeCertificateName = "apiserver-kubelet-client"
 
-	// Kubernetes Controller manager client certificate, used to authenticate to the apiserver.
+	// Kubernetes Controller manager client certificate, used to authenticate
+	// to the apiserver.
 	ControllerManagerClient KubeCertificateName = "controller-manager-client"
-	// Kubernetes Controller manager server certificate, used to run its HTTP server.
+	// Kubernetes Controller manager server certificate, used to run its HTTP
+	// server.
 	ControllerManager KubeCertificateName = "controller-manager"
 
 	// Kubernetes Scheduler client certificate, used to authenticate to the apiserver.
@@ -66,12 +68,12 @@
 	// Kubernetes scheduler server certificate, used to run its HTTP server.
 	Scheduler KubeCertificateName = "scheduler"
 
-	// Root-on-kube (system:masters) client certificate. Used to control the apiserver (and resources) by Metropolis
-	// internally.
+	// Root-on-kube (system:masters) client certificate. Used to control the
+	// apiserver (and resources) by Metropolis internally.
 	Master KubeCertificateName = "master"
 
 	// OpenAPI Kubernetes Aggregation CA.
-	// See: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#ca-reusage-and-conflicts
+	//   https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#ca-reusage-and-conflicts
 	AggregationCA    KubeCertificateName = "aggregation-ca"
 	FrontProxyClient KubeCertificateName = "front-proxy-client"
 )
@@ -79,8 +81,9 @@
 const (
 	// etcdPrefix is where all the PKI data is stored in etcd.
 	etcdPrefix = "/kube-pki/"
-	// serviceAccountKeyName is the etcd path part that is used to store the ServiceAccount authentication secret.
-	// This is not a certificate, just an RSA key.
+	// serviceAccountKeyName is the etcd path part that is used to store the
+	// ServiceAccount authentication secret. This is not a certificate, just an
+	// RSA key.
 	serviceAccountKeyName = "service-account-privkey"
 )
 
@@ -116,7 +119,8 @@
 			"kubernetes.default.svc.cluster.local",
 			"localhost",
 		},
-		[]net.IP{{10, 0, 255, 1}, {127, 0, 0, 1}}, // TODO(q3k): add service network internal apiserver address
+		// TODO(q3k): add service network internal apiserver address
+		[]net.IP{{10, 0, 255, 1}, {127, 0, 0, 1}},
 	))
 	make(IdCA, APIServerKubeletClient, opki.Client("metropolis:apiserver-kubelet-client", nil))
 	make(IdCA, ControllerManagerClient, opki.Client("system:kube-controller-manager", nil))
@@ -131,7 +135,8 @@
 	return &pki
 }
 
-// EnsureAll ensures that all static certificates (and the serviceaccount key) are present on etcd.
+// EnsureAll ensures that all static certificates (and the serviceaccount key)
+// are present on etcd.
 func (k *PKI) EnsureAll(ctx context.Context) error {
 	for n, v := range k.Certificates {
 		k.logger.Infof("Ensuring %s exists", string(n))
@@ -147,8 +152,8 @@
 	return nil
 }
 
-// Kubeconfig generates a kubeconfig blob for a given certificate name. The same lifetime semantics as in .Certificate
-// apply.
+// Kubeconfig generates a kubeconfig blob for a given certificate name. The
+// same lifetime semantics as in .Certificate apply.
 func (k *PKI) Kubeconfig(ctx context.Context, name KubeCertificateName) ([]byte, error) {
 	c, ok := k.Certificates[name]
 	if !ok {
@@ -157,9 +162,11 @@
 	return Kubeconfig(ctx, k.KV, c)
 }
 
-// Certificate retrieves an x509 DER-encoded (but not PEM-wrapped) key and certificate for a given certificate name.
-// If the requested certificate is volatile, it will be created on demand. Otherwise it will be created on etcd (if not
-// present), and retrieved from there.
+// Certificate retrieves an x509 DER-encoded (but not PEM-wrapped) key and
+// certificate for a given certificate name.
+// If the requested certificate is volatile, it will be created on demand.
+// Otherwise it will be created on etcd (if not present), and retrieved from
+// there.
 func (k *PKI) Certificate(ctx context.Context, name KubeCertificateName) (cert, key []byte, err error) {
 	c, ok := k.Certificates[name]
 	if !ok {
@@ -168,7 +175,8 @@
 	return c.Ensure(ctx, k.KV)
 }
 
-// Kubeconfig generates a kubeconfig blob for this certificate. The same lifetime semantics as in .Ensure apply.
+// Kubeconfig generates a kubeconfig blob for this certificate. The same
+// lifetime semantics as in .Ensure apply.
 func Kubeconfig(ctx context.Context, kv clientv3.KV, c *opki.Certificate) ([]byte, error) {
 
 	cert, key, err := c.Ensure(ctx, kv)
@@ -204,11 +212,12 @@
 	return clientcmd.Write(*kubeconfig)
 }
 
-// ServiceAccountKey retrieves (and possibly generates and stores on etcd) the Kubernetes service account key. The
-// returned data is ready to be used by Kubernetes components (in PKIX form).
+// ServiceAccountKey retrieves (and possibly generates and stores on etcd) the
+// Kubernetes service account key. The returned data is ready to be used by
+// Kubernetes components (in PKIX form).
 func (k *PKI) ServiceAccountKey(ctx context.Context) ([]byte, error) {
-	// TODO(q3k): this should be abstracted away once we abstract away etcd access into a library with try-or-create
-	// semantics.
+	// TODO(q3k): this should be abstracted away once we abstract away etcd
+	// access into a library with try-or-create semantics.
 	path := fmt.Sprintf("%s%s.der", etcdPrefix, serviceAccountKeyName)
 
 	// Try loading  key from etcd.
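
A rough, standalone sketch of wrapping DER-encoded material into a kubeconfig
blob with clientcmd, in the spirit of the Kubeconfig helper above. The server
URL and entry names are placeholders, and the PEM wrapping here is only an
assumption about how the DER data ends up in the blob.

package main

import (
	"encoding/pem"
	"log"

	"k8s.io/client-go/tools/clientcmd"
	api "k8s.io/client-go/tools/clientcmd/api"
)

func kubeconfigFromDER(caDER, certDER, keyDER []byte) ([]byte, error) {
	cfg := api.NewConfig()
	cfg.Clusters["default"] = &api.Cluster{
		Server:                   "https://127.0.0.1:6443",
		CertificateAuthorityData: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caDER}),
	}
	cfg.AuthInfos["default"] = &api.AuthInfo{
		ClientCertificateData: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}),
		ClientKeyData:         pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyDER}),
	}
	cfg.Contexts["default"] = &api.Context{Cluster: "default", AuthInfo: "default"}
	cfg.CurrentContext = "default"
	return clientcmd.Write(*cfg)
}

func main() {
	// Placeholder DER blobs; in the real service these come from Certificate().
	out, err := kubeconfigFromDER(nil, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("generated %d bytes of kubeconfig", len(out))
}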
diff --git a/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go b/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
index a437973..ed47f74 100644
--- a/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
+++ b/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
@@ -14,10 +14,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package kvmdevice implements a Kubernetes device plugin for the virtual KVM device. Using the device plugin API
-// allows us to take advantage of the scheduler to locate pods on machines eligible for KVM and also allows granular
-// access control to KVM using quotas instead of needing privileged access.
-// Since KVM devices are virtual, this plugin emulates a huge number of them so that we never run out.
+// Package kvmdevice implements a Kubernetes device plugin for the virtual KVM
+// device. Using the device plugin API allows us to take advantage of the
+// scheduler to locate pods on machines eligible for KVM and also allows
+// granular access control to KVM using quotas instead of needing privileged
+// access.
+// Since KVM devices are virtual, this plugin emulates a huge number of them so
+// that we never run out.
 package kvmdevice
 
 import (
@@ -110,8 +113,9 @@
 	return &response, nil
 }
 
-// deviceNumberFromString gets a Linux device number from a string containing two decimal numbers representing the major
-// and minor device numbers separated by a colon. Whitespace is ignored.
+// deviceNumberFromString gets a Linux device number from a string containing
+// two decimal numbers representing the major and minor device numbers
+// separated by a colon. Whitespace is ignored.
 func deviceNumberFromString(s string) (uint64, error) {
 	kvmDevParts := strings.Split(s, ":")
 	if len(kvmDevParts) != 2 {
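
For illustration, the major:minor parsing described above boils down to the
following standalone sketch. The helper here is a reimplementation for the
example, not the function from this diff.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/sys/unix"
)

func deviceNumberFromString(s string) (uint64, error) {
	parts := strings.Split(s, ":")
	if len(parts) != 2 {
		return 0, fmt.Errorf("expected <major>:<minor>, got %q", s)
	}
	major, err := strconv.ParseUint(strings.TrimSpace(parts[0]), 10, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid major device number: %w", err)
	}
	minor, err := strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid minor device number: %w", err)
	}
	return unix.Mkdev(uint32(major), uint32(minor)), nil
}

func main() {
	// /dev/kvm is conventionally character device 10:232.
	fmt.Println(deviceNumberFromString("10:232"))
}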
diff --git a/metropolis/node/kubernetes/provisioner.go b/metropolis/node/kubernetes/provisioner.go
index 0aa5c66..42edf77 100644
--- a/metropolis/node/kubernetes/provisioner.go
+++ b/metropolis/node/kubernetes/provisioner.go
@@ -46,13 +46,16 @@
 	"source.monogon.dev/metropolis/pkg/supervisor"
 )
 
-// ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to match csiProvisionerServerName declared.
+// ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to
+// match csiProvisionerServerName declared.
 const csiProvisionerServerName = "dev.monogon.metropolis.vfs"
 
-// csiProvisionerServer is responsible for the provisioning and deprovisioning of CSI-based container volumes. It runs on all
-// nodes and watches PVCs for ones assigned to the node it's running on and fulfills the provisioning request by
-// creating a directory, applying a quota and creating the corresponding PV. When the PV is released and its retention
-// policy is Delete, the directory and the PV resource are deleted.
+// csiProvisionerServer is responsible for the provisioning and deprovisioning
+// of CSI-based container volumes. It runs on all nodes and watches PVCs for
+// ones assigned to the node it's running on and fulfills the provisioning
+// request by creating a directory, applying a quota and creating the
+// corresponding PV. When the PV is released and its retention policy is
+// Delete, the directory and the PV resource are deleted.
 type csiProvisionerServer struct {
 	NodeName         string
 	Kubernetes       kubernetes.Interface
@@ -68,13 +71,16 @@
 	logger               logtree.LeveledLogger
 }
 
-// runCSIProvisioner runs the main provisioning machinery. It consists of a bunch of informers which keep track of
-// the events happening on the Kubernetes control plane and informs us when something happens. If anything happens to
-// PVCs or PVs, we enqueue the identifier of that resource in a work queue. Queues are being worked on by only one
-// worker to limit load and avoid complicated locking infrastructure. Failed items are requeued.
+// runCSIProvisioner runs the main provisioning machinery. It consists of a
+// bunch of informers which keep track of the events happening on the
+// Kubernetes control plane and inform us when something happens. If anything
+// happens to PVCs or PVs, we enqueue the identifier of that resource in a work
+// queue. Queues are being worked on by only one worker to limit load and avoid
+// complicated locking infrastructure. Failed items are requeued.
 func (p *csiProvisionerServer) Run(ctx context.Context) error {
-	// The recorder is used to log Kubernetes events for successful or failed volume provisions. These events then
-	// show up in `kubectl describe pvc` and can be used by admins to debug issues with this provisioner.
+	// The recorder is used to log Kubernetes events for successful or failed
+	// volume provisions. These events then show up in `kubectl describe pvc`
+	// and can be used by admins to debug issues with this provisioner.
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: p.Kubernetes.CoreV1().Events("")})
 	p.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: csiProvisionerServerName, Host: p.NodeName})
@@ -119,7 +125,8 @@
 	return nil
 }
 
-// isOurPVC checks if the given PVC is is to be provisioned by this provisioner and has been scheduled onto this node
+// isOurPVC checks if the given PVC is to be provisioned by this provisioner
+// and has been scheduled onto this node
 func (p *csiProvisionerServer) isOurPVC(pvc *v1.PersistentVolumeClaim) bool {
 	if pvc.ObjectMeta.Annotations["volume.beta.kubernetes.io/storage-provisioner"] != csiProvisionerServerName {
 		return false
@@ -130,7 +137,8 @@
 	return true
 }
 
-// isOurPV checks if the given PV has been provisioned by this provisioner and has been scheduled onto this node
+// isOurPV checks if the given PV has been provisioned by this provisioner and
+// has been scheduled onto this node
 func (p *csiProvisionerServer) isOurPV(pv *v1.PersistentVolume) bool {
 	if pv.ObjectMeta.Annotations["pv.kubernetes.io/provisioned-by"] != csiProvisionerServerName {
 		return false
@@ -161,8 +169,8 @@
 	p.pvQueue.Add(key)
 }
 
-// processQueueItems gets items from the given work queue and calls the process function for each of them. It self-
-// terminates once the queue is shut down.
+// processQueueItems gets items from the given work queue and calls the process
+// function for each of them. It self-terminates once the queue is shut down.
 func (p *csiProvisionerServer) processQueueItems(queue workqueue.RateLimitingInterface, process func(key string) error) {
 	for {
 		obj, shutdown := queue.Get()
@@ -194,8 +202,8 @@
 	return filepath.Join(p.VolumesDirectory.FullPath(), volumeID)
 }
 
-// processPVC looks at a single PVC item from the queue, determines if it needs to be provisioned and logs the
-// provisioning result to the recorder
+// processPVC looks at a single PVC item from the queue, determines if it needs
+// to be provisioned and logs the provisioning result to the recorder
 func (p *csiProvisionerServer) processPVC(key string) error {
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
@@ -223,8 +231,9 @@
 	}
 
 	if storageClass.Provisioner != csiProvisionerServerName {
-		// We're not responsible for this PVC. Can only happen if controller-manager makes a mistake
-		// setting the annotations, but we're bailing here anyways for safety.
+		// We're not responsible for this PVC. Can only happen if
+		// controller-manager makes a mistake setting the annotations, but
+		// we're bailing here anyways for safety.
 		return nil
 	}
 
@@ -239,8 +248,9 @@
 	return nil
 }
 
-// provisionPVC creates the directory where the volume lives, sets a quota for the requested amount of storage and
-// creates the PV object representing this new volume
+// provisionPVC creates the directory where the volume lives, sets a quota for
+// the requested amount of storage and creates the PV object representing this
+// new volume
 func (p *csiProvisionerServer) provisionPVC(pvc *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass) error {
 	claimRef, err := ref.GetReference(scheme.Scheme, pvc)
 	if err != nil {
@@ -335,8 +345,9 @@
 	return nil
 }
 
-// processPV looks at a single PV item from the queue and checks if it has been released and needs to be deleted. If yes
-// it deletes the associated quota, directory and the PV object and logs the result to the recorder.
+// processPV looks at a single PV item from the queue and checks if it has been
+// released and needs to be deleted. If so, it deletes the associated quota,
+// directory and the PV object and logs the result to the recorder.
 func (p *csiProvisionerServer) processPV(key string) error {
 	_, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
@@ -362,7 +373,8 @@
 	switch *pv.Spec.VolumeMode {
 	case "", v1.PersistentVolumeFilesystem:
 		if err := fsquota.SetQuota(volumePath, 0, 0); err != nil {
-			// We record these here manually since a successful deletion removes the PV we'd be attaching them to
+			// We record these here manually since a successful deletion
+			// removes the PV we'd be attaching them to.
 			p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to remove quota: %v", err)
 			return fmt.Errorf("failed to remove quota: %w", err)
 		}
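
The queue handling described in the comments above follows the usual client-go
workqueue loop. A standalone sketch of that pattern, not the actual method
body:

package main

import "k8s.io/client-go/util/workqueue"

// processItems drains a work queue with a single worker, requeues failures
// with rate limiting, and terminates once the queue is shut down.
func processItems(queue workqueue.RateLimitingInterface, process func(key string) error) {
	for {
		obj, shutdown := queue.Get()
		if shutdown {
			return
		}
		func() {
			defer queue.Done(obj)
			key, ok := obj.(string)
			if !ok {
				// Unexpected item type, drop it.
				queue.Forget(obj)
				return
			}
			if err := process(key); err != nil {
				queue.AddRateLimited(key)
				return
			}
			queue.Forget(key)
		}()
	}
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	q.Add("default/some-pvc")
	q.ShutDown()
	processItems(q, func(key string) error { return nil })
}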
diff --git a/metropolis/node/kubernetes/reconciler/reconciler.go b/metropolis/node/kubernetes/reconciler/reconciler.go
index 51cc248..1828060 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler.go
@@ -48,20 +48,25 @@
 }
 
 const (
-	// BuiltinLabelKey is used as a k8s label to mark built-in objects (ie., managed by the reconciler)
+	// BuiltinLabelKey is used as a k8s label to mark built-in objects (ie.,
+	// managed by the reconciler)
 	BuiltinLabelKey = "metropolis.monogon.dev/builtin"
-	// BuiltinLabelValue is used as a k8s label value, under the BuiltinLabelKey key.
+	// BuiltinLabelValue is used as a k8s label value, under the
+	// BuiltinLabelKey key.
 	BuiltinLabelValue = "true"
-	// BuiltinRBACPrefix is used to prefix all built-in objects that are part of the rbac/v1 API (eg.
-	// {Cluster,}Role{Binding,} objects). This corresponds to the colon-separated 'namespaces' notation used by
+	// BuiltinRBACPrefix is used to prefix all built-in objects that are part
+// of the rbac/v1 API (eg. {Cluster,}Role{Binding,} objects). This
+	// corresponds to the colon-separated 'namespaces' notation used by
 	// Kubernetes system (system:) objects.
 	BuiltinRBACPrefix = "metropolis:"
 )
 
-// builtinLabels makes a kubernetes-compatible label dictionary (key->value) that is used to mark objects that are
-// built-in into Metropolis (ie., managed by the reconciler). These are then subsequently retrieved by listBuiltins.
-// The extra argument specifies what other labels are to be merged into the the labels dictionary, for convenience. If
-// nil or empty, no extra labels will be applied.
+// builtinLabels makes a kubernetes-compatible label dictionary (key->value)
+// that is used to mark objects that are built-in into Metropolis (ie., managed
+// by the reconciler). These are then subsequently retrieved by listBuiltins.
+// The extra argument specifies what other labels are to be merged into the
+// labels dictionary, for convenience. If nil or empty, no extra labels will be
+// applied.
 func builtinLabels(extra map[string]string) map[string]string {
 	l := map[string]string{
 		BuiltinLabelKey: BuiltinLabelValue,
@@ -74,32 +79,39 @@
 	return l
 }
 
-// listBuiltins returns a k8s client ListOptions structure that allows to retrieve all objects that are built-in into
-// Metropolis currently present in the API server (ie., ones that are to be managed by the reconciler). These are
-// created by applying builtinLabels to their metadata labels.
+// listBuiltins returns a k8s client ListOptions structure that can be used to
+// retrieve all objects built-in into Metropolis that are currently present in
+// the API server (ie., ones that are to be managed by the reconciler). These
+// are created by applying builtinLabels to their metadata labels.
 var listBuiltins = meta.ListOptions{
 	LabelSelector: fmt.Sprintf("%s=%s", BuiltinLabelKey, BuiltinLabelValue),
 }
 
-// builtinRBACName returns a name that is compatible with colon-delimited 'namespaced' objects, a la system:*.
-// These names are to be used by all builtins created as part of the rbac/v1 Kubernetes API.
+// builtinRBACName returns a name that is compatible with colon-delimited
+// 'namespaced' objects, a la system:*.
+// These names are to be used by all builtins created as part of the rbac/v1
+// Kubernetes API.
 func builtinRBACName(name string) string {
 	return BuiltinRBACPrefix + name
 }
 
-// resource is a type of resource to be managed by the reconciler. All builti-ins/reconciled objects must implement
-// this interface to be managed correctly by the reconciler.
+// resource is a type of resource to be managed by the reconciler. All
+// built-ins/reconciled objects must implement this interface to be managed
+// correctly by the reconciler.
 type resource interface {
-	// List returns a list of names of objects current present on the target (ie. k8s API server).
+	// List returns a list of names of objects currently present on the target
+	// (ie. k8s API server).
 	List(ctx context.Context) ([]string, error)
-	// Create creates an object on the target. The el interface{} argument is the black box object returned by the
-	// Expected() call.
+	// Create creates an object on the target. The el interface{} argument is
+	// the black box object returned by the Expected() call.
 	Create(ctx context.Context, el interface{}) error
 	// Delete delete an object, by name, from the target.
 	Delete(ctx context.Context, name string) error
-	// Expected returns a map of all objects expected to be present on the target. The keys are names (which must
-	// correspond to the names returned by List() and used by Delete(), and the values are blackboxes that will then
-	// be passed to the Create() call if their corresponding key (name) does not exist on the target.
+	// Expected returns a map of all objects expected to be present on the
+	// target. The keys are names (which must correspond to the names returned
+	// by List() and used by Delete()), and the values are blackboxes that will
+	// then be passed to the Create() call if their corresponding key (name)
+	// does not exist on the target.
 	Expected() map[string]interface{}
 }
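
Given the resource interface above, a reconciliation pass is essentially a set
difference in both directions. A hypothetical sketch (the actual reconcile
function lives elsewhere in this package):

package main

import "context"

// resource mirrors the interface defined in reconciler.go.
type resource interface {
	List(ctx context.Context) ([]string, error)
	Create(ctx context.Context, el interface{}) error
	Delete(ctx context.Context, name string) error
	Expected() map[string]interface{}
}

// reconcile creates expected objects missing from the target and deletes
// objects present on the target that are no longer expected.
func reconcile(ctx context.Context, r resource) error {
	present, err := r.List(ctx)
	if err != nil {
		return err
	}
	presentSet := make(map[string]bool, len(present))
	for _, name := range present {
		presentSet[name] = true
	}
	expected := r.Expected()
	for name, el := range expected {
		if !presentSet[name] {
			if err := r.Create(ctx, el); err != nil {
				return err
			}
		}
	}
	for _, name := range present {
		if _, ok := expected[name]; !ok {
			if err := r.Delete(ctx, name); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {}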
 
diff --git a/metropolis/node/kubernetes/reconciler/reconciler_test.go b/metropolis/node/kubernetes/reconciler/reconciler_test.go
index b58d4af..ba2f4e8 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler_test.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler_test.go
@@ -28,9 +28,10 @@
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// kubernetesMeta unwraps an interface{} that might contain a Kubernetes resource of type that is managed by the
-// reconciler. Any time a new Kubernetes type is managed by the reconciler, the following switch should be extended
-// to cover that type.
+// kubernetesMeta unwraps an interface{} that might contain a Kubernetes
+// resource of a type that is managed by the reconciler. Any time a new
+// Kubernetes type is managed by the reconciler, the following switch should be
+// extended to cover that type.
 func kubernetesMeta(v interface{}) *meta.ObjectMeta {
 	switch v2 := v.(type) {
 	case *rbac.ClusterRole:
@@ -49,9 +50,11 @@
 	return nil
 }
 
-// TestExpectedNamedCorrectly ensures that all the Expected objects of all resource types have a correspondence between
-// their returned key and inner name. This contract must be met in order for the reconciler to not create runaway
-// resources. This assumes all managed resources are Kubernetes resources.
+// TestExpectedNamedCorrectly ensures that all the Expected objects of all
+// resource types have a correspondence between their returned key and inner
+// name. This contract must be met in order for the reconciler to not create
+// runaway resources. This assumes all managed resources are Kubernetes
+// resources.
 func TestExpectedNamedCorrectly(t *testing.T) {
 	for reconciler, r := range allResources(nil) {
 		for outer, v := range r.Expected() {
@@ -68,10 +71,13 @@
 	}
 }
 
-// TestExpectedLabeledCorrectly ensures that all the Expected objects of all resource types have a Kubernetes metadata
-// label that signifies it's a builtin object, to be retrieved afterwards. This contract must be met in order for the
-// reconciler to not keep overwriting objects (and possibly failing), when a newly created object is not then
-// retrievable using a selector corresponding to this label. This assumes all managed resources are Kubernetes objects.
+// TestExpectedLabeledCorrectly ensures that all the Expected objects of all
+// resource types have a Kubernetes metadata label that signifies it's a
+// builtin object, to be retrieved afterwards. This contract must be met in
+// order for the reconciler to not keep overwriting objects (and possibly
+// failing), when a newly created object is not then retrievable using a
+// selector corresponding to this label. This assumes all managed resources are
+// Kubernetes objects.
 func TestExpectedLabeledCorrectly(t *testing.T) {
 	for reconciler, r := range allResources(nil) {
 		for outer, v := range r.Expected() {
@@ -88,8 +94,9 @@
 	}
 }
 
-// testResource is a resource type used for testing. The inner type is a string that is equal to its name (key).
-// It simulates a target (ie. k8s apiserver mock) that always acts nominally (all resources are created, deleted as
+// testResource is a resource type used for testing. The inner type is a string
+// that is equal to its name (key). It simulates a target (ie. k8s apiserver
+// mock) that always acts nominally (all resources are created, deleted as
 // requested, and the state is consistent with requests).
 type testResource struct {
 	// current is the simulated state of resources in the target.
@@ -124,7 +131,8 @@
 	return exp
 }
 
-// newTestResource creates a test resource with a list of expected resource strings.
+// newTestResource creates a test resource with a list of expected resource
+// strings.
 func newTestResource(want ...string) *testResource {
 	expected := make(map[string]string)
 	for _, w := range want {
@@ -136,8 +144,9 @@
 	}
 }
 
-// currentDiff returns a human-readable string showing the different between the current state and the given resource
-// strings. If no difference is present, the returned string is empty.
+// currentDiff returns a human-readable string showing the difference between
+// the current state and the given resource strings. If no difference is
+// present, the returned string is empty.
 func (r *testResource) currentDiff(want ...string) string {
 	expected := make(map[string]string)
 	for _, w := range want {
@@ -154,8 +163,8 @@
 	return ""
 }
 
-// TestBasicReconciliation ensures that the reconcile function does manipulate a target state based on a set of
-// expected resources.
+// TestBasicReconciliation ensures that the reconcile function does manipulate
+// a target state based on a set of expected resources.
 func TestBasicReconciliation(t *testing.T) {
 	ctx := context.Background()
 	r := newTestResource("foo", "bar", "baz")
diff --git a/metropolis/node/kubernetes/reconciler/resources_csi.go b/metropolis/node/kubernetes/reconciler/resources_csi.go
index c7f7b2b..04d52a8 100644
--- a/metropolis/node/kubernetes/reconciler/resources_csi.go
+++ b/metropolis/node/kubernetes/reconciler/resources_csi.go
@@ -24,9 +24,11 @@
 	"k8s.io/client-go/kubernetes"
 )
 
-// TODO(q3k): this is duplicated with //metropolis/node/kubernetes:provisioner.go; integrate this once provisioner.go
-// gets moved into a subpackage.
-// ONCHANGE(//metropolis/node/kubernetes:provisioner.go): needs to match csiProvisionerName declared.
+// TODO(q3k): this is duplicated with
+// //metropolis/node/kubernetes:provisioner.go; integrate this once
+// provisioner.go gets moved into a subpackage.
+// ONCHANGE(//metropolis/node/kubernetes:provisioner.go): needs to match
+// csiProvisionerName declared.
 const csiProvisionerName = "dev.monogon.metropolis.vfs"
 
 type resourceCSIDrivers struct {
diff --git a/metropolis/node/kubernetes/service.go b/metropolis/node/kubernetes/service.go
index 5c8b037..fe701e6 100644
--- a/metropolis/node/kubernetes/service.go
+++ b/metropolis/node/kubernetes/service.go
@@ -206,7 +206,8 @@
 	return nil
 }
 
-// GetDebugKubeconfig issues a kubeconfig for an arbitrary given identity. Useful for debugging and testing.
+// GetDebugKubeconfig issues a kubeconfig for an arbitrary given identity.
+// Useful for debugging and testing.
 func (s *Service) GetDebugKubeconfig(ctx context.Context, request *apb.GetDebugKubeconfigRequest) (*apb.GetDebugKubeconfigResponse, error) {
 	client, err := s.c.KPKI.VolatileClient(ctx, request.Id, request.Groups)
 	if err != nil {