*: reflow comments to 80 characters

This reformats the entire Metropolis codebase to have comments no longer
than 80 characters, implementing CR/66.

This has been done half-manually, as we don't have a good integration
between commentwrap and Bazel, but that can be implemented if we decide
to go with this tool/limit.

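For context, the wrapping applied here is a plain greedy word wrap of "//"
comment blocks at column 80. A minimal sketch of the transformation (not the
actual commentwrap tool; for brevity it counts a tab as one column):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // reflow greedily wraps the words of one comment block so that each
    // emitted line, including its indent and "// " prefix, fits within
    // limit columns.
    func reflow(indent, text string, limit int) []string {
    	prefix := indent + "// "
    	var lines []string
    	cur := prefix
    	for _, w := range strings.Fields(text) {
    		// Start a new line if appending the next word would overflow.
    		if cur != prefix && len(cur)+1+len(w) > limit {
    			lines = append(lines, cur)
    			cur = prefix
    		}
    		if cur != prefix {
    			cur += " "
    		}
    		cur += w
    	}
    	if cur != prefix {
    		lines = append(lines, cur)
    	}
    	return lines
    }

    func main() {
    	text := "Hardcode localhost since this should never be used to" +
    		" interface with a production node because of missing" +
    		" encryption & authentication"
    	for _, l := range reflow("\t", text, 80) {
    		fmt.Println(l)
    	}
    }
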
Change-Id: If1fff0b093ef806f5dc00551c11506e8290379d0
diff --git a/metropolis/cli/dbg/main.go b/metropolis/cli/dbg/main.go
index 75685e9..557a9f2 100644
--- a/metropolis/cli/dbg/main.go
+++ b/metropolis/cli/dbg/main.go
@@ -40,8 +40,8 @@
 
 func main() {
 	ctx := context.Background()
-	// Hardcode localhost since this should never be used to interface with a production node because of missing
-	// encryption & authentication
+	// Hardcode localhost since this should never be used to interface with a
+	// production node because of missing encryption & authentication
 	grpcClient, err := grpc.Dial("localhost:7837", grpc.WithInsecure())
 	if err != nil {
 		fmt.Printf("Failed to dial debug service (is it running): %v\n", err)
@@ -132,7 +132,8 @@
 			}
 		}
 	case "kubectl":
-		// Always get a kubeconfig with cluster-admin (group system:masters), kubectl itself can impersonate
+		// Always get a kubeconfig with cluster-admin (group system:masters);
+		// kubectl itself can impersonate other identities if needed.
 		kubeconfigFile, err := ioutil.TempFile("", "dbg_kubeconfig")
 		if err != nil {
 			fmt.Fprintf(os.Stderr, "Failed to create kubeconfig temp file: %v\n", err)
@@ -151,10 +152,13 @@
 			os.Exit(1)
 		}
 
-		// This magic sets up everything as if this was just the kubectl binary. It sets the KUBECONFIG environment
-		// variable so that it knows where the Kubeconfig is located and forcibly overwrites the arguments so that
-		// the "wrapper" arguments are not visible to its flags parser. The base code is straight from
-		// https://github.com/kubernetes/kubernetes/blob/master/cmd/kubectl/kubectl.go
+		// This magic sets up everything as if this was just the kubectl
+		// binary. It sets the KUBECONFIG environment variable so that it knows
+		// where the Kubeconfig is located and forcibly overwrites the
+		// arguments so that the "wrapper" arguments are not visible to its
+		// flags parser.
+		// The base code is straight from:
+		//   https://github.com/kubernetes/kubernetes/blob/master/cmd/kubectl/kubectl.go
 		os.Setenv("KUBECONFIG", kubeconfigFile.Name())
 		rand.Seed(time.Now().UnixNano())
 		pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
diff --git a/metropolis/node/build/genosrelease/main.go b/metropolis/node/build/genosrelease/main.go
index e19876e..ad6e3e2 100644
--- a/metropolis/node/build/genosrelease/main.go
+++ b/metropolis/node/build/genosrelease/main.go
@@ -14,8 +14,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// genosrelease provides rudimentary support to generate os-release files following the freedesktop spec
-// (https://www.freedesktop.org/software/systemd/man/os-release.html) from arguments and stamping
+// genosrelease provides rudimentary support to generate os-release files
+// following the freedesktop spec from arguments and Bazel stamping.
+//
+// https://www.freedesktop.org/software/systemd/man/os-release.html
 package main
 
 import (
diff --git a/metropolis/node/build/mkerofs/main.go b/metropolis/node/build/mkerofs/main.go
index a05e440..ea89d67 100644
--- a/metropolis/node/build/mkerofs/main.go
+++ b/metropolis/node/build/mkerofs/main.go
@@ -14,8 +14,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// mkerofs takes a specification in the form of a prototext file (see fsspec next to this) and assembles an
-// EROFS filesystem according to it. The output is fully reproducible.
+// mkerofs takes a specification in the form of a prototext file (see fsspec
+// next to this) and assembles an EROFS filesystem according to it. The output
+// is fully reproducible.
 package main
 
 import (
@@ -99,9 +100,11 @@
 	children map[string]*entrySpec
 }
 
-// pathRef gets the entrySpec at the leaf of the given path, inferring directories if necessary
+// pathRef gets the entrySpec at the leaf of the given path, inferring
+// directories if necessary
 func (s *entrySpec) pathRef(p string) *entrySpec {
-	// This block gets a path array starting at the root of the filesystem. The root folder is the zero-length array.
+	// This block gets a path array starting at the root of the filesystem. The
+	// root folder is the zero-length array.
 	pathParts := strings.Split(path.Clean("./"+p), "/")
 	if pathParts[0] == "." {
 		pathParts = pathParts[1:]
diff --git a/metropolis/node/build/mkimage/main.go b/metropolis/node/build/mkimage/main.go
index 5546055..f41d643 100644
--- a/metropolis/node/build/mkimage/main.go
+++ b/metropolis/node/build/mkimage/main.go
@@ -84,7 +84,8 @@
 	}
 
 	table := &gpt.Table{
-		// This is appropriate at least for virtio disks. Might need to be adjusted for real ones.
+		// This is appropriate at least for virtio disks. Might need to be
+		// adjusted for real ones.
 		LogicalSectorSize:  512,
 		PhysicalSectorSize: 512,
 		ProtectiveMBR:      true,
@@ -165,7 +166,8 @@
 		log.Fatalf("os.Open(%q): %v", src, err)
 	}
 	defer source.Close()
-	// If this is streamed (e.g. using io.Copy) it exposes a bug in diskfs, so do it in one go.
+	// If this is streamed (e.g. using io.Copy) it exposes a bug in diskfs, so
+	// do it in one go.
 	data, err := ioutil.ReadAll(source)
 	if err != nil {
 		log.Fatalf("Reading %q: %v", src, err)
diff --git a/metropolis/node/core/consensus/ca/ca.go b/metropolis/node/core/consensus/ca/ca.go
index 6bcc19b..e0c56b0 100644
--- a/metropolis/node/core/consensus/ca/ca.go
+++ b/metropolis/node/core/consensus/ca/ca.go
@@ -73,19 +73,21 @@
 	CACert     *x509.Certificate
 	CACertRaw  []byte
 
-	// bootstrapIssued are certificates that have been issued by the CA before it has been successfully Saved to etcd.
+	// bootstrapIssued are certificates that have been issued by the CA before
+	// it has been successfully Saved to etcd.
 	bootstrapIssued [][]byte
-	// canBootstrapIssue is set on CAs that have been created by New and not yet stored to etcd. If not set,
-	// certificates cannot be issued in-memory.
+	// canBootstrapIssue is set on CAs that have been created by New and not
+	// yet stored to etcd. If not set, certificates cannot be issued in-memory.
 	canBootstrapIssue bool
 }
 
-// Workaround for https://github.com/golang/go/issues/26676 in Go's crypto/x509. Specifically Go
-// violates Section 4.2.1.2 of RFC 5280 without this.
+// Workaround for https://github.com/golang/go/issues/26676 in Go's
+// crypto/x509. Specifically Go violates Section 4.2.1.2 of RFC 5280 without
+// this.
 // Fixed for 1.15 in https://go-review.googlesource.com/c/go/+/227098/.
 //
-// Taken from https://github.com/FiloSottile/mkcert/blob/master/cert.go#L295 written by one of Go's
-// crypto engineers (BSD 3-clause).
+// Taken from https://github.com/FiloSottile/mkcert/blob/master/cert.go#L295
+// written by one of Go's crypto engineers (BSD 3-clause).
 func calculateSKID(pubKey crypto.PublicKey) ([]byte, error) {
 	spkiASN1, err := x509.MarshalPKIXPublicKey(pubKey)
 	if err != nil {
@@ -104,8 +106,9 @@
 	return skid[:], nil
 }
 
-// New creates a new certificate authority with the given common name. The newly created CA will be stored in memory
-// until committed to etcd by calling .Save.
+// New creates a new certificate authority with the given common name. The
+// newly created CA will be stored in memory until committed to etcd by calling
+// .Save.
 func New(name string) (*CA, error) {
 	pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
 	if err != nil {
@@ -159,7 +162,8 @@
 	resp, err := kv.Txn(ctx).Then(
 		clientv3.OpGet(pathCACertificate),
 		clientv3.OpGet(pathCAKey),
-		// We only read the CRL to ensure it exists on etcd (and early fail on inconsistency)
+		// We only read the CRL to ensure it exists on etcd (and fail early on
+		// inconsistency).
 		clientv3.OpGet(pathCACRL)).Commit()
 	if err != nil {
 		return nil, fmt.Errorf("failed to retrieve CA from etcd: %w", err)
@@ -198,7 +202,8 @@
 	}, nil
 }
 
-// Save stores a newly created CA into etcd, committing both the CA data and any certificates issued until then.
+// Save stores a newly created CA into etcd, committing both the CA data and
+// any certificates issued until then.
 func (c *CA) Save(ctx context.Context, kv clientv3.KV) error {
 	crl, err := c.makeCRL(nil)
 	if err != nil {
@@ -233,8 +238,9 @@
 	return nil
 }
 
-// Issue issues a certificate. If kv is non-nil, the newly issued certificate will be immediately stored to etcd,
-// otherwise it will be kept in memory (until .Save is called). Certificates can only be issued to memory on
+// Issue issues a certificate. If kv is non-nil, the newly issued certificate
+// will be immediately stored to etcd, otherwise it will be kept in memory
+// (until .Save is called). Certificates can only be issued to memory on
 // newly-created CAs that have not been saved to etcd yet.
 func (c *CA) Issue(ctx context.Context, kv clientv3.KV, commonName string) (cert []byte, privkey []byte, err error) {
 	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 127)
@@ -297,9 +303,10 @@
 	return crl, nil
 }
 
-// Revoke revokes a certificate by hostname. The selected hostname will be added to the CRL stored in etcd. This call
-// might fail (safely) if a simultaneous revoke happened that caused the CRL to be bumped. The call can be then retried
-// safely.
+// Revoke revokes a certificate by hostname. The selected hostname will be
+// added to the CRL stored in etcd. This call might fail (safely) if a
+// simultaneous revoke happened that caused the CRL to be bumped. The call can
+// then be retried safely.
 func (c *CA) Revoke(ctx context.Context, kv clientv3.KV, hostname string) error {
 	res, err := kv.Txn(ctx).Then(
 		clientv3.OpGet(pathCACRL),
@@ -383,8 +390,9 @@
 	return nil
 }
 
-// WaitCRLChange returns a channel that will receive a CRLUpdate any time the remote CRL changed. Immediately after
-// calling this method, the current CRL is retrieved from the cluster and put into the channel.
+// WaitCRLChange returns a channel that will receive a CRLUpdate any time the
+// remote CRL changed. Immediately after calling this method, the current CRL
+// is retrieved from the cluster and put into the channel.
 func (c *CA) WaitCRLChange(ctx context.Context, kv clientv3.KV, w clientv3.Watcher) <-chan CRLUpdate {
 	C := make(chan CRLUpdate)
 
@@ -424,16 +432,20 @@
 	return C
 }
 
-// CRLUpdate is emitted for every remote CRL change, and spuriously on ever new WaitCRLChange.
+// CRLUpdate is emitted for every remote CRL change, and spuriously on every
+// new WaitCRLChange.
 type CRLUpdate struct {
-	// The new (or existing, in the case of the first call) CRL. If nil, Err will be set.
+	// The new (or existing, in the case of the first call) CRL. If nil, Err
+	// will be set.
 	CRL []byte
-	// If set, an error occurred and the WaitCRLChange call must be restarted. If set, CRL will be nil.
+	// If set, an error occurred and the WaitCRLChange call must be restarted.
+	// If set, CRL will be nil.
 	Err error
 }
 
-// GetCurrentCRL returns the current CRL for the CA. This should only be used for one-shot operations like
-// bootstrapping a new node that doesn't yet have access to etcd - otherwise, WaitCRLChange shoulde be used.
+// GetCurrentCRL returns the current CRL for the CA. This should only be used
+// for one-shot operations like bootstrapping a new node that doesn't yet have
+// access to etcd - otherwise, WaitCRLChange should be used.
 func (c *CA) GetCurrentCRL(ctx context.Context, kv clientv3.KV) ([]byte, error) {
 	initial, err := kv.Get(ctx, pathCACRL)
 	if err != nil {
diff --git a/metropolis/node/core/consensus/consensus.go b/metropolis/node/core/consensus/consensus.go
index 683db19..ed44140 100644
--- a/metropolis/node/core/consensus/consensus.go
+++ b/metropolis/node/core/consensus/consensus.go
@@ -14,19 +14,22 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package consensus implements a managed etcd cluster member service, with a self-hosted CA system for issuing peer
-// certificates. Currently each Metropolis node runs an etcd member, and connects to the etcd member locally over a
-// domain socket.
+// Package consensus implements a managed etcd cluster member service, with a
+// self-hosted CA system for issuing peer certificates. Currently each
+// Metropolis node runs an etcd member, and connects to the etcd member locally
+// over a domain socket.
 //
 // The service supports two modes of startup:
-//  - initializing a new cluster, by bootstrapping the CA in memory, starting a cluster, committing the CA to etcd
-//    afterwards, and saving the new node's certificate to local storage
-//  - joining an existing cluster, using certificates from local storage and loading the CA from etcd. This flow is also
-//    used when the node joins a cluster for the first time (then the certificates required must be provisioned
-//    externally before starting the consensus service).
+//  - initializing a new cluster, by bootstrapping the CA in memory, starting a
+//    cluster, committing the CA to etcd afterwards, and saving the new node's
+//    certificate to local storage
+//  - joining an existing cluster, using certificates from local storage and
+//    loading the CA from etcd. This flow is also used when the node joins a
+//    cluster for the first time (then the certificates required must be
+//    provisioned externally before starting the consensus service).
 //
-// Regardless of how the etcd member service was started, the resulting running service is further managed and used
-// in the same way.
+// Regardless of how the etcd member service was started, the resulting running
+// service is further managed and used in the same way.
 //
 package consensus
 
@@ -59,8 +62,9 @@
 	// The configuration with which the service was started. This is immutable.
 	config *Config
 
-	// stateMu guards state. This is locked internally on public methods of Service that require access to state. The
-	// state might be recreated on service restart.
+	// stateMu guards state. This is locked internally on public methods of
+	// Service that require access to state. The state might be recreated on
+	// service restart.
 	stateMu sync.Mutex
 	state   *state
 }
@@ -71,8 +75,8 @@
 	ready atomic.Bool
 
 	ca *ca.CA
-	// cl is an etcd client that loops back to the localy running etcd server. This runs over the Client unix domain
-	// socket that etcd starts.
+	// cl is an etcd client that loops back to the locally running etcd server.
+	// This runs over the Client unix domain socket that etcd starts.
 	cl *clientv3.Client
 }
 
@@ -82,16 +86,19 @@
 	// Ephemeral directory for etcd.
 	Ephemeral *localstorage.EphemeralConsensusDirectory
 
-	// Name is the cluster name. This must be the same amongst all etcd members within one cluster.
+	// Name is the cluster name. This must be the same amongst all etcd members
+	// within one cluster.
 	Name string
-	// NewCluster selects whether the etcd member will start a new cluster and bootstrap a CA and the first member
-	// certificate, or load existing PKI certificates from disk.
+	// NewCluster selects whether the etcd member will start a new cluster and
+	// bootstrap a CA and the first member certificate, or load existing PKI
+	// certificates from disk.
 	NewCluster bool
-	// Port is the port at which this cluster member will listen for other members. If zero, defaults to the global
-	// Metropolis setting.
+	// Port is the port at which this cluster member will listen for other
+	// members. If zero, defaults to the global Metropolis setting.
 	Port int
 
-	// externalHost is used by tests to override the address at which etcd should listen for peer connections.
+	// externalHost is used by tests to override the address at which etcd
+	// should listen for peer connections.
 	externalHost string
 }
 
@@ -101,8 +108,8 @@
 	}
 }
 
-// configure transforms the service configuration into an embedded etcd configuration. This is pure and side effect
-// free.
+// configure transforms the service configuration into an embedded etcd
+// configuration. This is pure and side-effect free.
 func (s *Service) configure(ctx context.Context) (*embed.Config, error) {
 	if err := s.config.Ephemeral.MkdirAll(0700); err != nil {
 		return nil, fmt.Errorf("failed to create ephemeral directory: %w", err)
@@ -166,8 +173,8 @@
 	return cfg, nil
 }
 
-// Run is a Supervisor runnable that starts the etcd member service. It will become healthy once the member joins the
-// cluster successfully.
+// Run is a Supervisor runnable that starts the etcd member service. It will
+// become healthy once the member joins the cluster successfully.
 func (s *Service) Run(ctx context.Context) error {
 	st := &state{
 		ready: *atomic.NewBool(false),
@@ -298,8 +305,8 @@
 	return ctx.Err()
 }
 
-// watchCRL is a sub-runnable of the etcd cluster member service that updates the on-local-storage CRL to match the
-// newest available version in etcd.
+// watchCRL is a sub-runnable of the etcd cluster member service that updates
+// the on-local-storage CRL to match the newest available version in etcd.
 func (s *Service) watchCRL(ctx context.Context) error {
 	s.stateMu.Lock()
 	cl := s.state.cl
@@ -339,9 +346,10 @@
 				continue
 			}
 
-			// We always call PromoteMember since the metadata necessary to decide if we should is private.
-			// Luckily etcd already does sanity checks internally and will refuse to promote nodes that aren't
-			// connected or are still behind on transactions.
+			// We always call PromoteMember since the metadata necessary to
+			// decide if we should is private. Luckily etcd already does
+			// sanity checks internally and will refuse to promote nodes that
+			// aren't connected or are still behind on transactions.
 			if _, err := st.etcd.Server.PromoteMember(ctx, uint64(member.ID)); err != nil {
 				supervisor.Logger(ctx).Infof("Failed to promote consensus node %s: %v", member.Name, err)
 			} else {
@@ -371,7 +379,8 @@
 }
 
 func (s *Service) WaitReady(ctx context.Context) error {
-	// TODO(q3k): reimplement the atomic ready flag as an event synchronization mechanism
+	// TODO(q3k): reimplement the atomic ready flag as an event synchronization
+	// mechanism
 	if s.IsReady() {
 		return nil
 	}
@@ -411,8 +420,9 @@
 	return s.state.cl.Cluster
 }
 
-// MemberInfo returns information about this etcd cluster member: its ID and name. This will block until this
-// information is available (ie. the cluster status is Ready).
+// MemberInfo returns information about this etcd cluster member: its ID and
+// name. This will block until this information is available (ie. the cluster
+// status is Ready).
 func (s *Service) MemberInfo(ctx context.Context) (id uint64, name string, err error) {
 	if err = s.WaitReady(ctx); err != nil {
 		err = fmt.Errorf("when waiting for cluster readiness: %w", err)
diff --git a/metropolis/node/core/debug_service.go b/metropolis/node/core/debug_service.go
index 964642b..e0f7753 100644
--- a/metropolis/node/core/debug_service.go
+++ b/metropolis/node/core/debug_service.go
@@ -44,8 +44,10 @@
 	cluster    *cluster.Manager
 	kubernetes *kubernetes.Service
 	logtree    *logtree.LogTree
-	// traceLock provides exclusive access to the Linux tracing infrastructure (ftrace)
-	// This is a channel because Go's mutexes can't be cancelled or be acquired in a non-blocking way.
+	// traceLock provides exclusive access to the Linux tracing infrastructure
+	// (ftrace).
+	// This is a channel because Go's mutexes can't be cancelled or acquired in
+	// a non-blocking way.
 	traceLock chan struct{}
 }
 
@@ -192,7 +194,8 @@
 	}
 }
 
-// Validate property names as they are used in path construction and we really don't want a path traversal vulnerability
+// Validate property names as they are used in path construction, and we
+// really don't want a path traversal vulnerability.
 var safeTracingPropertyNamesRe = regexp.MustCompile("^[a-z0-9_]+$")
 
 func writeTracingProperty(name string, value string) error {
diff --git a/metropolis/node/core/delve_enabled.go b/metropolis/node/core/delve_enabled.go
index 24ea470..9df937d 100644
--- a/metropolis/node/core/delve_enabled.go
+++ b/metropolis/node/core/delve_enabled.go
@@ -25,13 +25,16 @@
 	"source.monogon.dev/metropolis/node/core/network"
 )
 
-// initializeDebugger attaches Delve to ourselves and exposes it on common.DebuggerPort
-// This is coupled to compilation_mode=dbg because otherwise Delve doesn't have the necessary DWARF debug info
+// initializeDebugger attaches Delve to ourselves and exposes it on
+// common.DebuggerPort.
+// This is coupled to compilation_mode=dbg because otherwise Delve doesn't
+// have the necessary DWARF debug info.
 func initializeDebugger(networkSvc *network.Service) {
 	go func() {
-		// This is intentionally delayed until network becomes available since Delve for some reason connects to itself
-		// and in early-boot no network interface is available to do that through. Also external access isn't possible
-		// early on anyways.
+		// This is intentionally delayed until the network becomes available
+		// since Delve for some reason connects to itself and in early boot no
+		// network interface is available to do that through. Also, external
+		// access isn't possible early on anyway.
 		watcher := networkSvc.Watch()
 		_, err := watcher.Get(context.Background())
 		if err != nil {
diff --git a/metropolis/node/core/localstorage/crypt/blockdev.go b/metropolis/node/core/localstorage/crypt/blockdev.go
index 7f874b7..6ea1b49 100644
--- a/metropolis/node/core/localstorage/crypt/blockdev.go
+++ b/metropolis/node/core/localstorage/crypt/blockdev.go
@@ -31,10 +31,14 @@
 )
 
 var (
-	// EFIPartitionType is the standardized partition type value for the EFI ESP partition. The human readable GUID is C12A7328-F81F-11D2-BA4B-00A0C93EC93B.
+	// EFIPartitionType is the standardized partition type value for the EFI
+	// ESP partition. The human-readable GUID is
+	// C12A7328-F81F-11D2-BA4B-00A0C93EC93B.
 	EFIPartitionType = gpt.PartType{0x28, 0x73, 0x2a, 0xc1, 0x1f, 0xf8, 0xd2, 0x11, 0xba, 0x4b, 0x00, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b}
 
-	// NodeDataPartitionType is the partition type value for a Metropolis Node data partition. The human-readable GUID is 9eeec464-6885-414a-b278-4305c51f7966.
+	// NodeDataPartitionType is the partition type value for a Metropolis Node
+	// data partition. The human-readable GUID is
+	// 9eeec464-6885-414a-b278-4305c51f7966.
 	NodeDataPartitionType = gpt.PartType{0x64, 0xc4, 0xee, 0x9e, 0x85, 0x68, 0x4a, 0x41, 0xb2, 0x78, 0x43, 0x05, 0xc5, 0x1f, 0x79, 0x66}
 )
 
@@ -43,9 +47,9 @@
 	NodeDataCryptPath = "/dev/data-crypt"
 )
 
-// MakeBlockDevices looks for the ESP and the node data partition and maps them to ESPDevicePath and
-// NodeDataCryptPath respectively. This doesn't fail if it doesn't find the partitions, only if
-// something goes catastrophically wrong.
+// MakeBlockDevices looks for the ESP and the node data partition and maps them
+// to ESPDevicePath and NodeDataCryptPath respectively. This doesn't fail if it
+// doesn't find the partitions, only if something goes catastrophically wrong.
 func MakeBlockDevices(ctx context.Context) error {
 	blockdevNames, err := ioutil.ReadDir("/sys/class/block")
 	if err != nil {
diff --git a/metropolis/node/core/localstorage/crypt/crypt.go b/metropolis/node/core/localstorage/crypt/crypt.go
index a6f5006..7b994d2 100644
--- a/metropolis/node/core/localstorage/crypt/crypt.go
+++ b/metropolis/node/core/localstorage/crypt/crypt.go
@@ -35,7 +35,7 @@
 	}
 	defer integrityPartition.Close()
 	// Based on structure defined in
-	// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/md/dm-integrity.c#n59
+	//   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/md/dm-integrity.c#n59
 	if _, err := integrityPartition.Seek(16, 0); err != nil {
 		return 0, err
 	}
diff --git a/metropolis/node/core/localstorage/crypt/crypt_debug.go b/metropolis/node/core/localstorage/crypt/crypt_debug.go
index db59809..2fbdc50 100644
--- a/metropolis/node/core/localstorage/crypt/crypt_debug.go
+++ b/metropolis/node/core/localstorage/crypt/crypt_debug.go
@@ -22,8 +22,8 @@
 	"golang.org/x/sys/unix"
 )
 
-// CryptMap implements a debug version of CryptMap from crypt.go. It aliases the given baseName device into name
-// without any encryption.
+// CryptMap implements a debug version of CryptMap from crypt.go. It aliases
+// the given baseName device into name without any encryption.
 func CryptMap(name string, baseName string, _ []byte) error {
 	var stat unix.Stat_t
 	if err := unix.Stat(baseName, &stat); err != nil {
@@ -36,8 +36,9 @@
 	return nil
 }
 
-// CryptInit implements a debug version of CryptInit from crypt.go. It aliases the given baseName device into name
-// without any encryption. As an identity mapping doesn't need any initialization it doesn't do anything else.
+// CryptInit implements a debug version of CryptInit from crypt.go. It aliases
+// the given baseName device into name without any encryption. As an identity
+// mapping doesn't need any initialization it doesn't do anything else.
 func CryptInit(name, baseName string, encryptionKey []byte) error {
 	return CryptMap(name, baseName, encryptionKey)
 }
diff --git a/metropolis/node/core/localstorage/declarative/declarative.go b/metropolis/node/core/localstorage/declarative/declarative.go
index ce82c42..4f9b087 100644
--- a/metropolis/node/core/localstorage/declarative/declarative.go
+++ b/metropolis/node/core/localstorage/declarative/declarative.go
@@ -22,31 +22,38 @@
 	"strings"
 )
 
-// Directory represents the intent of existence of a directory in a hierarchical filesystem (simplified to a tree).
-// This structure can be embedded and still be interpreted as a Directory for purposes of use within this library. Any
-// inner fields of such an embedding structure that are in turn (embedded) Directories or files will be treated as
-// children in the intent expressed by this Directory. All contained directory fields must have a `dir:"name"` struct
-// tag that names them, and all contained file fields must have a `file:"name"` struct tag.
+// Directory represents the intent of existence of a directory in a
+// hierarchical filesystem (simplified to a tree). This structure can be
+// embedded and still be interpreted as a Directory for purposes of use within
+// this library. Any inner fields of such an embedding structure that are in
+// turn (embedded) Directories or files will be treated as children in the
+// intent expressed by this Directory. All contained directory fields must have
+// a `dir:"name"` struct tag that names them, and all contained file fields
+// must have a `file:"name"` struct tag.
 //
-// Creation and management of the directory at runtime is left to the implementing code. However, the DirectoryPlacement
-// implementation (set as the directory is placed onto a backing store) facilitates this management (by exposing methods
-// that mutate the backing store).
+// Creation and management of the directory at runtime is left to the
+// implementing code. However, the DirectoryPlacement implementation (set as
+// the directory is placed onto a backing store) facilitates this management
+// (by exposing methods that mutate the backing store).
 type Directory struct {
 	DirectoryPlacement
 }
 
-// File represents the intent of existence of a file. files are usually child structures in types that embed Directory.
-// File can also be embedded in another structure, and this embedding type will still be interpreted as a File for
-// purposes of use within this library.
+// File represents the intent of existence of a file. Files are usually child
+// structures in types that embed Directory. File can also be embedded in
+// another structure, and this embedding type will still be interpreted as a
+// File for purposes of use within this library.
 //
-// As with Directory, the runtime management of a File in a backing store is left to the implementing code, and the
-// embedded FilePlacement interface facilitates access to the backing store.
+// As with Directory, the runtime management of a File in a backing store is
+// left to the implementing code, and the embedded FilePlacement interface
+// facilitates access to the backing store.
 type File struct {
 	FilePlacement
 }
 
-// unpackDirectory takes a pointer to Directory or a pointer to a structure embedding Directory, and returns a
-// reflection Value that refers to the passed structure itself (not its pointer) and a plain Go pointer to the
+// unpackDirectory takes a pointer to Directory or a pointer to a structure
+// embedding Directory, and returns a reflection Value that refers to the
+// passed structure itself (not its pointer) and a plain Go pointer to the
 // (embedded) Directory.
 func unpackDirectory(d interface{}) (*reflect.Value, *Directory, error) {
 	td := reflect.TypeOf(d)
@@ -68,8 +75,9 @@
 	return &id, dir, nil
 }
 
-// unpackFile takes a pointer to a File or a pointer to a structure embedding File, and returns a reflection Value that
-// refers to the passed structure itself (not its pointer) and a plain Go pointer to the (embedded) File.
+// unpackFile takes a pointer to a File or a pointer to a structure embedding
+// File, and returns a reflection Value that refers to the passed structure
+// itself (not its pointer) and a plain Go pointer to the (embedded) File.
 func unpackFile(f interface{}) (*reflect.Value, *File, error) {
 	tf := reflect.TypeOf(f)
 	if tf.Kind() != reflect.Ptr {
@@ -91,8 +99,10 @@
 
 }
 
-// subdirs takes a pointer to a Directory or pointer to a structure embedding Directory, and returns a pair of pointers
-// to Directory-like structures contained within that directory with corresponding names (based on struct tags).
+// subdirs takes a pointer to a Directory or pointer to a structure embedding
+// Directory, and returns a pair of pointers to Directory-like structures
+// contained within that directory with corresponding names (based on struct
+// tags).
 func subdirs(d interface{}) ([]namedDirectory, error) {
 	s, _, err := unpackDirectory(d)
 	if err != nil {
@@ -117,8 +127,9 @@
 	directory interface{}
 }
 
-// files takes a pointer to a File or pointer to a structure embedding File, and returns a pair of pointers
-// to Directory-like structures contained within that directory with corresponding names (based on struct tags).
+// files takes a pointer to a Directory or pointer to a structure embedding
+// Directory, and returns a pair of pointers to File-like structures contained
+// within that directory with corresponding names (based on struct tags).
 func files(d interface{}) ([]namedFile, error) {
 	s, _, err := unpackDirectory(d)
 	if err != nil {
@@ -146,12 +157,13 @@
 	file *File
 }
 
-// Validate checks that a given pointer to a Directory or pointer to a structure containing Directory does not contain
-// any programmer errors in its definition:
+// Validate checks that a given pointer to a Directory or pointer to a
+// structure containing Directory does not contain any programmer errors in its
+// definition:
 //  - all subdirectories/files must be named
 //  - all subdirectory/file names within a directory must be unique
-//  - all subdirectory/file names within a directory must not contain the '/' character (as it is a common path
-//    delimiter)
+//  - all subdirectory/file names within a directory must not contain the '/'
+//    character (as it is a common path delimiter)
 func Validate(d interface{}) error {
 	names := make(map[string]bool)
 
diff --git a/metropolis/node/core/localstorage/declarative/placement.go b/metropolis/node/core/localstorage/declarative/placement.go
index c2ff53d..a59912c 100644
--- a/metropolis/node/core/localstorage/declarative/placement.go
+++ b/metropolis/node/core/localstorage/declarative/placement.go
@@ -21,21 +21,25 @@
 	"os"
 )
 
-// A declarative Directory/File tree is an abstract definition until it's 'placed' on a backing file system.
-// By convention, all abstract definitions of hierarchies are stored as copiable structs, and only turned to pointers
-// when placed (ie., implementations like PlaceFS takes a *Directory, but Root as a declarative definition is defined as
-// non-pointer).
+// A declarative Directory/File tree is an abstract definition until it's
+// 'placed' on a backing file system. By convention, all abstract definitions
+// of hierarchies are stored as copiable structs, and only turned to pointers
+// when placed (ie., implementations like PlaceFS take a *Directory, but Root
+// as a declarative definition is defined as non-pointer).
 
-// Placement is an interface available on Placed files and Directories. All *Placement interfaces on files/Directories
-// are only available on placed trees - eg., after a PlaceFS call. This is unfortunately not typesafe, callers need to
-// either be sure about placement, or check the interface for null.
+// Placement is an interface available on Placed files and Directories. All
+// *Placement interfaces on files/Directories are only available on placed
+// trees - eg., after a PlaceFS call. This is unfortunately not typesafe;
+// callers need to either be sure about placement, or check the interface for
+// nil.
 type Placement interface {
 	FullPath() string
 	RootRef() interface{}
 }
 
-// FilePlacement is an interface available on Placed files. It is implemented by different placement backends, and
-// set on all files during placement by a given backend.
+// FilePlacement is an interface available on Placed files. It is implemented
+// by different placement backends, and set on all files during placement by a
+// given backend.
 type FilePlacement interface {
 	Placement
 	Exists() (bool, error)
@@ -43,26 +47,30 @@
 	Write([]byte, os.FileMode) error
 }
 
-// DirectoryPlacement is an interface available on Placed Directories. It is implemented by different placement
-// backends, and set on all directories during placement by a given backend.
+// DirectoryPlacement is an interface available on Placed Directories. It is
+// implemented by different placement backends, and set on all directories
+// during placement by a given backend.
 type DirectoryPlacement interface {
 	Placement
-	// MkdirAll creates this directory and all its parents on backing stores that have a physical directory
-	// structure.
+	// MkdirAll creates this directory and all its parents on backing stores
+	// that have a physical directory structure.
 	MkdirAll(file os.FileMode) error
 }
 
-// DirectoryPlacer is a placement backend-defined function that, given the path returned by the parent of a directory,
-// and the path to a directory, returns a DirectoryPlacement implementation for this directory. The new placement's
-// path (via .FullPath()) will be used for placement of directories/files within the new directory.
+// DirectoryPlacer is a placement backend-defined function that, given the path
+// returned by the parent of a directory, and the path to a directory, returns
+// a DirectoryPlacement implementation for this directory. The new placement's
+// path (via .FullPath()) will be used for placement of directories/files
+// within the new directory.
 type DirectoryPlacer func(parent, this string) DirectoryPlacement
 
 // FilePlacer is analogous to DirectoryPlacer, but for files.
 type FilePlacer func(parent, this string) FilePlacement
 
-// place recursively places a pointer to a Directory or pointer to a structure embedding Directory into a given backend,
-// by calling DirectoryPlacer and FilePlacer where appropriate. This is done recursively across a declarative tree until
-// all children are placed.
+// place recursively places a pointer to a Directory or pointer to a structure
+// embedding Directory into a given backend, by calling DirectoryPlacer and
+// FilePlacer where appropriate. This is done recursively across a declarative
+// tree until all children are placed.
 func place(d interface{}, parent, this string, dpl DirectoryPlacer, fpl FilePlacer) error {
 	_, dir, err := unpackDirectory(d)
 	if err != nil {
diff --git a/metropolis/node/core/localstorage/declarative/placement_local.go b/metropolis/node/core/localstorage/declarative/placement_local.go
index 3f7b1dd..43921cd 100644
--- a/metropolis/node/core/localstorage/declarative/placement_local.go
+++ b/metropolis/node/core/localstorage/declarative/placement_local.go
@@ -27,7 +27,8 @@
 
 // FSRoot is a root of a storage backend that resides on the local filesystem.
 type FSRoot struct {
-	// The local path at which the declarative directory structure is located (eg. "/").
+	// The local path at which the declarative directory structure is located
+	// (eg. "/").
 	root string
 }
 
@@ -83,8 +84,9 @@
 	return os.MkdirAll(f.FullPath(), perm)
 }
 
-// PlaceFS takes a pointer to a Directory or a pointer to a structure embedding Directory and places it at a given
-// filesystem root. From this point on the given structure pointer has valid Placement interfaces.
+// PlaceFS takes a pointer to a Directory or a pointer to a structure embedding
+// Directory and places it at a given filesystem root. From this point on the
+// given structure pointer has valid Placement interfaces.
 func PlaceFS(dd interface{}, root string) error {
 	r := &FSRoot{root}
 	pathFor := func(parent, this string) string {
diff --git a/metropolis/node/core/localstorage/directory_pki.go b/metropolis/node/core/localstorage/directory_pki.go
index 41b6729..b9ab3a0 100644
--- a/metropolis/node/core/localstorage/directory_pki.go
+++ b/metropolis/node/core/localstorage/directory_pki.go
@@ -44,7 +44,8 @@
 	// This has no SANs because it authenticates by public key, not by name
 	return x509.Certificate{
 		Subject: pkix.Name{
-			// We identify nodes by their ID public keys (not hashed since a strong hash is longer and serves no benefit)
+			// We identify nodes by their ID public keys (not hashed since a
+			// strong hash is longer and provides no benefit)
 			CommonName: name,
 		},
 		IsCA:                  false,
@@ -137,8 +138,8 @@
 	}, nil
 }
 
-// AllExist returns true if all PKI files (cert, key, CA cert) are present on the backing
-// store.
+// AllExist returns true if all PKI files (cert, key, CA cert) are present on
+// the backing store.
 func (p *PKIDirectory) AllExist() (bool, error) {
 	for _, d := range []*declarative.File{&p.CACertificate, &p.Certificate, &p.Key} {
 		exists, err := d.Exists()
@@ -152,8 +153,8 @@
 	return true, nil
 }
 
-// AllAbsent returns true if all PKI files (cert, key, CA cert) are missing from the backing
-// store.
+// AllAbsent returns true if all PKI files (cert, key, CA cert) are missing
+// from the backing store.
 func (p *PKIDirectory) AllAbsent() (bool, error) {
 	for _, d := range []*declarative.File{&p.CACertificate, &p.Certificate, &p.Key} {
 		exists, err := d.Exists()
diff --git a/metropolis/node/core/localstorage/storage.go b/metropolis/node/core/localstorage/storage.go
index 0c4f641..e56b4eb 100644
--- a/metropolis/node/core/localstorage/storage.go
+++ b/metropolis/node/core/localstorage/storage.go
@@ -16,20 +16,24 @@
 
 package localstorage
 
-// Localstorage is a replacement for the old 'storage' internal library. It is currently unused, but will become
-// so as the node code gets rewritten.
+// Localstorage is a replacement for the old 'storage' internal library. It is
+// currently unused, but will be used as the node code gets rewritten.
 
-// The library is centered around the idea of a declarative filesystem tree defined as mutually recursive Go structs.
-// This structure is then Placed onto an abstract real filesystem (eg. a local POSIX filesystem at /), and a handle
-// to that placed filesystem is then used by the consumers of this library to refer to subsets of the tree (that now
-// correspond to locations on a filesystem).
+// The library is centered around the idea of a declarative filesystem tree
+// defined as mutually recursive Go structs. This structure is then Placed
+// onto an abstract real filesystem (eg. a local POSIX filesystem at /), and a
+// handle to that placed filesystem is then used by the consumers of this
+// library to refer to subsets of the tree (that now correspond to locations on
+// a filesystem).
 //
-// Every member of the storage hierarchy must either be, or inherit from Directory or File. In order to be placed
-// correctly, Directory embedding structures must use `dir:` or `file:` tags for child Directories and files
-// respectively. The content of the tag specifies the path part that this element will be placed at.
+// Every member of the storage hierarchy must either be, or inherit from
+// Directory or File. In order to be placed correctly, Directory embedding
+// structures must use `dir:` or `file:` tags for child Directories and files
+// respectively. The content of the tag specifies the path part that this
+// element will be placed at.
 //
-// Full placement path(available via FullPath()) format is placement implementation-specific. However, they're always
-// strings.
+// Full placement path (available via FullPath()) format is placement
+// implementation-specific. However, they're always strings.
 
 import (
 	"sync"
@@ -43,9 +47,11 @@
 	ESP ESPDirectory `dir:"esp"`
 	// Persistent Data partition, mounted from encrypted and authenticated storage.
 	Data DataDirectory `dir:"data"`
-	// FHS-standard /etc directory, containes /etc/hosts, /etc/machine-id, and other compatibility files.
+	// FHS-standard /etc directory, contains /etc/hosts, /etc/machine-id, and
+	// other compatibility files.
 	Etc EtcDirectory `dir:"etc"`
-	// Ephemeral data, used by runtime, stored in tmpfs. Things like sockets, temporary config files, etc.
+	// Ephemeral data, used by runtime, stored in tmpfs. Things like sockets,
+	// temporary config files, etc.
 	Ephemeral EphemeralDirectory `dir:"ephemeral"`
 	// FHS-standard /tmp directory, used by ioutil.TempFile.
 	Tmp TmpDirectory `dir:"tmp"`
@@ -60,15 +66,18 @@
 	Key           declarative.File `file:"cert-key.pem"`
 }
 
-// DataDirectory is an xfs partition mounted via cryptsetup/LUKS, with a key derived from {global,local}Unlock keys.
+// DataDirectory is an xfs partition mounted via cryptsetup/LUKS, with a key
+// derived from {global,local}Unlock keys.
 type DataDirectory struct {
 	declarative.Directory
 
 	// flagLock locks canMount and mounted.
 	flagLock sync.Mutex
-	// canMount is set by Root when it is initialized. It is required to be set for mounting the data directory.
+	// canMount is set by Root when it is initialized. It is required to be set
+	// for mounting the data directory.
 	canMount bool
-	// mounted is set by DataDirectory when it is mounted. It ensures it's only mounted once.
+	// mounted is set by DataDirectory when it is mounted. It ensures it's only
+	// mounted once.
 	mounted bool
 
 	Containerd declarative.Directory   `dir:"containerd"`
@@ -108,11 +117,13 @@
 
 	DevicePlugins struct {
 		declarative.Directory
-		// Used by Kubelet, hardcoded relative to DataKubernetesKubeletDirectory
+		// Used by Kubelet, hardcoded relative to
+		// DataKubernetesKubeletDirectory
 		Kubelet declarative.File `file:"kubelet.sock"`
 	} `dir:"device-plugins"`
 
-	// Pod logs, hardcoded to /data/kubelet/logs in @com_github_kubernetes//pkg/kubelet/kuberuntime:kuberuntime_manager.go
+	// Pod logs, hardcoded to /data/kubelet/logs in
+	// @com_github_kubernetes//pkg/kubelet/kuberuntime:kuberuntime_manager.go
 	Logs declarative.Directory `dir:"logs"`
 
 	Plugins struct {
@@ -134,8 +145,10 @@
 
 type EtcDirectory struct {
 	declarative.Directory
-	Hosts     declarative.File `file:"hosts"`      // Symlinked to /ephemeral/hosts, baked into the erofs system image
-	MachineID declarative.File `file:"machine-id"` // Symlinked to /ephemeral/machine-id, baked into the erofs system image
+	// Symlinked to /ephemeral/hosts, baked into the erofs system image
+	Hosts declarative.File `file:"hosts"`
+	// Symlinked to /ephemeral/machine-id, baked into the erofs system image
+	MachineID declarative.File `file:"machine-id"`
 }
 
 type EphemeralDirectory struct {
diff --git a/metropolis/node/core/main.go b/metropolis/node/core/main.go
index 4051663..eb4c6c7 100644
--- a/metropolis/node/core/main.go
+++ b/metropolis/node/core/main.go
@@ -47,8 +47,10 @@
 )
 
 var (
-	// kubernetesConfig is the static/global part of the Kubernetes service configuration. In the future, this might
-	// be configurable by loading it from the EnrolmentConfig. Fow now, it's static and same across all clusters.
+	// kubernetesConfig is the static/global part of the Kubernetes service
+	// configuration. In the future, this might be configurable by loading it
+	// from the EnrolmentConfig. For now, it's static and the same across
+	// all clusters.
 	kubernetesConfig = kubernetes.Config{
 		ServiceIPRange: net.IPNet{ // TODO(q3k): Decide if configurable / final value
 			IP:   net.IP{10, 0, 255, 1},
@@ -69,7 +71,8 @@
 		}
 		unix.Sync()
 		// TODO(lorenz): Switch this to Reboot when init panics are less likely
-		// Best effort, nothing we can do if this fails except printing the error to the console.
+		// Best effort, nothing we can do if this fails except printing the
+		// error to the console.
 		if err := unix.Reboot(unix.LINUX_REBOOT_CMD_POWER_OFF); err != nil {
 			panic(fmt.Sprintf("failed to halt node: %v\n", err))
 		}
@@ -97,7 +100,8 @@
 		panic(fmt.Errorf("could not set up basic mounts: %w", err))
 	}
 
-	// Linux kernel default is 4096 which is far too low. Raise it to 1M which is what gVisor suggests.
+	// Linux kernel default is 4096 which is far too low. Raise it to 1M which
+	// is what gVisor suggests.
 	if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &unix.Rlimit{Cur: 1048576, Max: 1048576}); err != nil {
 		logger.Fatalf("Failed to raise rlimits: %v", err)
 	}
@@ -113,7 +117,8 @@
 
 	networkSvc := network.New()
 
-	// This function initializes a headless Delve if this is a debug build or does nothing if it's not
+	// This function initializes a headless Delve if this is a debug build, or
+	// does nothing if it's not.
 	initializeDebugger(networkSvc)
 
 	// Prepare local storage.
@@ -122,15 +127,17 @@
 		panic(fmt.Errorf("when placing root FS: %w", err))
 	}
 
-	// trapdoor is a channel used to signal to the init service that a very low-level, unrecoverable failure
-	// occured. This causes a GURU MEDITATION ERROR visible to the end user.
+	// trapdoor is a channel used to signal to the init service that a very
+	// low-level, unrecoverable failure occurred. This causes a GURU MEDITATION
+	// ERROR visible to the end user.
 	trapdoor := make(chan struct{})
 
 	// Make context for supervisor. We cancel it when we reach the trapdoor.
 	ctxS, ctxC := context.WithCancel(context.Background())
 
-	// Start root initialization code as a supervisor one-shot runnable. This means waiting for the network, starting
-	// the cluster manager, and then starting all services related to the node's roles.
+	// Start root initialization code as a supervisor one-shot runnable. This
+	// means waiting for the network, starting the cluster manager, and then
+	// starting all services related to the node's roles.
 	// TODO(q3k): move this to a separate 'init' service.
 	supervisor.New(ctxS, func(ctx context.Context) error {
 		logger := supervisor.Logger(ctx)
@@ -143,8 +150,8 @@
 			return fmt.Errorf("when starting network: %w", err)
 		}
 
-		// Start cluster manager. This kicks off cluster membership machinery, which will either start
-		// a new cluster, enroll into one or join one.
+		// Start cluster manager. This kicks off cluster membership machinery,
+		// which will either start a new cluster, enroll into one or join one.
 		m := cluster.NewManager(root, networkSvc)
 		if err := supervisor.Run(ctx, "enrolment", m.Run); err != nil {
 			return fmt.Errorf("when starting enrolment: %w", err)
@@ -158,25 +165,27 @@
 			return fmt.Errorf("new couldn't find home in new cluster, aborting: %w", err)
 		}
 
-		// We are now in a cluster. We can thus access our 'node' object and start all services that
-		// we should be running.
+		// We are now in a cluster. We can thus access our 'node' object and
+		// start all services that we should be running.
 
 		logger.Info("Enrolment success, continuing startup.")
 		logger.Info(fmt.Sprintf("This node (%s) has roles:", status.Node.String()))
 		if cm := status.Node.ConsensusMember(); cm != nil {
-			// There's no need to start anything for when we are a consensus member - the cluster
-			// manager does this for us if necessary (as creating/enrolling/joining a cluster is
-			// pretty tied into cluster lifecycle management).
+			// There's no need to start anything when we are a consensus
+			// member - the cluster manager does this for us if necessary (as
+			// creating/enrolling/joining a cluster is pretty tied into cluster
+			// lifecycle management).
 			logger.Info(fmt.Sprintf(" - etcd consensus member"))
 		}
 		if kw := status.Node.KubernetesWorker(); kw != nil {
 			logger.Info(fmt.Sprintf(" - kubernetes worker"))
 		}
 
-		// If we're supposed to be a kubernetes worker, start kubernetes services and containerd.
-		// In the future, this might be split further into kubernetes control plane and data plane
-		// roles.
-		// TODO(q3k): watch on cluster status updates to start/stop kubernetes service.
+		// If we're supposed to be a kubernetes worker, start kubernetes
+		// services and containerd. In the future, this might be split further
+		// into kubernetes control plane and data plane roles.
+		// TODO(q3k): watch on cluster status updates to start/stop kubernetes
+		// service.
 		var containerdSvc *containerd.Service
 		var kubeSvc *kubernetes.Service
 		if kw := status.Node.KubernetesWorker(); kw != nil {
@@ -236,8 +245,9 @@
 	for {
 		select {
 		case <-trapdoor:
-			// If the trapdoor got closed, we got stuck early enough in the boot process that we can't do anything about
-			// it. Display a generic error message until we handle error conditions better.
+			// If the trapdoor got closed, we got stuck early enough in the
+			// boot process that we can't do anything about it. Display a
+			// generic error message until we handle error conditions better.
 			ctxC()
 			log.Printf("                  ########################")
 			log.Printf("                  # GURU MEDIATION ERROR #")
@@ -266,9 +276,11 @@
 					}
 				}
 			case unix.SIGURG:
-				// Go 1.14 introduced asynchronous preemption, which uses SIGURG.
-				// In order not to break backwards compatibility in the unlikely case
-				// of an application actually using SIGURG on its own, they're not filtering them.
+				// Go 1.14 introduced asynchronous preemption, which uses
+				// SIGURG.
+				// In order not to break backwards compatibility in the
+				// unlikely case of an application actually using SIGURG on
+				// its own, the Go runtime doesn't filter these signals.
 				// (https://github.com/golang/go/issues/37942)
 				logger.V(5).Info("Ignoring SIGURG")
 			// TODO(lorenz): We can probably get more than just SIGCHLD as init, but I can't think
@@ -280,8 +292,9 @@
 	}
 }
 
-// nodeCertificate creates a node key/certificate for a foreign node. This is duplicated code with localstorage's
-// PKIDirectory EnsureSelfSigned, but is temporary (and specific to 'golden tickets').
+// nodeCertificate creates a node key/certificate for a foreign node. This
+// duplicates code from localstorage's PKIDirectory EnsureSelfSigned, but is
+// temporary (and specific to 'golden tickets').
 func (s *debugService) nodeCertificate() (cert, key []byte, err error) {
 	pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
 	if err != nil {
diff --git a/metropolis/node/core/mounts.go b/metropolis/node/core/mounts.go
index 7a15c30..f8f0dae 100644
--- a/metropolis/node/core/mounts.go
+++ b/metropolis/node/core/mounts.go
@@ -27,8 +27,9 @@
 	"source.monogon.dev/metropolis/pkg/logtree"
 )
 
-// setupMounts sets up basic mounts like sysfs, procfs, devtmpfs and cgroups. This should be called early during init
-// as a lot of processes depend on this being available.
+// setupMounts sets up basic mounts like sysfs, procfs, devtmpfs and cgroups.
+// This should be called early during init as a lot of processes depend on this
+// being available.
 func setupMounts(log logtree.LeveledLogger) error {
 	// Set up target filesystems.
 	for _, el := range []struct {
@@ -50,7 +51,8 @@
 		}
 	}
 
-	// Mount all available CGroups for v1 (v2 uses a single unified hierarchy and is not supported by our runtimes yet)
+	// Mount all available CGroups for v1 (v2 uses a single unified hierarchy
+	// and is not supported by our runtimes yet)
 	if err := unix.Mount("tmpfs", "/sys/fs/cgroup", "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, ""); err != nil {
 		panic(err)
 	}
diff --git a/metropolis/node/core/network/dhcp4c/callback/callback.go b/metropolis/node/core/network/dhcp4c/callback/callback.go
index de00383..c01cf45 100644
--- a/metropolis/node/core/network/dhcp4c/callback/callback.go
+++ b/metropolis/node/core/network/dhcp4c/callback/callback.go
@@ -14,13 +14,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package callback contains minimal callbacks for configuring the kernel with options received over DHCP.
+// Package callback contains minimal callbacks for configuring the kernel with
+// options received over DHCP.
 //
-// These directly configure the relevant kernel subsytems and need to own certain parts of them as documented on a per-
-// callback basis to make sure that they can recover from restarts and crashes of the DHCP client.
-// The callbacks in here are not suitable for use in advanced network scenarios like running multiple DHCP clients
-// per interface via ClientIdentifier or when running an external FIB manager. In these cases it's advised to extract
-// the necessary information from the lease in your own callback and communicate it directly to the responsible entity.
+// These directly configure the relevant kernel subsystems and need to own
+// certain parts of them as documented on a per-callback basis to make sure
+// that they can recover from restarts and crashes of the DHCP client.
+// The callbacks in here are not suitable for use in advanced network scenarios
+// like running multiple DHCP clients per interface via ClientIdentifier or
+// when running an external FIB manager. In these cases it's advised to extract
+// the necessary information from the lease in your own callback and
+// communicate it directly to the responsible entity.
 package callback
 
 import (
@@ -61,8 +65,9 @@
 	return a.IP.Equal(b.IP) && aOnes == bOnes && aBits == bBits
 }
 
-// ManageIP sets up and tears down the assigned IP address. It takes exclusive ownership of all IPv4 addresses
-// on the given interface which do not have IFA_F_PERMANENT set, so it's not possible to run multiple dynamic addressing
+// ManageIP sets up and tears down the assigned IP address. It takes exclusive
+// ownership of all IPv4 addresses on the given interface which do not have
+// IFA_F_PERMANENT set, so it's not possible to run multiple dynamic addressing
 // clients on a single interface.
 func ManageIP(iface netlink.Link) dhcp4c.LeaseCallback {
 	return func(old, new *dhcp4c.Lease) error {
@@ -75,9 +80,11 @@
 
 		for _, addr := range addrs {
 			if addr.Flags&unix.IFA_F_PERMANENT == 0 {
-				// Linux identifies addreses by IP, mask and peer (see net/ipv4/devinet.find_matching_ifa in Linux 5.10)
-				// So don't touch addresses which match on these properties as AddrReplace will atomically reconfigure
-				// them anyways without interrupting things.
+				// Linux identifies addresses by IP, mask and peer (see
+				// net/ipv4/devinet.find_matching_ifa in Linux 5.10).
+				// So don't touch addresses which match on these properties as
+				// AddrReplace will atomically reconfigure them anyways without
+				// interrupting things.
 				if isIPNetEqual(addr.IPNet, newNet) && addr.Peer == nil && new != nil {
 					continue
 				}
@@ -104,8 +111,9 @@
 	}
 }
 
-// ManageDefaultRoute manages a default route through the first router offered by DHCP. It does nothing if DHCP
-// doesn't provide any routers. It takes ownership of all RTPROTO_DHCP routes on the given interface, so it's not
+// ManageDefaultRoute manages a default route through the first router offered
+// by DHCP. It does nothing if DHCP doesn't provide any routers. It takes
+// ownership of all RTPROTO_DHCP routes on the given interface, so it's not
 // possible to run multiple DHCP clients on the given interface.
 func ManageDefaultRoute(iface netlink.Link) dhcp4c.LeaseCallback {
 	return func(old, new *dhcp4c.Lease) error {
@@ -120,8 +128,8 @@
 		}
 		ipv4DefaultRoute := net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}
 		for _, route := range dhcpRoutes {
-			// Don't remove routes which can be atomically replaced by RouteReplace to prevent potential traffic
-			// disruptions.
+			// Don't remove routes which can be atomically replaced by
+			// RouteReplace to prevent potential traffic disruptions.
 			if !isIPNetEqual(&ipv4DefaultRoute, route.Dst) && newRouter != nil {
 				continue
 			}
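
A quick illustration of the escape hatch mentioned in the package comment
above: for advanced setups, information can be extracted from the lease in a
custom callback instead of using ManageIP/ManageDefaultRoute. A minimal
sketch, assuming a hypothetical dnsUpdate consumer (not part of this tree):

    package main

    import "source.monogon.dev/metropolis/node/core/network/dhcp4c"

    // dnsCallback hands the DNS servers of each new lease to a consumer.
    func dnsCallback(dnsUpdate func(dhcp4c.DNSServers)) dhcp4c.LeaseCallback {
        return func(old, new *dhcp4c.Lease) error {
            // DNSServers returns nil for a nil lease, so losing the
            // lease is handled by the same code path.
            dnsUpdate(new.DNSServers())
            return nil
        }
    }
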
diff --git a/metropolis/node/core/network/dhcp4c/callback/callback_test.go b/metropolis/node/core/network/dhcp4c/callback/callback_test.go
index 01611c3..7fb8875 100644
--- a/metropolis/node/core/network/dhcp4c/callback/callback_test.go
+++ b/metropolis/node/core/network/dhcp4c/callback/callback_test.go
@@ -63,7 +63,9 @@
 		oldLease, newLease *dhcp4c.Lease
 		expectedAddrs      []netlink.Addr
 	}{
-		{ // Lifetimes are necessary, otherwise the Kernel sets the IFA_F_PERMANENT flag behind our back
+		// Lifetimes are necessary; otherwise the kernel sets the
+		// IFA_F_PERMANENT flag behind our back.
+		{
 			name:          "RemoveOldIPs",
 			initialAddrs:  []netlink.Addr{{IPNet: &testNet1, ValidLft: 60}, {IPNet: &testNet2, ValidLft: 60}},
 			oldLease:      nil,
@@ -149,12 +151,15 @@
 	if os.Getenv("IN_KTEST") != "true" {
 		t.Skip("Not in ktest")
 	}
-	// testRoute is only used as a route destination and not configured on any interface.
+	// testRoute is only used as a route destination and not configured on any
+	// interface.
 	testRoute := net.IPNet{IP: net.IP{10, 0, 3, 0}, Mask: net.CIDRMask(24, 32)}
 
-	// A test interface is set up for each test and assigned testNet1 and testNet2 so that testNet1Router and
-	// testNet2Router are valid gateways for routes in this environment. A LinkIndex of -1 is replaced by the correct
-	// link index for this test interface at runtime for both initialRoutes and expectedRoutes.
+	// A test interface is set up for each test and assigned testNet1 and
+	// testNet2 so that testNet1Router and testNet2Router are valid gateways
+	// for routes in this environment. A LinkIndex of -1 is replaced by the
+	// correct link index for this test interface at runtime for both
+	// initialRoutes and expectedRoutes.
 	var tests = []struct {
 		name               string
 		initialRoutes      []netlink.Route
@@ -167,8 +172,10 @@
 			oldLease:      nil,
 			newLease:      leaseAddRouter(trivialLeaseFromNet(testNet1), testNet1Router),
 			expectedRoutes: []netlink.Route{{
-				Protocol:  unix.RTPROT_DHCP,
-				Dst:       nil, // Linux weirdly retuns no RTA_DST for default routes, but one for everything else
+				Protocol: unix.RTPROT_DHCP,
+				// Linux weirdly returns no RTA_DST for default routes, but one
+				// for everything else.
+				Dst:       nil,
 				Gw:        testNet1Router,
 				Src:       testNet1.IP,
 				Table:     mainRoutingTable,
diff --git a/metropolis/node/core/network/dhcp4c/dhcpc.go b/metropolis/node/core/network/dhcp4c/dhcpc.go
index 5a32b5a..9bb70dd 100644
--- a/metropolis/node/core/network/dhcp4c/dhcpc.go
+++ b/metropolis/node/core/network/dhcp4c/dhcpc.go
@@ -14,10 +14,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package dhcp4c implements a DHCPv4 Client as specified in RFC2131 (with some notable deviations).
-// It implements only the DHCP state machine itself, any configuration other than the interface IP
-// address (which is always assigned in DHCP and necessary for the protocol to work) is exposed
-// as [informers/observables/watchable variables/???] to consumers who then deal with it.
+// Package dhcp4c implements a DHCPv4 client as specified in RFC2131 (with
+// some notable deviations). It implements only the DHCP state machine itself;
+// any configuration other than the interface IP address (which is always
+// assigned in DHCP and necessary for the protocol to work) is exposed as
+// [informers/observables/watchable variables/???] to consumers who then deal
+// with it.
 package dhcp4c
 
 import (
@@ -41,19 +43,23 @@
 type state int
 
 const (
-	// stateDiscovering sends broadcast DHCPDISCOVER messages to the network and waits for either a DHCPOFFER or
-	// (in case of Rapid Commit) DHCPACK.
+	// stateDiscovering sends broadcast DHCPDISCOVER messages to the network
+	// and waits for either a DHCPOFFER or (in case of Rapid Commit) DHCPACK.
 	stateDiscovering state = iota
-	// stateRequesting sends broadcast DHCPREQUEST messages containing the server identifier for the selected lease and
-	// waits for a DHCPACK or a DHCPNAK. If it doesn't get either it transitions back into discovering.
+	// stateRequesting sends broadcast DHCPREQUEST messages containing the
+	// server identifier for the selected lease and waits for a DHCPACK or a
+	// DHCPNAK. If it doesn't get either it transitions back into discovering.
 	stateRequesting
-	// stateBound just waits until RenewDeadline (derived from RenewTimeValue, half the lifetime by default) expires.
+	// stateBound just waits until RenewDeadline (derived from RenewTimeValue,
+	// half the lifetime by default) expires.
 	stateBound
-	// stateRenewing sends unicast DHCPREQUEST messages to the currently-selected server and waits for either a DHCPACK
-	// or DHCPNAK message. On DHCPACK it transitions to bound, otherwise to discovering.
+	// stateRenewing sends unicast DHCPREQUEST messages to the
+	// currently-selected server and waits for either a DHCPACK or DHCPNAK
+	// message. On DHCPACK it transitions to bound, otherwise to discovering.
 	stateRenewing
-	// stateRebinding sends broadcast DHCPREQUEST messages to the network and waits for either a DHCPACK or DHCPNAK from
-	// any server. Response processing is identical to stateRenewing.
+	// stateRebinding sends broadcast DHCPREQUEST messages to the network and
+	// waits for either a DHCPACK or DHCPNAK from any server. Response
+	// processing is identical to stateRenewing.
 	stateRebinding
 )
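
As a reading aid, the transitions documented on the constants above can be
summarized as follows (illustrative sketch only, not part of the change; the
stateDiscovering -> stateBound edge exists only via Rapid Commit):

    // nextStates sketches the FSM edges described in the comments above.
    var nextStates = map[state][]state{
        stateDiscovering: {stateRequesting, stateBound},
        stateRequesting:  {stateBound, stateDiscovering},
        stateBound:       {stateRenewing},
        stateRenewing:    {stateBound, stateRebinding},
        stateRebinding:   {stateBound, stateDiscovering},
    }
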
 
@@ -74,42 +80,52 @@
 	}
 }
 
-// This only requests SubnetMask and IPAddressLeaseTime as renewal and rebinding times are fine if
-// they are just defaulted. They are respected (if valid, otherwise they are clamped to the nearest
-// valid value) if sent by the server.
+// This only requests SubnetMask and IPAddressLeaseTime as renewal and
+// rebinding times are fine if they are just defaulted. They are respected (if
+// valid, otherwise they are clamped to the nearest valid value) if sent by the
+// server.
 var internalOptions = dhcpv4.OptionCodeList{dhcpv4.OptionSubnetMask, dhcpv4.OptionIPAddressLeaseTime}
 
-// Transport represents a mechanism over which DHCP messages can be exchanged with a server.
+// Transport represents a mechanism over which DHCP messages can be exchanged
+// with a server.
 type Transport interface {
-	// Send attempts to send the given DHCP payload message to the transport target once. An empty return value
-	// does not indicate that the message was successfully received.
+	// Send attempts to send the given DHCP payload message to the transport
+	// target once. A nil error does not indicate that the message was
+	// successfully received.
 	Send(payload *dhcpv4.DHCPv4) error
-	// SetReceiveDeadline sets a deadline for Receive() calls after which they return with DeadlineExceededErr
+	// SetReceiveDeadline sets a deadline for Receive() calls after which they
+	// return with DeadlineExceededErr
 	SetReceiveDeadline(time.Time) error
-	// Receive waits for a DHCP message to arrive and returns it. If the deadline expires without a message arriving
-	// it will return DeadlineExceededErr. If the message is completely malformed it will an instance of
-	// InvalidMessageError.
+	// Receive waits for a DHCP message to arrive and returns it. If the
+	// deadline expires without a message arriving it will return
+	// DeadlineExceededErr. If the message is completely malformed it will
+	// return an instance of InvalidMessageError.
 	Receive() (*dhcpv4.DHCPv4, error)
-	// Close closes the given transport. Calls to any of the above methods will fail if the transport is closed.
-	// Specific transports can be reopened after being closed.
+	// Close closes the given transport. Calls to any of the above methods
+	// will fail if the transport is closed. Specific transports can be
+	// reopened after being closed.
 	Close() error
 }
 
-// UnicastTransport represents a mechanism over which DHCP messages can be exchanged with a single server over an
-// arbitrary IPv4-based network. Implementers need to support servers running outside the local network via a router.
+// UnicastTransport represents a mechanism over which DHCP messages can be
+// exchanged with a single server over an arbitrary IPv4-based network.
+// Implementers need to support servers running outside the local network via a
+// router.
 type UnicastTransport interface {
 	Transport
-	// Open connects the transport to a new unicast target. Can only be called after calling Close() or after creating
-	// a new transport.
+	// Open connects the transport to a new unicast target. Can only be called
+	// after calling Close() or after creating a new transport.
 	Open(serverIP, bindIP net.IP) error
 }
 
-// BroadcastTransport represents a mechanism over which DHCP messages can be exchanged with all servers on a Layer 2
-// broadcast domain. Implementers need to support sending and receiving messages without any IP being configured on
+// BroadcastTransport represents a mechanism over which DHCP messages can be
+// exchanged with all servers on a Layer 2 broadcast domain. Implementers need
+// to support sending and receiving messages without any IP being configured on
 // the interface.
 type BroadcastTransport interface {
 	Transport
-	// Open connects the transport. Can only be called after calling Close() or after creating a new transport.
+	// Open connects the transport. Can only be called after calling Close() or
+	// after creating a new transport.
 	Open() error
 }
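
To make the Transport contract concrete, a single broadcast exchange could be
driven as below. This is a sketch against the interfaces as documented
(exchangeOnce and the two-second timeout are inventions for illustration;
time and dhcpv4 are the imports already used in this file):

    // exchangeOnce sends one message and waits briefly for a reply.
    func exchangeOnce(bt BroadcastTransport, msg *dhcpv4.DHCPv4) (*dhcpv4.DHCPv4, error) {
        if err := bt.Open(); err != nil {
            return nil, err
        }
        defer bt.Close()
        if err := bt.Send(msg); err != nil {
            return nil, err
        }
        if err := bt.SetReceiveDeadline(time.Now().Add(2 * time.Second)); err != nil {
            return nil, err
        }
        return bt.Receive()
    }
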
 
@@ -117,26 +133,29 @@
 
 // Client implements a DHCPv4 client.
 //
-// Note that the size of all data sent to the server (RequestedOptions, ClientIdentifier,
-// VendorClassIdentifier and ExtraRequestOptions) should be kept reasonably small (<500 bytes) in
-// order to maximize the chance that requests can be properly transmitted.
+// Note that the size of all data sent to the server (RequestedOptions,
+// ClientIdentifier, VendorClassIdentifier and ExtraRequestOptions) should be
+// kept reasonably small (<500 bytes) in order to maximize the chance that
+// requests can be properly transmitted.
 type Client struct {
-	// RequestedOptions contains a list of extra options this client is interested in
+	// RequestedOptions contains a list of extra options this client is
+	// interested in
 	RequestedOptions dhcpv4.OptionCodeList
 
 	// ClientIdentifier is used by the DHCP server to identify this client.
 	// If empty, on Ethernet the MAC address is used instead.
 	ClientIdentifier []byte
 
-	// VendorClassIdentifier is used by the DHCP server to identify options specific to this type of
-	// clients and to populate the vendor-specific option (43).
+	// VendorClassIdentifier is used by the DHCP server to identify options
+	// specific to this type of client and to populate the vendor-specific
+	// option (43).
 	VendorClassIdentifier string
 
 	// ExtraRequestOptions are extra options sent to the server.
 	ExtraRequestOptions dhcpv4.Options
 
-	// Backoff strategies for each state. These all have sane defaults, override them only if
-	// necessary.
+	// Backoff strategies for each state. These all have sane defaults,
+	// override them only if necessary.
 	DiscoverBackoff    backoff.BackOff
 	AcceptOfferBackoff backoff.BackOff
 	RenewBackoff       backoff.BackOff
@@ -170,13 +189,14 @@
 	leaseRenewDeadline time.Time
 }
 
-// newDefaultBackoff returns an infinitely-retrying randomized exponential backoff with a
-// DHCP-appropriate InitialInterval
+// newDefaultBackoff returns an infinitely-retrying randomized exponential
+// backoff with a DHCP-appropriate InitialInterval
 func newDefaultBackoff() *backoff.ExponentialBackOff {
 	b := backoff.NewExponentialBackOff()
 	b.MaxElapsedTime = 0 // No Timeout
-	// Lots of servers wait 1s for existing users of an IP. Wait at least for that and keep some
-	// slack for randomization, communication and processing overhead.
+	// Lots of servers wait 1s for existing users of an IP. Wait at least for
+	// that and keep some slack for randomization, communication and processing
+	// overhead.
 	b.InitialInterval = 1400 * time.Millisecond
 	b.MaxInterval = 30 * time.Second
 	b.RandomizationFactor = 0.2
@@ -184,12 +204,14 @@
 }
 
 // NewClient instantiates (but doesn't start) a new DHCPv4 client.
-// To have a working client it's required to set LeaseCallback to something that is capable of configuring the IP
-// address on the given interface. Unless managed through external means like a routing protocol, setting the default
-// route is also required. A simple example with the callback package thus looks like this:
-//  c := dhcp4c.NewClient(yourInterface)
-//  c.LeaseCallback = callback.Compose(callback.ManageIP(yourInterface), callback.ManageDefaultRoute(yourInterface))
-//  c.Run(ctx)
+// To have a working client it's required to set LeaseCallback to something
+// that is capable of configuring the IP address on the given interface. Unless
+// managed through external means like a routing protocol, setting the default
+// route is also required. A simple example with the callback package thus
+// looks like this:
+//   c := dhcp4c.NewClient(yourInterface)
+//   c.LeaseCallback = callback.Compose(
+//     callback.ManageIP(yourInterface),
+//     callback.ManageDefaultRoute(yourInterface))
+//   c.Run(ctx)
 func NewClient(iface *net.Interface) (*Client, error) {
 	broadcastConn := transport.NewBroadcastTransport(iface)
 
@@ -227,9 +249,10 @@
 	}, nil
 }
 
-// acceptableLease checks if the given lease is valid enough to even be processed. This is
-// intentionally not exposed to users because under certain cirumstances it can end up acquiring all
-// available IP addresses from a server.
+// acceptableLease checks if the given lease is valid enough to even be
+// processed. This is intentionally not exposed to users because under certain
+// circumstances it can end up acquiring all available IP addresses from a
+// server.
 func (c *Client) acceptableLease(offer *dhcpv4.DHCPv4) bool {
 	// RFC2131 Section 4.3.1 Table 3
 	if offer.ServerIdentifier() == nil || offer.ServerIdentifier().To4() == nil {
@@ -241,29 +264,34 @@
 		return false
 	}
 
-	// Ignore IPs that are in no way valid for an interface (multicast, loopback, ...)
+	// Ignore IPs that are in no way valid for an interface (multicast,
+	// loopback, ...)
 	if offer.YourIPAddr.To4() == nil || (!offer.YourIPAddr.IsGlobalUnicast() && !offer.YourIPAddr.IsLinkLocalUnicast()) {
 		return false
 	}
 
-	// Technically the options Requested IP address, Parameter request list, Client identifier
-	// and Maximum message size should be refused (MUST NOT), but in the interest of interopatibilty
-	// let's simply remove them if they are present.
+	// Technically the options Requested IP address, Parameter request list,
+	// Client identifier and Maximum message size should be refused (MUST
+	// NOT), but in the interest of interoperability let's simply remove them
+	// if they are present.
 	delete(offer.Options, dhcpv4.OptionRequestedIPAddress.Code())
 	delete(offer.Options, dhcpv4.OptionParameterRequestList.Code())
 	delete(offer.Options, dhcpv4.OptionClientIdentifier.Code())
 	delete(offer.Options, dhcpv4.OptionMaximumDHCPMessageSize.Code())
 
-	// Clamp rebindinding times longer than the lease time. Otherwise the state machine might misbehave.
+	// Clamp rebinding times longer than the lease time. Otherwise the state
+	// machine might misbehave.
 	if offer.IPAddressRebindingTime(0) > offer.IPAddressLeaseTime(0) {
 		offer.UpdateOption(dhcpv4.OptGeneric(dhcpv4.OptionRebindingTimeValue, dhcpv4.Duration(offer.IPAddressLeaseTime(0)).ToBytes()))
 	}
-	// Clamp renewal times longer than the rebinding time. Otherwise the state machine might misbehave.
+	// Clamp renewal times longer than the rebinding time. Otherwise the state
+	// machine might misbehave.
 	if offer.IPAddressRenewalTime(0) > offer.IPAddressRebindingTime(0) {
 		offer.UpdateOption(dhcpv4.OptGeneric(dhcpv4.OptionRenewTimeValue, dhcpv4.Duration(offer.IPAddressRebindingTime(0)).ToBytes()))
 	}
 
-	// Normalize two options that can be represented either inline or as options.
+	// Normalize two options that can be represented either inline or as
+	// options.
 	if len(offer.ServerHostName) > 0 {
 		offer.Options[uint8(dhcpv4.OptionTFTPServerName)] = []byte(offer.ServerHostName)
 	}
@@ -298,9 +326,10 @@
 	return xid, nil
 }
 
-// As most servers out there cannot do reassembly, let's just hope for the best and
-// provide the local interface MTU. If the packet is too big it won't work anyways.
-// Also clamp to the biggest representable MTU in DHCPv4 (2 bytes unsigned int).
+// As most servers out there cannot do reassembly, let's just hope for the
+// best and provide the local interface MTU. If the packet is too big it
+// won't work anyway. Also clamp to the biggest representable MTU in DHCPv4
+// (2 bytes unsigned int).
 func (c *Client) maxMsgSize() uint16 {
 	if c.iface.MTU < math.MaxUint16 {
 		return uint16(c.iface.MTU)
@@ -346,8 +375,9 @@
 	}, nil
 }
 
-// transactionStateSpec describes a state which is driven by a DHCP message transaction (sending a
-// specific message and then transitioning into a different state depending on the received messages)
+// transactionStateSpec describes a state which is driven by a DHCP message
+// transaction (sending a specific message and then transitioning into a
+// different state depending on the received messages)
 type transactionStateSpec struct {
 	// ctx is a context for canceling the process
 	ctx context.Context
@@ -355,33 +385,38 @@
 	// transport is used to send and receive messages in this state
 	transport Transport
 
-	// stateDeadline is a fixed external deadline for how long the FSM can remain in this state.
-	// If it's exceeded the stateDeadlineExceeded callback is called and responsible for
-	// transitioning out of this state. It can be left empty to signal that there's no external
-	// deadline for the state.
+	// stateDeadline is a fixed external deadline for how long the FSM can
+	// remain in this state.
+	// If it's exceeded the stateDeadlineExceeded callback is called and
+	// responsible for transitioning out of this state. It can be left empty to
+	// signal that there's no external deadline for the state.
 	stateDeadline time.Time
 
-	// backoff controls how long to wait for answers until handing control back to the FSM.
-	// Since the FSM hasn't advanced until then this means we just get called again and retransmit.
+	// backoff controls how long to wait for answers until handing control back
+	// to the FSM.
+	// Since the FSM hasn't advanced until then this means we just get called
+	// again and retransmit.
 	backoff backoff.BackOff
 
-	// requestType is the type of DHCP request sent out in this state. This is used to populate
-	// the default options for the message.
+	// requestType is the type of DHCP request sent out in this state. This is
+	// used to populate the default options for the message.
 	requestType dhcpv4.MessageType
 
-	// setExtraOptions can modify the request and set extra options before transmitting. Returning
-	// an error here aborts the FSM an can be used to terminate when no valid request can be
-	// constructed.
+	// setExtraOptions can modify the request and set extra options before
+	// transmitting. Returning an error here aborts the FSM and can be used to
+	// terminate when no valid request can be constructed.
 	setExtraOptions func(msg *dhcpv4.DHCPv4) error
 
-	// handleMessage gets called for every parseable (not necessarily valid) DHCP message received
-	// by the transport. It should return an error for every message that doesn't advance the
-	// state machine and no error for every one that does. It is responsible for advancing the FSM
-	// if the required information is present.
+	// handleMessage gets called for every parseable (not necessarily valid)
+	// DHCP message received by the transport. It should return an error for
+	// every message that doesn't advance the state machine and no error for
+	// every one that does. It is responsible for advancing the FSM if the
+	// required information is present.
 	handleMessage func(msg *dhcpv4.DHCPv4, sentTime time.Time) error
 
-	// stateDeadlineExceeded gets called if either the backoff returns backoff.Stop or the
-	// stateDeadline runs out. It is responsible for advancing the FSM into the next state.
+	// stateDeadlineExceeded gets called if either the backoff returns
+	// backoff.Stop or the stateDeadline runs out. It is responsible for
+	// advancing the FSM into the next state.
 	stateDeadlineExceeded func() error
 }
 
@@ -405,9 +440,10 @@
 		receiveDeadline = earliestDeadline(s.stateDeadline, receiveDeadline)
 	}
 
-	// Jump out if deadline expires in less than 10ms. Minimum lease time is 1s and if we have less
-	// than 10ms to wait for an answer before switching state it makes no sense to send out another
-	// request. This nearly eliminates the problem of sending two different requests back-to-back.
+	// Jump out if deadline expires in less than 10ms. Minimum lease time is 1s
+	// and if we have less than 10ms to wait for an answer before switching
+	// state it makes no sense to send out another request. This nearly
+	// eliminates the problem of sending two different requests back-to-back.
 	if receiveDeadline.Add(-10 * time.Millisecond).Before(sentTime) {
 		return s.stateDeadlineExceeded()
 	}
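
The guard above can be read as "if the remaining window in this state is
smaller than a minimal round trip, skip the send entirely"; an equivalent
sketch using the same identifiers:

    window := receiveDeadline.Sub(sentTime)
    if window < 10*time.Millisecond {
        return s.stateDeadlineExceeded()
    }
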
diff --git a/metropolis/node/core/network/dhcp4c/dhcpc_test.go b/metropolis/node/core/network/dhcp4c/dhcpc_test.go
index b99558c..97c1b2b 100644
--- a/metropolis/node/core/network/dhcp4c/dhcpc_test.go
+++ b/metropolis/node/core/network/dhcp4c/dhcpc_test.go
@@ -172,7 +172,8 @@
 	assert.Equal(t, dhcpv4.MessageTypeDiscover, mt.sentPacket.MessageType())
 }
 
-// TestAcceptableLease tests if a minimal valid lease is accepted by acceptableLease
+// TestAcceptableLease tests if a minimal valid lease is accepted by
+// acceptableLease
 func TestAcceptableLease(t *testing.T) {
 	c := Client{}
 	offer := &dhcpv4.DHCPv4{
@@ -223,8 +224,8 @@
 	return o
 }
 
-// TestDiscoverOffer tests if the DHCP state machine in discovering state properly selects the first valid lease
-// and transitions to requesting state
+// TestDiscoverRequesting tests if the DHCP state machine in discovering state
+// properly selects the first valid lease and transitions to requesting state
 func TestDiscoverRequesting(t *testing.T) {
 	p := newPuppetClient(stateDiscovering)
 
@@ -250,8 +251,8 @@
 	assert.Equal(t, testIP, p.c.offer.YourIPAddr, "DHCP client requested invalid offer")
 }
 
-// TestOfferBound tests if the DHCP state machine in requesting state processes a valid DHCPACK and transitions to
-// bound state.
+// TestRequestingBound tests if the DHCP state machine in requesting state
+// processes a valid DHCPACK and transitions to bound state.
 func TestRequestingBound(t *testing.T) {
 	p := newPuppetClient(stateRequesting)
 
@@ -276,8 +277,8 @@
 	assert.Equal(t, testIP, p.c.lease.YourIPAddr, "DHCP client requested invalid offer")
 }
 
-// TestRequestingDiscover tests if the DHCP state machine in requesting state transitions back to discovering if it
-// takes too long to get a valid DHCPACK.
+// TestRequestingDiscover tests if the DHCP state machine in requesting state
+// transitions back to discovering if it takes too long to get a valid DHCPACK.
 func TestRequestingDiscover(t *testing.T) {
 	p := newPuppetClient(stateRequesting)
 
@@ -306,8 +307,9 @@
 	assert.Equal(t, stateDiscovering, p.c.state, "DHCP client didn't switch back to offer after requesting expired")
 }
 
-// TestDiscoverRapidCommit tests if the DHCP state machine in discovering state transitions directly to bound if a
-// rapid commit response (DHCPACK) is received.
+// TestDiscoverRapidCommit tests if the DHCP state machine in discovering state
+// transitions directly to bound if a rapid commit response (DHCPACK) is
+// received.
 func TestDiscoverRapidCommit(t *testing.T) {
 	testIP := net.IP{192, 0, 2, 2}
 	offer := newResponse(dhcpv4.MessageTypeAck)
@@ -341,8 +343,9 @@
 	return fmt.Sprintf("Test Option %d", uint8(o))
 }
 
-// TestBoundRenewingBound tests if the DHCP state machine in bound correctly transitions to renewing after
-// leaseBoundDeadline expires, sends a DHCPREQUEST and after it gets a DHCPACK response calls LeaseCallback and
+// TestBoundRenewingBound tests if the DHCP state machine in bound correctly
+// transitions to renewing after leaseBoundDeadline expires, sends a
+// DHCPREQUEST and after it gets a DHCPACK response calls LeaseCallback and
 // transitions back to bound with correct new deadlines.
 func TestBoundRenewingBound(t *testing.T) {
 	offer := newResponse(dhcpv4.MessageTypeAck)
@@ -364,7 +367,8 @@
 	if err := p.c.runState(context.Background()); err != nil {
 		t.Error(err)
 	}
-	p.ft.Advance(5 * time.Millisecond) // We cannot intercept time.After so we just advance the clock by the time slept
+	// We cannot intercept time.After so we just advance the clock by the
+	// time slept.
+	p.ft.Advance(5 * time.Millisecond)
 	assert.Equal(t, stateRenewing, p.c.state, "DHCP client not renewing")
 	offer.UpdateOption(dhcpv4.OptGeneric(TestOption(1), []byte{0x12}))
 	p.umt.sendPackets(offer)
@@ -384,8 +388,9 @@
 	assert.Equal(t, dhcpv4.MessageTypeRequest, p.umt.sentPacket.MessageType(), "Invalid message type for renewal")
 }
 
-// TestRenewingRebinding tests if the DHCP state machine in renewing state correctly sends DHCPREQUESTs and transitions
-// to the rebinding state when it hasn't received a valid response until the deadline expires.
+// TestRenewingRebinding tests if the DHCP state machine in renewing state
+// correctly sends DHCPREQUESTs and transitions to the rebinding state when it
+// hasn't received a valid response before the deadline expires.
 func TestRenewingRebinding(t *testing.T) {
 	offer := newResponse(dhcpv4.MessageTypeAck)
 	testIP := net.IP{192, 0, 2, 2}
@@ -431,8 +436,9 @@
 	assert.True(t, p.umt.closed)
 }
 
-// TestRebindingBound tests if the DHCP state machine in rebinding state sends DHCPREQUESTs to the network and if
-// it receives a valid DHCPACK correctly transitions back to bound state.
+// TestRebindingBound tests if the DHCP state machine in rebinding state sends
+// DHCPREQUESTs to the network and if it receives a valid DHCPACK correctly
+// transitions back to bound state.
 func TestRebindingBound(t *testing.T) {
 	offer := newResponse(dhcpv4.MessageTypeAck)
 	testIP := net.IP{192, 0, 2, 2}
@@ -471,8 +477,9 @@
 	assert.Equal(t, stateBound, p.c.state, "DHCP client didn't go back to bound")
 }
 
-// TestRebindingBound tests if the DHCP state machine in rebinding state transitions to discovering state if
-// leaseDeadline expires and calls LeaseCallback with an empty new lease.
+// TestRebindingDiscovering tests if the DHCP state machine in rebinding
+// state transitions to discovering state if leaseDeadline expires and calls
+// LeaseCallback with an empty new lease.
 func TestRebindingDiscovering(t *testing.T) {
 	offer := newResponse(dhcpv4.MessageTypeAck)
 	testIP := net.IP{192, 0, 2, 2}
diff --git a/metropolis/node/core/network/dhcp4c/doc.go b/metropolis/node/core/network/dhcp4c/doc.go
index b270c7b..49aece5 100644
--- a/metropolis/node/core/network/dhcp4c/doc.go
+++ b/metropolis/node/core/network/dhcp4c/doc.go
@@ -14,40 +14,55 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package dhcp4c provides a client implementation of the DHCPv4 protocol (RFC2131) and a few extensions for Linux-based
-// systems.
+// Package dhcp4c provides a client implementation of the DHCPv4 protocol
+// (RFC2131) and a few extensions for Linux-based systems.
 // The code is split into three main parts:
 // - The core DHCP state machine, which lives in dhcpc.go
 // - Mechanisms to send and receive DHCP messages, which live in transport/
-// - Standard callbacks which implement necessary kernel configuration steps in a simple and standalone way living in
-//   callback/
+// - Standard callbacks which implement necessary kernel configuration steps
+//   in a simple and standalone way, living in callback/
 //
-// Since the DHCP protocol is ugly and underspecified (see https://tools.ietf.org/html/draft-ietf-dhc-implementation-02
-// for a subset of known issues), this client slightly bends the specification in the following cases:
-// - IP fragmentation for DHCP messages is not supported for both sending and receiving messages
-//   This is because the major servers (ISC, dnsmasq, ...) do not implement it and just drop fragmented packets, so it
-//   would be counterproductive to try to send them. The client just attempts to send the full message and hopes it
-//   passes through to the server.
-// - The suggested timeouts and wait periods have been tightened significantly. When the standard was written 10Mbps
-//   Ethernet with hubs was a common interconnect. Using these would make the client extremely slow on today's
+// Since the DHCP protocol is ugly and underspecified (see
+// https://tools.ietf.org/html/draft-ietf-dhc-implementation-02 for a subset of
+// known issues), this client slightly bends the specification in the following
+// cases:
+// - IP fragmentation for DHCP messages is not supported, either for sending
+//   or receiving messages. This is because the major servers (ISC, dnsmasq,
+//   ...) do not implement it and just drop fragmented packets, so it would be
+//   counterproductive to try to send them. The client just attempts to send
+//   the full message and hopes it passes through to the server.
+// - The suggested timeouts and wait periods have been tightened significantly.
+//   When the standard was written 10Mbps Ethernet with hubs was a common
+//   interconnect. Using these would make the client extremely slow on today's
 //   1Gbps+ networks.
-// - Wrong data in DHCP responses is fixed up if possible. This fixing includes dropping prohibited options, clamping
-//   semantically invalid data and defaulting not set options as far as it's possible. Non-recoverable responses
-//   (for example because a non-Unicast IP is handed out or lease time is not set or zero) are still ignored.
-//   All data which can be stored in both DHCP fields and options is also normalized to the corresponding option.
-// - Duplicate Address Detection is not implemented by default. It's slow, hard to implement correctly and generally
-//   not necessary on modern networks as the servers already waste time checking for duplicate addresses. It's possible
-//   to hook it in via a LeaseCallback if necessary in a given application.
+// - Wrong data in DHCP responses is fixed up if possible. This fixing
+//   includes dropping prohibited options, clamping semantically invalid data
+//   and defaulting unset options as far as possible. Non-recoverable
+//   responses (for example because a non-Unicast IP is handed out or lease
+//   time is not set or zero) are still ignored. All data which can be stored
+//   in both DHCP fields and options is also normalized to the corresponding
+//   option.
+// - Duplicate Address Detection is not implemented by default. It's slow, hard
+//   to implement correctly and generally not necessary on modern networks as
+//   the servers already waste time checking for duplicate addresses. It's
+//   possible to hook it in via a LeaseCallback if necessary in a given
+//   application.
 //
-// Operationally, there's one known caveat to using this client: If the lease offered during the select phase (in a
-// DHCPOFFER) is not the same as the one sent in the following DHCPACK the first one might be acceptable, but the second
-// one might not be. This can cause pathological behavior where the client constantly switches between discovering and
-// requesting states. Depending on the reuse policies on the DHCP server this can cause the client to consume all
-// available IP addresses. Sadly there's no good way of fixing this within the boundaries of the protocol. A DHCPRELEASE
-// for the adresse would need to be unicasted so the unaccepable address would need to be configured which can be either
-// impossible if it's not valid or not acceptable from a security standpoint (for example because it overlaps with a
-// prefix used internally) and a DHCPDECLINE would cause the server to blacklist the IP thus also depleting the IP pool.
-// This could be potentially avoided by originating DHCPRELEASE packages from a userspace transport, but said transport
-// would need to be routing- and PMTU-aware which would make it even more complicated than the existing
+// Operationally, there's one known caveat to using this client: If the lease
+// offered during the select phase (in a DHCPOFFER) is not the same as the one
+// sent in the following DHCPACK the first one might be acceptable, but the
+// second one might not be. This can cause pathological behavior where the
+// client constantly switches between discovering and requesting states.
+// Depending on the reuse policies on the DHCP server this can cause the client
+// to consume all available IP addresses. Sadly there's no good way of fixing
+// this within the boundaries of the protocol. A DHCPRELEASE for the address
+// would need to be unicast, so the unacceptable address would need to be
+// configured, which can be either impossible if it's not valid or not
+// acceptable from a security standpoint (for example because it overlaps with
+// a prefix used internally), and a DHCPDECLINE would cause the server to
+// blacklist the IP thus also depleting the IP pool.
+// This could potentially be avoided by originating DHCPRELEASE packets from a
+// userspace transport, but said transport would need to be routing- and
+// PMTU-aware which would make it even more complicated than the existing
 // BroadcastTransport.
 package dhcp4c
diff --git a/metropolis/node/core/network/dhcp4c/lease.go b/metropolis/node/core/network/dhcp4c/lease.go
index c56270c..63c51be 100644
--- a/metropolis/node/core/network/dhcp4c/lease.go
+++ b/metropolis/node/core/network/dhcp4c/lease.go
@@ -24,23 +24,27 @@
 	"github.com/insomniacslk/dhcp/dhcpv4"
 )
 
-// Lease represents a DHCPv4 lease. It only consists of an IP, an expiration timestamp and options as all other
-// relevant parts of the message have been normalized into their respective options. It also contains some smart
-// getters for commonly-used options which extract only valid information from options.
+// Lease represents a DHCPv4 lease. It only consists of an IP, an expiration
+// timestamp and options as all other relevant parts of the message have been
+// normalized into their respective options. It also contains some smart
+// getters for commonly-used options which extract only valid information from
+// options.
 type Lease struct {
 	AssignedIP net.IP
 	ExpiresAt  time.Time
 	Options    dhcpv4.Options
 }
 
-// SubnetMask returns the SubnetMask option or the default mask if not set or invalid.
+// SubnetMask returns the SubnetMask option or the default mask if not set or
+// invalid.
 // It returns nil if the lease is nil.
 func (l *Lease) SubnetMask() net.IPMask {
 	if l == nil {
 		return nil
 	}
 	mask := net.IPMask(dhcpv4.GetIP(dhcpv4.OptionSubnetMask, l.Options))
-	if _, bits := mask.Size(); bits != 32 { // If given mask is not valid, use the default mask
+	// If the given mask is not valid, use the default mask.
+	if _, bits := mask.Size(); bits != 32 {
 		mask = l.AssignedIP.DefaultMask()
 	}
 	return mask
@@ -58,7 +62,8 @@
 	}
 }
 
-// Router returns the first valid router from the DHCP router option or nil if none such exists.
+// Router returns the first valid router from the DHCP router option or nil if
+// none such exists.
 // It returns nil if the lease is nil.
 func (l *Lease) Router() net.IP {
 	if l == nil {
@@ -101,7 +106,8 @@
 	return binary.BigEndian.Uint32(ip4)
 }
 
-// DNSServers returns all unique valid DNS servers from the DHCP DomainNameServers options.
+// DNSServers returns all unique valid DNS servers from the DHCP
+// DomainNameServers options.
 // It returns nil if the lease is nil.
 func (l *Lease) DNSServers() DNSServers {
 	if l == nil {
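
Taken together, the getters above make deriving the full interface address
from a lease a one-liner; a minimal in-package sketch (leaseIPNet is an
illustrative name, not an existing function):

    // leaseIPNet combines AssignedIP with the possibly-defaulted mask.
    func leaseIPNet(l *Lease) *net.IPNet {
        if l == nil {
            return nil
        }
        return &net.IPNet{IP: l.AssignedIP, Mask: l.SubnetMask()}
    }
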
diff --git a/metropolis/node/core/network/dhcp4c/transport/transport.go b/metropolis/node/core/network/dhcp4c/transport/transport.go
index 8f5f791..32b2bd7 100644
--- a/metropolis/node/core/network/dhcp4c/transport/transport.go
+++ b/metropolis/node/core/network/dhcp4c/transport/transport.go
@@ -14,8 +14,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package transport contains Linux-based transports for the DHCP broadcast and unicast
-// specifications.
+// Package transport contains Linux-based transports for the DHCP broadcast and
+// unicast specifications.
 package transport
 
 import (
diff --git a/metropolis/node/core/network/dhcp4c/transport/transport_broadcast.go b/metropolis/node/core/network/dhcp4c/transport/transport_broadcast.go
index 79fad7d..6d051ee 100644
--- a/metropolis/node/core/network/dhcp4c/transport/transport_broadcast.go
+++ b/metropolis/node/core/network/dhcp4c/transport/transport_broadcast.go
@@ -31,7 +31,8 @@
 )
 
 const (
-	// RFC2474 Section 4.2.2.1 with reference to RFC791 Section 3.1 (Network Control Precedence)
+	// RFC2474 Section 4.2.2.1 with reference to RFC791 Section 3.1 (Network
+	// Control Precedence)
 	dscpCS7 = 0x7 << 3
 
 	// IPv4 MTU
@@ -49,13 +50,13 @@
 
 // BPF filter for UDP in IPv4 with destination port 68 (DHCP Client)
 //
-// This is used to make the kernel drop non-DHCP traffic for us so that we don't have to handle
-// excessive unrelated traffic flowing on a given link which might overwhelm the single-threaded
-// receiver.
+// This is used to make the kernel drop non-DHCP traffic for us so that we
+// don't have to handle excessive unrelated traffic flowing on a given link
+// which might overwhelm the single-threaded receiver.
 var bpfFilterInstructions = []bpf.Instruction{
 	// Check IP protocol version equals 4 (first 4 bits of the first byte)
-	// With Ethernet II framing, this is more of a sanity check. We already request the kernel
-	// to only return EtherType 0x0800 (IPv4) frames.
+	// With Ethernet II framing, this is more of a sanity check. We already
+	// request the kernel to only return EtherType 0x0800 (IPv4) frames.
 	bpf.LoadAbsolute{Off: 0, Size: 1},
 	bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 0xf0}, // SubnetMask second 4 bits
 	bpf.JumpIf{Cond: bpf.JumpEqual, Val: 4 << 4, SkipTrue: 1},
@@ -85,8 +86,9 @@
 
 var bpfFilter = mustAssemble(bpfFilterInstructions)
 
-// BroadcastTransport implements a DHCP transport based on a custom IP/UDP stack fulfilling the
-// specific requirements for broadcasting DHCP packets (like all-zero source address, no ARP, ...)
+// BroadcastTransport implements a DHCP transport based on a custom IP/UDP
+// stack fulfilling the specific requirements for broadcasting DHCP packets
+// (like all-zero source address, no ARP, ...)
 type BroadcastTransport struct {
 	rawConn *raw.Conn
 	iface   *net.Interface
@@ -122,13 +124,17 @@
 	}
 
 	ipLayer := &layers.IPv4{
-		Version:  4,
-		TOS:      dscpCS7 << 2, // Shift left of ECN field
-		TTL:      1,            // These packets should never be routed (their IP headers contain garbage)
+		Version: 4,
+		// Shift left of ECN field
+		TOS: dscpCS7 << 2,
+		// These packets should never be routed (their IP headers contain
+		// garbage)
+		TTL:      1,
 		Protocol: layers.IPProtocolUDP,
-		Flags:    layers.IPv4DontFragment, // Most DHCP servers don't support fragmented packets
-		DstIP:    net.IPv4bcast,
-		SrcIP:    net.IPv4zero,
+		// Most DHCP servers don't support fragmented packets.
+		Flags: layers.IPv4DontFragment,
+		DstIP: net.IPv4bcast,
+		SrcIP: net.IPv4zero,
 	}
 	udpLayer := &layers.UDP{
 		DstPort: 67,
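
The mustAssemble helper used for bpfFilter above is presumably a thin
panic-on-error wrapper around golang.org/x/net/bpf's assembler; a sketch of
what such a wrapper looks like:

    func mustAssemble(insns []bpf.Instruction) []bpf.RawInstruction {
        ri, err := bpf.Assemble(insns)
        if err != nil {
            // The filter is effectively a compile-time constant, so
            // failing hard here is acceptable.
            panic(err)
        }
        return ri
    }
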
diff --git a/metropolis/node/core/network/dhcp4c/transport/transport_unicast.go b/metropolis/node/core/network/dhcp4c/transport/transport_unicast.go
index bf2b3a4..8093e70 100644
--- a/metropolis/node/core/network/dhcp4c/transport/transport_unicast.go
+++ b/metropolis/node/core/network/dhcp4c/transport/transport_unicast.go
@@ -29,8 +29,8 @@
 	"golang.org/x/sys/unix"
 )
 
-// UnicastTransport implements a DHCP transport based on a normal Linux UDP socket with some custom
-// socket options to influence DSCP and routing.
+// UnicastTransport implements a DHCP transport based on a normal Linux UDP
+// socket with some custom socket options to influence DSCP and routing.
 type UnicastTransport struct {
 	udpConn  *net.UDPConn
 	targetIP net.IP
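
The "custom socket options" to influence DSCP mentioned above are typically
set via IP_TOS on the underlying file descriptor. An illustrative sketch, not
the actual implementation (setDSCPCS7 is an invented name; dscpCS7 is the
constant defined in this package):

    func setDSCPCS7(conn *net.UDPConn) error {
        rc, err := conn.SyscallConn()
        if err != nil {
            return err
        }
        var serr error
        if err := rc.Control(func(fd uintptr) {
            // IP_TOS carries the DSCP value in its upper six bits.
            serr = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_TOS, dscpCS7<<2)
        }); err != nil {
            return err
        }
        return serr
    }
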
diff --git a/metropolis/node/core/network/dns/coredns.go b/metropolis/node/core/network/dns/coredns.go
index 4ec0869..b57c080 100644
--- a/metropolis/node/core/network/dns/coredns.go
+++ b/metropolis/node/core/network/dns/coredns.go
@@ -51,9 +51,10 @@
 }
 
 // New creates a new CoreDNS service.
-// The given channel can then be used to dynamically register and unregister directives in the configuaration.
-// To register a new directive, send an ExtraDirective on the channel. To remove it again, use CancelDirective()
-// to create a removal message.
+// The given channel can then be used to dynamically register and unregister
+// directives in the configuration.
+// To register a new directive, send an ExtraDirective on the channel. To
+// remove it again, use CancelDirective() to create a removal message.
 func New(directiveRegistration chan *ExtraDirective) *Service {
 	return &Service{
 		directives:            map[string]ExtraDirective{},
@@ -83,7 +84,8 @@
 	}
 }
 
-// Run runs the DNS service consisting of the CoreDNS process and the directive registration process
+// Run runs the DNS service consisting of the CoreDNS process and the directive
+// registration process
 func (s *Service) Run(ctx context.Context) error {
 	supervisor.Run(ctx, "coredns", s.runCoreDNS)
 	supervisor.Run(ctx, "registration", s.runRegistration)
@@ -116,8 +118,9 @@
 	return supervisor.RunCommand(ctx, s.cmd)
 }
 
-// runRegistration runs the background registration runnable which has a different lifecycle from the CoreDNS
-// runnable. It is responsible for managing dynamic directives.
+// runRegistration runs the background registration runnable which has a
+// different lifecycle from the CoreDNS runnable. It is responsible for
+// managing dynamic directives.
 func (s *Service) runRegistration(ctx context.Context) error {
 	supervisor.Signal(ctx, supervisor.SignalHealthy)
 	for {
@@ -138,7 +141,8 @@
 	} else {
 		s.directives[d.ID] = *d
 	}
-	// If the process is not currenty running we're relying on corefile regeneration on startup
+	// If the process is not currently running we're relying on corefile
+	// regeneration on startup
 	if s.cmd != nil && s.cmd.Process != nil && s.cmd.ProcessState == nil {
 		s.args.ArgPath("Corefile", s.makeCorefile(s.args))
 		if err := s.cmd.Process.Signal(syscall.SIGUSR1); err != nil {
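
Putting New's registration channel to use, sketched (supervision details
elided; the upstream IP is an example):

    reg := make(chan *ExtraDirective)
    dnsSvc := New(reg)
    // Run the service, e.g. supervisor.Run(ctx, "dns", dnsSvc.Run), then:
    reg <- NewUpstreamDirective([]net.IP{{10, 0, 0, 53}})
    // To remove the directive again, send the removal message produced by
    // CancelDirective (see its documentation; not shown in this diff).
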
diff --git a/metropolis/node/core/network/dns/directives.go b/metropolis/node/core/network/dns/directives.go
index 72c4f29..ff25ae8 100644
--- a/metropolis/node/core/network/dns/directives.go
+++ b/metropolis/node/core/network/dns/directives.go
@@ -24,18 +24,21 @@
 
 // Type ExtraDirective contains additional config directives for CoreDNS.
 type ExtraDirective struct {
-	// ID is the identifier of this directive. There can only be one directive with a given ID active at once.
-	// The ID is also used to identify which directive to purge.
+	// ID is the identifier of this directive. There can only be one directive
+	// with a given ID active at once. The ID is also used to identify which
+	// directive to purge.
 	ID string
-	// directive contains a full CoreDNS directive as a string. It can also use the $FILE(<filename>) macro,
-	// which will be expanded to the path of a file from the files field.
+	// directive contains a full CoreDNS directive as a string. It can also use
+	// the $FILE(<filename>) macro, which will be expanded to the path of a
+	// file from the files field.
 	directive string
-	// files contains additional files used in the configuration. The map key is used as the filename.
+	// files contains additional files used in the configuration. The map key
+	// is used as the filename.
 	files map[string][]byte
 }
 
-// NewUpstreamDirective creates a forward with no fallthrough that forwards all requests not yet matched to the given
-// upstream DNS servers.
+// NewUpstreamDirective creates a forward with no fallthrough that forwards all
+// requests not yet matched to the given upstream DNS servers.
 func NewUpstreamDirective(dnsServers []net.IP) *ExtraDirective {
 	strb := strings.Builder{}
 	if len(dnsServers) > 0 {
@@ -59,9 +62,11 @@
 }
 `
 
-// NewKubernetesDirective creates a directive running a "Kubernetes DNS-Based Service Discovery" [1] compliant service
-// under clusterDomain. The given kubeconfig needs at least read access to services, endpoints and endpointslices.
-// [1] https://github.com/kubernetes/dns/blob/master/docs/specification.md
+// NewKubernetesDirective creates a directive running a "Kubernetes DNS-Based
+// Service Discovery" [1] compliant service under clusterDomain. The given
+// kubeconfig needs at least read access to services, endpoints and
+// endpointslices.
+//   [1] https://github.com/kubernetes/dns/blob/master/docs/specification.md
 func NewKubernetesDirective(clusterDomain string, kubeconfig []byte) *ExtraDirective {
 	return &ExtraDirective{
 		ID:        "k8s-clusterdns",
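
For reference, NewUpstreamDirective above presumably renders a standard
CoreDNS forward stanza along the lines of the following (the exact output
depends on the builder code, which this diff truncates):

    forward . 10.0.0.1 10.0.0.2
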
diff --git a/metropolis/node/core/network/main.go b/metropolis/node/core/network/main.go
index 49ace2e..6940da6 100644
--- a/metropolis/node/core/network/main.go
+++ b/metropolis/node/core/network/main.go
@@ -49,7 +49,8 @@
 	// dhcp client for the 'main' interface of the node.
 	dhcp *dhcp4c.Client
 
-	// nftConn is a shared file descriptor handle to nftables, automatically initialized on first use.
+	// nftConn is a shared file descriptor handle to nftables, automatically
+	// initialized on first use.
 	nftConn             nftables.Conn
 	natTable            *nftables.Table
 	natPostroutingChain *nftables.Chain
@@ -75,7 +76,8 @@
 	DNSServers      dhcp4c.DNSServers
 }
 
-// Watcher allows network Service consumers to watch for updates of the current Status.
+// Watcher allows network Service consumers to watch for updates of the current
+// Status.
 type Watcher struct {
 	watcher event.Watcher
 }
@@ -112,7 +114,8 @@
 	s.dnsReg <- d
 }
 
-// nfifname converts an interface name into 16 bytes padded with zeroes (for nftables)
+// nfifname converts an interface name into 16 bytes padded with zeroes (for
+// nftables)
 func nfifname(n string) []byte {
 	b := make([]byte, 16)
 	copy(b, []byte(n+"\x00"))
@@ -180,7 +183,8 @@
 // sysctlOptions contains sysctl options to apply
 type sysctlOptions map[string]string
 
-// apply attempts to apply all options in sysctlOptions. It aborts on the first one which returns an error when applying.
+// apply attempts to apply all options in sysctlOptions. It aborts on the first
+// one which returns an error when applying.
 func (o sysctlOptions) apply() error {
 	for name, value := range o {
 		filePath := path.Join("/proc/sys/", strings.ReplaceAll(name, ".", "/"))
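
For completeness, the write side of the apply loop above is presumably a
single file write per option; a sketch consistent with the path construction
shown (error wrapping assumed):

    for name, value := range o {
        filePath := path.Join("/proc/sys/", strings.ReplaceAll(name, ".", "/"))
        if err := ioutil.WriteFile(filePath, []byte(value), 0644); err != nil {
            return fmt.Errorf("failed to set %s: %w", name, err)
        }
    }
    return nil
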
@@ -221,12 +225,14 @@
 	sysctlOpts := sysctlOptions{
 		// Enable IP forwarding for our pods
 		"net.ipv4.ip_forward": "1",
-		// Enable strict reverse path filtering on all interfaces (important for spoofing prevention from Pods with CAP_NET_ADMIN)
+		// Enable strict reverse path filtering on all interfaces (important
+		// for spoofing prevention from Pods with CAP_NET_ADMIN)
 		"net.ipv4.conf.all.rp_filter": "1",
 		// Disable source routing
 		"net.ipv4.conf.all.accept_source_route": "0",
 
-		// Increase Linux socket kernel buffer sizes to 16MiB (needed for fast datacenter networks)
+		// Increase Linux socket kernel buffer sizes to 16MiB (needed for fast
+		// datacenter networks)
 		"net.core.rmem_max": "16777216",
 		"net.core.wmem_max": "16777216",
 		"net.ipv4.tcp_rmem": "4096 87380 16777216",
diff --git a/metropolis/node/kubernetes/clusternet/clusternet.go b/metropolis/node/kubernetes/clusternet/clusternet.go
index 74fe1ba..85a78a1 100644
--- a/metropolis/node/kubernetes/clusternet/clusternet.go
+++ b/metropolis/node/kubernetes/clusternet/clusternet.go
@@ -14,15 +14,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package clusternet implements a WireGuard-based overlay network for Kubernetes. It relies on controller-manager's
-// IPAM to assign IP ranges to nodes and on Kubernetes' Node objects to distribute the Node IPs and public keys.
+// Package clusternet implements a WireGuard-based overlay network for
+// Kubernetes. It relies on controller-manager's IPAM to assign IP ranges to
+// nodes and on Kubernetes' Node objects to distribute the Node IPs and public
+// keys.
 //
-// It sets up a single WireGuard network interface and routes the entire ClusterCIDR into that network interface,
-// relying on WireGuard's AllowedIPs mechanism to look up the correct peer node to send the traffic to. This means
-// that the routing table doesn't change and doesn't have to be separately managed. When clusternet is started
-// it annotates its WireGuard public key onto its node object.
-// For each node object that's created or updated on the K8s apiserver it checks if a public key annotation is set and
-// if yes a peer with that public key, its InternalIP as endpoint and the CIDR for that node as AllowedIPs is created.
+// It sets up a single WireGuard network interface and routes the entire
+// ClusterCIDR into that network interface, relying on WireGuard's AllowedIPs
+// mechanism to look up the correct peer node to send the traffic to. This
+// means that the routing table doesn't change and doesn't have to be
+// separately managed. When clusternet is started it annotates its WireGuard
+// public key onto its node object.
+// For each node object that's created or updated on the K8s apiserver it
+// checks if a public key annotation is set; if so, a peer with that public
+// key, its InternalIP as endpoint and the CIDR for that node as AllowedIPs
+// is created.
 package clusternet
 
 import (
@@ -45,8 +51,8 @@
 
 	common "source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/localstorage"
-	"source.monogon.dev/metropolis/pkg/logtree"
 	"source.monogon.dev/metropolis/pkg/jsonpatch"
+	"source.monogon.dev/metropolis/pkg/logtree"
 	"source.monogon.dev/metropolis/pkg/supervisor"
 )
 
@@ -67,7 +73,8 @@
 	logger   logtree.LeveledLogger
 }
 
-// ensureNode creates/updates the corresponding WireGuard peer entry for the given node objet
+// ensureNode creates/updates the corresponding WireGuard peer entry for the
+// given node object
 func (s *Service) ensureNode(newNode *corev1.Node) error {
 	if newNode.Name == s.NodeName {
 		// Node doesn't need to connect to itself
@@ -108,8 +115,8 @@
 	}
 	allowedIPs = append(allowedIPs, net.IPNet{IP: internalIP, Mask: net.CIDRMask(32, 32)})
 	s.logger.V(1).Infof("Adding/Updating WireGuard peer node %s, endpoint %s, allowedIPs %+v", newNode.Name, internalIP.String(), allowedIPs)
-	// WireGuard's kernel side has create/update semantics on peers by default. So we can just add the peer multiple
-	// times to update it.
+	// WireGuard's kernel side has create/update semantics on peers by default.
+	// So we can just add the peer multiple times to update it.
 	err = s.wgClient.ConfigureDevice(clusterNetDeviceName, wgtypes.Config{
 		Peers: []wgtypes.PeerConfig{{
 			PublicKey:         pubKey,
@@ -124,7 +131,8 @@
 	return nil
 }
 
-// removeNode removes the corresponding WireGuard peer entry for the given node object
+// removeNode removes the corresponding WireGuard peer entry for the given node
+// object
 func (s *Service) removeNode(oldNode *corev1.Node) error {
 	if oldNode.Name == s.NodeName {
 		// Node doesn't need to connect to itself
@@ -150,7 +158,8 @@
 	return nil
 }
 
-// ensureOnDiskKey loads the private key from disk or (if none exists) generates one and persists it.
+// ensureOnDiskKey loads the private key from disk or (if none exists)
+// generates one and persists it.
 func (s *Service) ensureOnDiskKey() error {
 	keyRaw, err := s.DataDirectory.Key.Read()
 	if os.IsNotExist(err) {
@@ -176,7 +185,8 @@
 	return nil
 }
 
-// annotateThisNode annotates the node (as defined by NodeName) with the wireguard public key of this node.
+// annotateThisNode annotates the node (as defined by NodeName) with the
+// WireGuard public key of this node.
 func (s *Service) annotateThisNode(ctx context.Context) error {
 	patch := []jsonpatch.JsonPatchOp{{
 		Operation: "add",
diff --git a/metropolis/node/kubernetes/containerd/main.go b/metropolis/node/kubernetes/containerd/main.go
index 6b99081..c3dd4a0 100644
--- a/metropolis/node/kubernetes/containerd/main.go
+++ b/metropolis/node/kubernetes/containerd/main.go
@@ -76,10 +76,12 @@
 
 			n, err := io.Copy(supervisor.RawLogger(ctx), fifo)
 			if n == 0 && err == nil {
-				// Hack because pipes/FIFOs can return zero reads when nobody is writing. To avoid busy-looping,
-				// sleep a bit before retrying. This does not loose data since the FIFO internal buffer will
-				// stall writes when it becomes full. 10ms maximum stall in a non-latency critical process (reading
-				// debug logs) is not an issue for us.
+				// Hack because pipes/FIFOs can return zero reads when nobody
+				// is writing. To avoid busy-looping, sleep a bit before
+				// retrying. This does not lose data since the FIFO internal
+				// buffer will stall writes when it becomes full. 10ms maximum
+				// stall in a non-latency critical process (reading debug logs)
+				// is not an issue for us.
 				time.Sleep(10 * time.Millisecond)
 			} else if err != nil {
 				return fmt.Errorf("log pump failed: %v", err)
@@ -88,14 +90,18 @@
 	}
 }
 
-// runPreseed loads OCI bundles in tar form from preseedNamespacesDir into containerd at startup.
-// This can be run multiple times, containerd will automatically dedup the layers.
-// containerd uses namespaces to keep images (and everything else) separate so to define where the images will be loaded
-// to they need to be in a folder named after the namespace they should be loaded into.
-// containerd's CRI plugin (which is built as part of containerd) uses a hardcoded namespace ("k8s.io") for everything
-// accessed through CRI, so if an image should be available on K8s it needs to be in that namespace.
-// As an example if image helloworld should be loaded for use with Kubernetes, the OCI bundle needs to be at
-// <preseedNamespacesDir>/k8s.io/helloworld.tar. No tagging beyond what's in the bundle is performed.
+// runPreseed loads OCI bundles in tar form from preseedNamespacesDir into
+// containerd at startup.
+// This can be run multiple times; containerd will automatically dedup the
+// layers. containerd uses namespaces to keep images (and everything else)
+// separate, so to define where the images will be loaded to, they need to be
+// in a folder named after the namespace they should be loaded into.
+// containerd's CRI plugin (which is built as part of containerd) uses a
+// hardcoded namespace ("k8s.io") for everything accessed through CRI, so if
+// an image should be available on K8s it needs to be in that namespace.
+// As an example if image helloworld should be loaded for use with Kubernetes,
+// the OCI bundle needs to be at <preseedNamespacesDir>/k8s.io/helloworld.tar.
+// No tagging beyond what's in the bundle is performed.
 func (s *Service) runPreseed(ctx context.Context) error {
 	client, err := ctr.New(s.EphemeralVolume.ClientSocket.FullPath())
 	if err != nil {
@@ -126,8 +132,9 @@
 			if err != nil {
 				return fmt.Errorf("failed to open preseed image \"%v\": %w", image.Name(), err)
 			}
-			// defer in this loop is fine since we're never going to preseed more than ~1M images which is where our
-			// file descriptor limit is.
+			// defer in this loop is fine since we're never going to preseed
+			// more than ~1M images, which is where our file descriptor limit
+			// is.
 			defer imageFile.Close()
 			importedImages, err := client.Import(ctxWithNS, imageFile)
 			if err != nil {
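
For illustration, here is a minimal sketch of the namespace-scoped import the
preseed comment describes, using the upstream containerd Go client
(github.com/containerd/containerd and its namespaces package); the socket path,
namespace and bundle path are illustrative assumptions, not the values used by
Metropolis:

    // Import an OCI bundle into the "k8s.io" namespace so that the CRI
    // plugin (and thus Kubernetes) can see it.
    client, err := containerd.New("/run/containerd/containerd.sock")
    if err != nil {
        return err
    }
    defer client.Close()
    ctxWithNS := namespaces.WithNamespace(ctx, "k8s.io")
    f, err := os.Open("/preseed/k8s.io/helloworld.tar")
    if err != nil {
        return err
    }
    defer f.Close()
    // Importing the same bundle twice is fine: layers are deduplicated.
    if _, err := client.Import(ctxWithNS, f); err != nil {
        return err
    }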
diff --git a/metropolis/node/kubernetes/csi.go b/metropolis/node/kubernetes/csi.go
index efd8af4..4893254 100644
--- a/metropolis/node/kubernetes/csi.go
+++ b/metropolis/node/kubernetes/csi.go
@@ -39,9 +39,10 @@
 	"source.monogon.dev/metropolis/pkg/supervisor"
 )
 
-// Derived from K8s spec for acceptable names, but shortened to 130 characters to avoid issues with
-// maximum path length. We don't provision longer names so this applies only if you manually create
-// a volume with a name of more than 130 characters.
+// Derived from K8s spec for acceptable names, but shortened to 130 characters
+// to avoid issues with maximum path length. We don't provision longer names so
+// this applies only if you manually create a volume with a name of more than
+// 130 characters.
 var acceptableNames = regexp.MustCompile("^[a-z][a-z0-9-.]{0,128}[a-z0-9]$")
 
 type csiPluginServer struct {
@@ -64,8 +65,8 @@
 	pluginServer := grpc.NewServer()
 	csi.RegisterIdentityServer(pluginServer, s)
 	csi.RegisterNodeServer(pluginServer, s)
-	// Enable graceful shutdown since we don't have long-running RPCs and most of them shouldn't and can't be
-	// cancelled anyways.
+	// Enable graceful shutdown since we don't have long-running RPCs and most
+	// of them shouldn't and can't be cancelled anyway.
 	if err := supervisor.Run(ctx, "csi-node", supervisor.GRPCServer(pluginServer, pluginListener, true)); err != nil {
 		return err
 	}
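
As a quick illustration of what the acceptableNames regexp above accepts (the
sample names are hypothetical):

    // First character: lowercase letter; last: lowercase letter or digit;
    // up to 128 characters of [a-z0-9-.] in between, so 130 total at most.
    acceptableNames.MatchString("pvc-f81355cd")           // true
    acceptableNames.MatchString("MyVolume")               // false: uppercase
    acceptableNames.MatchString(strings.Repeat("a", 131)) // false: too long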
diff --git a/metropolis/node/kubernetes/hyperkube/main.go b/metropolis/node/kubernetes/hyperkube/main.go
index 3b4ac08..10c7a2d 100644
--- a/metropolis/node/kubernetes/hyperkube/main.go
+++ b/metropolis/node/kubernetes/hyperkube/main.go
@@ -56,9 +56,9 @@
 
 	hyperkubeCommand, allCommandFns := NewHyperKubeCommand()
 
-	// TODO: once we switch everything over to Cobra commands, we can go back to calling
-	// cliflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
-	// normalize func and add the go flag set by hand.
+	// TODO: once we switch everything over to Cobra commands, we can go back
+	// to calling cliflag.InitFlags() (by removing its pflag.Parse() call). For
+	// now, we have to set the normalize func and add the go flag set by hand.
 	pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
 	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
 	// cliflag.InitFlags()
@@ -89,8 +89,8 @@
 
 // NewHyperKubeCommand is the entry point for hyperkube
 func NewHyperKubeCommand() (*cobra.Command, []func() *cobra.Command) {
-	// these have to be functions since the command is polymorphic. Cobra wants you to be top level
-	// command to get executed
+	// these have to be functions since the command is polymorphic. Cobra
+	// wants the executed command to be the top-level command
 	apiserver := func() *cobra.Command { return kubeapiserver.NewAPIServerCommand() }
 	controller := func() *cobra.Command { return kubecontrollermanager.NewControllerManagerCommand() }
 	scheduler := func() *cobra.Command { return kubescheduler.NewSchedulerCommand() }
diff --git a/metropolis/node/kubernetes/kubelet.go b/metropolis/node/kubernetes/kubelet.go
index 953a201..d966e5d 100644
--- a/metropolis/node/kubernetes/kubelet.go
+++ b/metropolis/node/kubernetes/kubelet.go
@@ -102,8 +102,8 @@
 			"memory": "300Mi",
 		},
 
-		// We're not going to use this, but let's make it point to a known-empty directory in case anybody manages to
-		// trigger it.
+		// We're not going to use this, but let's make it point to a
+		// known-empty directory in case anybody manages to trigger it.
 		VolumePluginDir: s.EphemeralDirectory.FlexvolumePlugins.FullPath(),
 	}
 }
diff --git a/metropolis/node/kubernetes/nfproxy/nfproxy.go b/metropolis/node/kubernetes/nfproxy/nfproxy.go
index ac13af1..5fcc5b5 100644
--- a/metropolis/node/kubernetes/nfproxy/nfproxy.go
+++ b/metropolis/node/kubernetes/nfproxy/nfproxy.go
@@ -14,8 +14,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package nfproxy is a Kubernetes Service IP proxy based exclusively on the Linux nftables interface.
-// It uses netfilter's NAT capabilities to accept traffic on service IPs and DNAT it to the respective endpoint.
+// Package nfproxy is a Kubernetes Service IP proxy based exclusively on the
+// Linux nftables interface. It uses netfilter's NAT capabilities to accept
+// traffic on service IPs and DNAT it to the respective endpoint.
 package nfproxy
 
 import (
@@ -42,7 +43,8 @@
 )
 
 type Service struct {
-	// Traffic in ClusterCIDR is assumed to be originated inside the cluster and will not be SNATed
+	// Traffic in ClusterCIDR is assumed to originate inside the cluster and
+	// will not be SNATed
 	ClusterCIDR net.IPNet
 	// A Kubernetes ClientSet with read access to endpoints and services
 	ClientSet kubernetes.Interface
diff --git a/metropolis/node/kubernetes/pki/kubernetes.go b/metropolis/node/kubernetes/pki/kubernetes.go
index 467f718..0e59306 100644
--- a/metropolis/node/kubernetes/pki/kubernetes.go
+++ b/metropolis/node/kubernetes/pki/kubernetes.go
@@ -56,9 +56,11 @@
 	// APIServer client certificate used to authenticate to kubelets.
 	APIServerKubeletClient KubeCertificateName = "apiserver-kubelet-client"
 
-	// Kubernetes Controller manager client certificate, used to authenticate to the apiserver.
+	// Kubernetes Controller manager client certificate, used to authenticate
+	// to the apiserver.
 	ControllerManagerClient KubeCertificateName = "controller-manager-client"
-	// Kubernetes Controller manager server certificate, used to run its HTTP server.
+	// Kubernetes Controller manager server certificate, used to run its HTTP
+	// server.
 	ControllerManager KubeCertificateName = "controller-manager"
 
 	// Kubernetes Scheduler client certificate, used to authenticate to the apiserver.
@@ -66,12 +68,12 @@
 	// Kubernetes scheduler server certificate, used to run its HTTP server.
 	Scheduler KubeCertificateName = "scheduler"
 
-	// Root-on-kube (system:masters) client certificate. Used to control the apiserver (and resources) by Metropolis
-	// internally.
+	// Root-on-kube (system:masters) client certificate. Used to control the
+	// apiserver (and resources) by Metropolis internally.
 	Master KubeCertificateName = "master"
 
 	// OpenAPI Kubernetes Aggregation CA.
-	// See: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#ca-reusage-and-conflicts
+	//   https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#ca-reusage-and-conflicts
 	AggregationCA    KubeCertificateName = "aggregation-ca"
 	FrontProxyClient KubeCertificateName = "front-proxy-client"
 )
@@ -79,8 +81,9 @@
 const (
 	// etcdPrefix is where all the PKI data is stored in etcd.
 	etcdPrefix = "/kube-pki/"
-	// serviceAccountKeyName is the etcd path part that is used to store the ServiceAccount authentication secret.
-	// This is not a certificate, just an RSA key.
+	// serviceAccountKeyName is the etcd path part that is used to store the
+	// ServiceAccount authentication secret. This is not a certificate, just an
+	// RSA key.
 	serviceAccountKeyName = "service-account-privkey"
 )
 
@@ -116,7 +119,8 @@
 			"kubernetes.default.svc.cluster.local",
 			"localhost",
 		},
-		[]net.IP{{10, 0, 255, 1}, {127, 0, 0, 1}}, // TODO(q3k): add service network internal apiserver address
+		// TODO(q3k): add service network internal apiserver address
+		[]net.IP{{10, 0, 255, 1}, {127, 0, 0, 1}},
 	))
 	make(IdCA, APIServerKubeletClient, opki.Client("metropolis:apiserver-kubelet-client", nil))
 	make(IdCA, ControllerManagerClient, opki.Client("system:kube-controller-manager", nil))
@@ -131,7 +135,8 @@
 	return &pki
 }
 
-// EnsureAll ensures that all static certificates (and the serviceaccount key) are present on etcd.
+// EnsureAll ensures that all static certificates (and the serviceaccount key)
+// are present on etcd.
 func (k *PKI) EnsureAll(ctx context.Context) error {
 	for n, v := range k.Certificates {
 		k.logger.Infof("Ensuring %s exists", string(n))
@@ -147,8 +152,8 @@
 	return nil
 }
 
-// Kubeconfig generates a kubeconfig blob for a given certificate name. The same lifetime semantics as in .Certificate
-// apply.
+// Kubeconfig generates a kubeconfig blob for a given certificate name. The
+// same lifetime semantics as in .Certificate apply.
 func (k *PKI) Kubeconfig(ctx context.Context, name KubeCertificateName) ([]byte, error) {
 	c, ok := k.Certificates[name]
 	if !ok {
@@ -157,9 +162,11 @@
 	return Kubeconfig(ctx, k.KV, c)
 }
 
-// Certificate retrieves an x509 DER-encoded (but not PEM-wrapped) key and certificate for a given certificate name.
-// If the requested certificate is volatile, it will be created on demand. Otherwise it will be created on etcd (if not
-// present), and retrieved from there.
+// Certificate retrieves an x509 DER-encoded (but not PEM-wrapped) key and
+// certificate for a given certificate name.
+// If the requested certificate is volatile, it will be created on demand.
+// Otherwise it will be created on etcd (if not present), and retrieved from
+// there.
 func (k *PKI) Certificate(ctx context.Context, name KubeCertificateName) (cert, key []byte, err error) {
 	c, ok := k.Certificates[name]
 	if !ok {
@@ -168,7 +175,8 @@
 	return c.Ensure(ctx, k.KV)
 }
 
-// Kubeconfig generates a kubeconfig blob for this certificate. The same lifetime semantics as in .Ensure apply.
+// Kubeconfig generates a kubeconfig blob for this certificate. The same
+// lifetime semantics as in .Ensure apply.
 func Kubeconfig(ctx context.Context, kv clientv3.KV, c *opki.Certificate) ([]byte, error) {
 
 	cert, key, err := c.Ensure(ctx, kv)
@@ -204,11 +212,12 @@
 	return clientcmd.Write(*kubeconfig)
 }
 
-// ServiceAccountKey retrieves (and possibly generates and stores on etcd) the Kubernetes service account key. The
-// returned data is ready to be used by Kubernetes components (in PKIX form).
+// ServiceAccountKey retrieves (and possibly generates and stores on etcd) the
+// Kubernetes service account key. The returned data is ready to be used by
+// Kubernetes components (in PKIX form).
 func (k *PKI) ServiceAccountKey(ctx context.Context) ([]byte, error) {
-	// TODO(q3k): this should be abstracted away once we abstract away etcd access into a library with try-or-create
-	// semantics.
+	// TODO(q3k): this should be abstracted away once we abstract away etcd
+	// access into a library with try-or-create semantics.
 	path := fmt.Sprintf("%s%s.der", etcdPrefix, serviceAccountKeyName)
 
 	// Try loading  key from etcd.
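
As a consumer-side sketch of the PKI API documented above, assuming k is a
*PKI that has been initialized with etcd access (error handling elided where
obvious):

    // Fetch a ready-to-write kubeconfig blob for the built-in
    // cluster-admin ("root-on-kube", system:masters) identity.
    kubeconfig, err := k.Kubeconfig(ctx, Master)
    if err != nil {
        return err
    }
    // Or fetch the DER-encoded certificate/key pair for one component:
    cert, key, err := k.Certificate(ctx, ControllerManagerClient)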
diff --git a/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go b/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
index a437973..ed47f74 100644
--- a/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
+++ b/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
@@ -14,10 +14,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package kvmdevice implements a Kubernetes device plugin for the virtual KVM device. Using the device plugin API
-// allows us to take advantage of the scheduler to locate pods on machines eligible for KVM and also allows granular
-// access control to KVM using quotas instead of needing privileged access.
-// Since KVM devices are virtual, this plugin emulates a huge number of them so that we never run out.
+// Package kvmdevice implements a Kubernetes device plugin for the virtual KVM
+// device. Using the device plugin API allows us to take advantage of the
+// scheduler to locate pods on machines eligible for KVM and also allows
+// granular access control to KVM using quotas instead of needing privileged
+// access.
+// Since KVM devices are virtual, this plugin emulates a huge number of them so
+// that we never run out.
 package kvmdevice
 
 import (
@@ -110,8 +113,9 @@
 	return &response, nil
 }
 
-// deviceNumberFromString gets a Linux device number from a string containing two decimal numbers representing the major
-// and minor device numbers separated by a colon. Whitespace is ignored.
+// deviceNumberFromString gets a Linux device number from a string containing
+// two decimal numbers representing the major and minor device numbers
+// separated by a colon. Whitespace is ignored.
 func deviceNumberFromString(s string) (uint64, error) {
 	kvmDevParts := strings.Split(s, ":")
 	if len(kvmDevParts) != 2 {
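
A usage sketch for deviceNumberFromString; "10:232" is the conventional
major:minor pair of /dev/kvm as reported under sysfs, used here purely as an
example:

    // Parse "major:minor" into a combined Linux device number.
    num, err := deviceNumberFromString("10:232")
    if err != nil {
        return err
    }
    // x/sys/unix can split the combined number back apart.
    fmt.Println(unix.Major(num), unix.Minor(num)) // 10 232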
diff --git a/metropolis/node/kubernetes/provisioner.go b/metropolis/node/kubernetes/provisioner.go
index 0aa5c66..42edf77 100644
--- a/metropolis/node/kubernetes/provisioner.go
+++ b/metropolis/node/kubernetes/provisioner.go
@@ -46,13 +46,16 @@
 	"source.monogon.dev/metropolis/pkg/supervisor"
 )
 
-// ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to match csiProvisionerServerName declared.
+// ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to
+// match csiProvisionerServerName declared.
 const csiProvisionerServerName = "dev.monogon.metropolis.vfs"
 
-// csiProvisionerServer is responsible for the provisioning and deprovisioning of CSI-based container volumes. It runs on all
-// nodes and watches PVCs for ones assigned to the node it's running on and fulfills the provisioning request by
-// creating a directory, applying a quota and creating the corresponding PV. When the PV is released and its retention
-// policy is Delete, the directory and the PV resource are deleted.
+// csiProvisionerServer is responsible for the provisioning and deprovisioning
+// of CSI-based container volumes. It runs on all nodes and watches PVCs for
+// ones assigned to the node it's running on and fulfills the provisioning
+// request by creating a directory, applying a quota and creating the
+// corresponding PV. When the PV is released and its retention policy is
+// Delete, the directory and the PV resource are deleted.
 type csiProvisionerServer struct {
 	NodeName         string
 	Kubernetes       kubernetes.Interface
@@ -68,13 +71,16 @@
 	logger               logtree.LeveledLogger
 }
 
-// runCSIProvisioner runs the main provisioning machinery. It consists of a bunch of informers which keep track of
-// the events happening on the Kubernetes control plane and informs us when something happens. If anything happens to
-// PVCs or PVs, we enqueue the identifier of that resource in a work queue. Queues are being worked on by only one
-// worker to limit load and avoid complicated locking infrastructure. Failed items are requeued.
+// runCSIProvisioner runs the main provisioning machinery. It consists of a
+// bunch of informers which keep track of the events happening on the
+// Kubernetes control plane and inform us when something happens. If anything
+// happens to PVCs or PVs, we enqueue the identifier of that resource in a work
+// queue. Queues are being worked on by only one worker to limit load and avoid
+// complicated locking infrastructure. Failed items are requeued.
 func (p *csiProvisionerServer) Run(ctx context.Context) error {
-	// The recorder is used to log Kubernetes events for successful or failed volume provisions. These events then
-	// show up in `kubectl describe pvc` and can be used by admins to debug issues with this provisioner.
+	// The recorder is used to log Kubernetes events for successful or failed
+	// volume provisions. These events then show up in `kubectl describe pvc`
+	// and can be used by admins to debug issues with this provisioner.
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: p.Kubernetes.CoreV1().Events("")})
 	p.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: csiProvisionerServerName, Host: p.NodeName})
@@ -119,7 +125,8 @@
 	return nil
 }
 
-// isOurPVC checks if the given PVC is is to be provisioned by this provisioner and has been scheduled onto this node
+// isOurPVC checks if the given PVC is to be provisioned by this provisioner
+// and has been scheduled onto this node.
 func (p *csiProvisionerServer) isOurPVC(pvc *v1.PersistentVolumeClaim) bool {
 	if pvc.ObjectMeta.Annotations["volume.beta.kubernetes.io/storage-provisioner"] != csiProvisionerServerName {
 		return false
@@ -130,7 +137,8 @@
 	return true
 }
 
-// isOurPV checks if the given PV has been provisioned by this provisioner and has been scheduled onto this node
+// isOurPV checks if the given PV has been provisioned by this provisioner and
+// has been scheduled onto this node.
 func (p *csiProvisionerServer) isOurPV(pv *v1.PersistentVolume) bool {
 	if pv.ObjectMeta.Annotations["pv.kubernetes.io/provisioned-by"] != csiProvisionerServerName {
 		return false
@@ -161,8 +169,8 @@
 	p.pvQueue.Add(key)
 }
 
-// processQueueItems gets items from the given work queue and calls the process function for each of them. It self-
-// terminates once the queue is shut down.
+// processQueueItems gets items from the given work queue and calls the process
+// function for each of them. It self-terminates once the queue is shut down.
 func (p *csiProvisionerServer) processQueueItems(queue workqueue.RateLimitingInterface, process func(key string) error) {
 	for {
 		obj, shutdown := queue.Get()
@@ -194,8 +202,8 @@
 	return filepath.Join(p.VolumesDirectory.FullPath(), volumeID)
 }
 
-// processPVC looks at a single PVC item from the queue, determines if it needs to be provisioned and logs the
-// provisioning result to the recorder
+// processPVC looks at a single PVC item from the queue, determines if it needs
+// to be provisioned and logs the provisioning result to the recorder
 func (p *csiProvisionerServer) processPVC(key string) error {
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
@@ -223,8 +231,9 @@
 	}
 
 	if storageClass.Provisioner != csiProvisionerServerName {
-		// We're not responsible for this PVC. Can only happen if controller-manager makes a mistake
-		// setting the annotations, but we're bailing here anyways for safety.
+		// We're not responsible for this PVC. Can only happen if
+		// controller-manager makes a mistake setting the annotations, but
+		// we're bailing here anyway for safety.
 		return nil
 	}
 
@@ -239,8 +248,9 @@
 	return nil
 }
 
-// provisionPVC creates the directory where the volume lives, sets a quota for the requested amount of storage and
-// creates the PV object representing this new volume
+// provisionPVC creates the directory where the volume lives, sets a quota for
+// the requested amount of storage and creates the PV object representing this
+// new volume
 func (p *csiProvisionerServer) provisionPVC(pvc *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass) error {
 	claimRef, err := ref.GetReference(scheme.Scheme, pvc)
 	if err != nil {
@@ -335,8 +345,9 @@
 	return nil
 }
 
-// processPV looks at a single PV item from the queue and checks if it has been released and needs to be deleted. If yes
-// it deletes the associated quota, directory and the PV object and logs the result to the recorder.
+// processPV looks at a single PV item from the queue and checks if it has been
+// released and needs to be deleted. If yes, it deletes the associated quota,
+// directory and the PV object and logs the result to the recorder.
 func (p *csiProvisionerServer) processPV(key string) error {
 	_, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
@@ -362,7 +373,8 @@
 	switch *pv.Spec.VolumeMode {
 	case "", v1.PersistentVolumeFilesystem:
 		if err := fsquota.SetQuota(volumePath, 0, 0); err != nil {
-			// We record these here manually since a successful deletion removes the PV we'd be attaching them to
+			// We record these here manually since a successful deletion
+			// removes the PV we'd be attaching them to.
 			p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to remove quota: %v", err)
 			return fmt.Errorf("failed to remove quota: %w", err)
 		}
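
The worker-loop shape that the processQueueItems comment describes is the
standard client-go workqueue pattern; a minimal sketch (not the exact method
body) looks like this:

    func worker(queue workqueue.RateLimitingInterface, process func(key string) error) {
        for {
            obj, shutdown := queue.Get()
            if shutdown {
                return // queue was shut down, terminate the worker
            }
            key := obj.(string) // the real code guards this assertion
            if err := process(key); err != nil {
                queue.AddRateLimited(obj) // failed items are requeued
            } else {
                queue.Forget(obj) // stop tracking retries for this item
            }
            queue.Done(obj) // always mark processing as finished
        }
    }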
diff --git a/metropolis/node/kubernetes/reconciler/reconciler.go b/metropolis/node/kubernetes/reconciler/reconciler.go
index 51cc248..1828060 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler.go
@@ -48,20 +48,25 @@
 }
 
 const (
-	// BuiltinLabelKey is used as a k8s label to mark built-in objects (ie., managed by the reconciler)
+	// BuiltinLabelKey is used as a k8s label to mark built-in objects (i.e.,
+	// managed by the reconciler)
 	BuiltinLabelKey = "metropolis.monogon.dev/builtin"
-	// BuiltinLabelValue is used as a k8s label value, under the BuiltinLabelKey key.
+	// BuiltinLabelValue is used as a k8s label value, under the
+	// BuiltinLabelKey key.
 	BuiltinLabelValue = "true"
-	// BuiltinRBACPrefix is used to prefix all built-in objects that are part of the rbac/v1 API (eg.
-	// {Cluster,}Role{Binding,} objects). This corresponds to the colon-separated 'namespaces' notation used by
+	// BuiltinRBACPrefix is used to prefix all built-in objects that are part
+	// of the rbac/v1 API (e.g. {Cluster,}Role{Binding,} objects). This
+	// corresponds to the colon-separated 'namespaces' notation used by
 	// Kubernetes system (system:) objects.
 	BuiltinRBACPrefix = "metropolis:"
 )
 
-// builtinLabels makes a kubernetes-compatible label dictionary (key->value) that is used to mark objects that are
-// built-in into Metropolis (ie., managed by the reconciler). These are then subsequently retrieved by listBuiltins.
-// The extra argument specifies what other labels are to be merged into the the labels dictionary, for convenience. If
-// nil or empty, no extra labels will be applied.
+// builtinLabels makes a kubernetes-compatible label dictionary (key->value)
+// that is used to mark objects that are built into Metropolis (i.e., managed
+// by the reconciler). These are then subsequently retrieved by listBuiltins.
+// The extra argument specifies what other labels are to be merged into the
+// labels dictionary, for convenience. If nil or empty, no extra labels will
+// be applied.
 func builtinLabels(extra map[string]string) map[string]string {
 	l := map[string]string{
 		BuiltinLabelKey: BuiltinLabelValue,
@@ -74,32 +79,39 @@
 	return l
 }
 
-// listBuiltins returns a k8s client ListOptions structure that allows to retrieve all objects that are built-in into
-// Metropolis currently present in the API server (ie., ones that are to be managed by the reconciler). These are
-// created by applying builtinLabels to their metadata labels.
+// listBuiltins is a k8s client ListOptions structure that allows retrieving
+// all objects built into Metropolis currently present in the API server
+// (i.e., ones that are to be managed by the reconciler). These are created by
+// applying builtinLabels to their metadata labels.
 var listBuiltins = meta.ListOptions{
 	LabelSelector: fmt.Sprintf("%s=%s", BuiltinLabelKey, BuiltinLabelValue),
 }
 
-// builtinRBACName returns a name that is compatible with colon-delimited 'namespaced' objects, a la system:*.
-// These names are to be used by all builtins created as part of the rbac/v1 Kubernetes API.
+// builtinRBACName returns a name that is compatible with colon-delimited
+// 'namespaced' objects, a la system:*.
+// These names are to be used by all builtins created as part of the rbac/v1
+// Kubernetes API.
 func builtinRBACName(name string) string {
 	return BuiltinRBACPrefix + name
 }
 
-// resource is a type of resource to be managed by the reconciler. All builti-ins/reconciled objects must implement
-// this interface to be managed correctly by the reconciler.
+// resource is a type of resource to be managed by the reconciler. All
+// built-ins/reconciled objects must implement this interface to be managed
+// correctly by the reconciler.
 type resource interface {
-	// List returns a list of names of objects current present on the target (ie. k8s API server).
+	// List returns a list of names of objects currently present on the target
+	// (i.e. k8s API server).
 	List(ctx context.Context) ([]string, error)
-	// Create creates an object on the target. The el interface{} argument is the black box object returned by the
-	// Expected() call.
+	// Create creates an object on the target. The el interface{} argument is
+	// the black box object returned by the Expected() call.
 	Create(ctx context.Context, el interface{}) error
 	// Delete delete an object, by name, from the target.
 	Delete(ctx context.Context, name string) error
-	// Expected returns a map of all objects expected to be present on the target. The keys are names (which must
-	// correspond to the names returned by List() and used by Delete(), and the values are blackboxes that will then
-	// be passed to the Create() call if their corresponding key (name) does not exist on the target.
+	// Expected returns a map of all objects expected to be present on the
+	// target. The keys are names (which must correspond to the names returned
+	// by List() and used by Delete()), and the values are blackboxes that
+	// will then be passed to the Create() call if their corresponding key
+	// (name) does not exist on the target.
 	Expected() map[string]interface{}
 }
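
To illustrate how builtinLabels, builtinRBACName and listBuiltins fit
together, a sketch (the ClusterRole name "example" is made up):

    role := &rbac.ClusterRole{
        ObjectMeta: meta.ObjectMeta{
            // "metropolis:example", colon-namespaced like system: objects.
            Name: builtinRBACName("example"),
            // Tagged metropolis.monogon.dev/builtin=true for retrieval.
            Labels: builtinLabels(nil),
        },
    }
    // All objects created this way can later be listed with:
    //   clientSet.RbacV1().ClusterRoles().List(ctx, listBuiltins)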
 
diff --git a/metropolis/node/kubernetes/reconciler/reconciler_test.go b/metropolis/node/kubernetes/reconciler/reconciler_test.go
index b58d4af..ba2f4e8 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler_test.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler_test.go
@@ -28,9 +28,10 @@
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// kubernetesMeta unwraps an interface{} that might contain a Kubernetes resource of type that is managed by the
-// reconciler. Any time a new Kubernetes type is managed by the reconciler, the following switch should be extended
-// to cover that type.
+// kubernetesMeta unwraps an interface{} that might contain a Kubernetes
+// resource of a type that is managed by the reconciler. Any time a new
+// Kubernetes type is managed by the reconciler, the following switch should be
+// extended to cover that type.
 func kubernetesMeta(v interface{}) *meta.ObjectMeta {
 	switch v2 := v.(type) {
 	case *rbac.ClusterRole:
@@ -49,9 +50,11 @@
 	return nil
 }
 
-// TestExpectedNamedCorrectly ensures that all the Expected objects of all resource types have a correspondence between
-// their returned key and inner name. This contract must be met in order for the reconciler to not create runaway
-// resources. This assumes all managed resources are Kubernetes resources.
+// TestExpectedNamedCorrectly ensures that all the Expected objects of all
+// resource types have a correspondence between their returned key and inner
+// name. This contract must be met in order for the reconciler to not create
+// runaway resources. This assumes all managed resources are Kubernetes
+// resources.
 func TestExpectedNamedCorrectly(t *testing.T) {
 	for reconciler, r := range allResources(nil) {
 		for outer, v := range r.Expected() {
@@ -68,10 +71,13 @@
 	}
 }
 
-// TestExpectedLabeledCorrectly ensures that all the Expected objects of all resource types have a Kubernetes metadata
-// label that signifies it's a builtin object, to be retrieved afterwards. This contract must be met in order for the
-// reconciler to not keep overwriting objects (and possibly failing), when a newly created object is not then
-// retrievable using a selector corresponding to this label. This assumes all managed resources are Kubernetes objects.
+// TestExpectedLabeledCorrectly ensures that all the Expected objects of all
+// resource types have a Kubernetes metadata label that signifies it's a
+// builtin object, to be retrieved afterwards. This contract must be met in
+// order for the reconciler to not keep overwriting objects (and possibly
+// failing), when a newly created object is not then retrievable using a
+// selector corresponding to this label. This assumes all managed resources are
+// Kubernetes objects.
 func TestExpectedLabeledCorrectly(t *testing.T) {
 	for reconciler, r := range allResources(nil) {
 		for outer, v := range r.Expected() {
@@ -88,8 +94,9 @@
 	}
 }
 
-// testResource is a resource type used for testing. The inner type is a string that is equal to its name (key).
-// It simulates a target (ie. k8s apiserver mock) that always acts nominally (all resources are created, deleted as
+// testResource is a resource type used for testing. The inner type is a string
+// that is equal to its name (key). It simulates a target (i.e. k8s apiserver
+// mock) that always acts nominally (all resources are created, deleted as
 // requested, and the state is consistent with requests).
 type testResource struct {
 	// current is the simulated state of resources in the target.
@@ -124,7 +131,8 @@
 	return exp
 }
 
-// newTestResource creates a test resource with a list of expected resource strings.
+// newTestResource creates a test resource with a list of expected resource
+// strings.
 func newTestResource(want ...string) *testResource {
 	expected := make(map[string]string)
 	for _, w := range want {
@@ -136,8 +144,9 @@
 	}
 }
 
-// currentDiff returns a human-readable string showing the different between the current state and the given resource
-// strings. If no difference is present, the returned string is empty.
+// currentDiff returns a human-readable string showing the difference between
+// the current state and the given resource strings. If no difference is
+// present, the returned string is empty.
 func (r *testResource) currentDiff(want ...string) string {
 	expected := make(map[string]string)
 	for _, w := range want {
@@ -154,8 +163,8 @@
 	return ""
 }
 
-// TestBasicReconciliation ensures that the reconcile function does manipulate a target state based on a set of
-// expected resources.
+// TestBasicReconciliation ensures that the reconcile function manipulates a
+// target state based on a set of expected resources.
 func TestBasicReconciliation(t *testing.T) {
 	ctx := context.Background()
 	r := newTestResource("foo", "bar", "baz")
diff --git a/metropolis/node/kubernetes/reconciler/resources_csi.go b/metropolis/node/kubernetes/reconciler/resources_csi.go
index c7f7b2b..04d52a8 100644
--- a/metropolis/node/kubernetes/reconciler/resources_csi.go
+++ b/metropolis/node/kubernetes/reconciler/resources_csi.go
@@ -24,9 +24,11 @@
 	"k8s.io/client-go/kubernetes"
 )
 
-// TODO(q3k): this is duplicated with //metropolis/node/kubernetes:provisioner.go; integrate this once provisioner.go
-// gets moved into a subpackage.
-// ONCHANGE(//metropolis/node/kubernetes:provisioner.go): needs to match csiProvisionerName declared.
+// TODO(q3k): this is duplicated with
+// //metropolis/node/kubernetes:provisioner.go; integrate this once
+// provisioner.go gets moved into a subpackage.
+// ONCHANGE(//metropolis/node/kubernetes:provisioner.go): needs to match
+// csiProvisionerName declared.
 const csiProvisionerName = "dev.monogon.metropolis.vfs"
 
 type resourceCSIDrivers struct {
diff --git a/metropolis/node/kubernetes/service.go b/metropolis/node/kubernetes/service.go
index 5c8b037..fe701e6 100644
--- a/metropolis/node/kubernetes/service.go
+++ b/metropolis/node/kubernetes/service.go
@@ -206,7 +206,8 @@
 	return nil
 }
 
-// GetDebugKubeconfig issues a kubeconfig for an arbitrary given identity. Useful for debugging and testing.
+// GetDebugKubeconfig issues a kubeconfig for an arbitrary given identity.
+// Useful for debugging and testing.
 func (s *Service) GetDebugKubeconfig(ctx context.Context, request *apb.GetDebugKubeconfigRequest) (*apb.GetDebugKubeconfigResponse, error) {
 	client, err := s.c.KPKI.VolatileClient(ctx, request.Id, request.Groups)
 	if err != nil {
diff --git a/metropolis/pkg/devicemapper/devicemapper.go b/metropolis/pkg/devicemapper/devicemapper.go
index 2687e3a..d56e8f9 100644
--- a/metropolis/pkg/devicemapper/devicemapper.go
+++ b/metropolis/pkg/devicemapper/devicemapper.go
@@ -122,8 +122,8 @@
 	}
 }
 
-// stringToDelimitedBuf copies src to dst and returns an error if len(src) > len(dst),
-// or when the string contains a null byte.
+// stringToDelimitedBuf copies src to dst and returns an error if len(src) >
+// len(dst), or when the string contains a null byte.
 func stringToDelimitedBuf(dst []byte, src string) error {
 	if len(src) > len(dst)-1 {
 		return fmt.Errorf("string longer than target buffer (%v > %v)", len(src), len(dst)-1)
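
Behavior sketch for stringToDelimitedBuf (the buffer size is arbitrary here):

    var name [16]byte
    // Copies "thinpool" plus a terminating NUL into name; errors out if the
    // string does not fit or itself contains a NUL byte.
    if err := stringToDelimitedBuf(name[:], "thinpool"); err != nil {
        return err
    }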
diff --git a/metropolis/pkg/erofs/compression.go b/metropolis/pkg/erofs/compression.go
index 58b2f4b..dca9946 100644
--- a/metropolis/pkg/erofs/compression.go
+++ b/metropolis/pkg/erofs/compression.go
@@ -21,8 +21,8 @@
 
 import "encoding/binary"
 
-// mapHeader is a legacy but still-used advisory structure at the start of a compressed VLE block. It contains constant
-// values as annotated.
+// mapHeader is a legacy but still-used advisory structure at the start of a
+// compressed VLE block. It contains constant values as annotated.
 type mapHeader struct {
 	Reserved      uint32 // 0
 	Advise        uint16 // 1
diff --git a/metropolis/pkg/erofs/defs.go b/metropolis/pkg/erofs/defs.go
index b547867..85898bf 100644
--- a/metropolis/pkg/erofs/defs.go
+++ b/metropolis/pkg/erofs/defs.go
@@ -16,11 +16,13 @@
 
 package erofs
 
-// This file contains definitions coming from the in-Kernel implementation of the EROFS filesystem.
-// All definitions come from @linux//fs/erofs:erofs_fs.h unless stated otherwise.
+// This file contains definitions coming from the in-kernel implementation of
+// the EROFS filesystem. All definitions come from @linux//fs/erofs:erofs_fs.h
+// unless stated otherwise.
 
-// Magic contains the 4 magic bytes starting at position 1024 identifying an EROFS filesystem.
-// Defined in @linux//include/uapi/linux/magic.h EROFS_SUPER_MAGIC_V1
+// Magic contains the 4 magic bytes starting at position 1024 identifying an
+// EROFS filesystem. Defined in @linux//include/uapi/linux/magic.h as
+// EROFS_SUPER_MAGIC_V1.
 var Magic = [4]byte{0xe2, 0xe1, 0xf5, 0xe0}
 
 const blockSizeBits = 12
diff --git a/metropolis/pkg/erofs/defs_test.go b/metropolis/pkg/erofs/defs_test.go
index e32e155..1d31bff 100644
--- a/metropolis/pkg/erofs/defs_test.go
+++ b/metropolis/pkg/erofs/defs_test.go
@@ -24,8 +24,8 @@
 	"github.com/stretchr/testify/assert"
 )
 
-// These test that the specified structures serialize to the same number of bytes as the ones in the
-// EROFS kernel module.
+// These test that the specified structures serialize to the same number of
+// bytes as the ones in the EROFS kernel module.
 
 func TestSuperblockSize(t *testing.T) {
 	var buf bytes.Buffer
diff --git a/metropolis/pkg/erofs/erofs.go b/metropolis/pkg/erofs/erofs.go
index af6ad1c..3e4ce89 100644
--- a/metropolis/pkg/erofs/erofs.go
+++ b/metropolis/pkg/erofs/erofs.go
@@ -29,19 +29,22 @@
 // Writer writes a new EROFS filesystem.
 type Writer struct {
 	w io.WriteSeeker
-	// fixDirectoryEntry contains for each referenced path where it is referenced from. Since self-references
-	// are required anyways (for the "." and ".." entries) we let the user write files in any order and just
-	// point the directory entries to the right target nid and file type on Close().
+	// fixDirectoryEntry records, for each referenced path, where it is
+	// referenced from. Since self-references are required anyway (for the "."
+	// and ".." entries) we let the user write files in any order and just
+	// point the directory entries to the right target nid and file type on
+	// Close().
 	fixDirectoryEntry map[string][]direntFixupLocation
 	pathInodeMeta     map[string]*uncompressedInodeMeta
-	// legacyInodeIndex stores the next legacy (32-bit) inode to be allocated. 64 bit inodes are automatically
-	// calculated by EROFS on mount.
+	// legacyInodeIndex stores the next legacy (32-bit) inode to be allocated.
+	// 64-bit inodes are automatically calculated by EROFS on mount.
 	legacyInodeIndex    uint32
 	blockAllocatorIndex uint32
 	metadataBlocksFree  metadataBlocksMeta
 }
 
-// NewWriter creates a new EROFS filesystem writer. The given WriteSeeker needs to be at the start.
+// NewWriter creates a new EROFS filesystem writer. The given WriteSeeker needs
+// to be at the start.
 func NewWriter(w io.WriteSeeker) (*Writer, error) {
 	erofsWriter := &Writer{
 		w:                 w,
@@ -56,17 +59,20 @@
 		return nil, fmt.Errorf("failed to write initial padding: %w", err)
 	}
 	if err := binary.Write(erofsWriter.w, binary.LittleEndian, &superblock{
-		Magic:          Magic,
-		BlockSizeBits:  blockSizeBits,
-		RootNodeNumber: 36, // 1024 (padding) + 128 (superblock) / 32, not eligible for fixup as different int size
+		Magic:         Magic,
+		BlockSizeBits: blockSizeBits,
+		// (1024 (padding) + 128 (superblock)) / 32, not eligible for fixup as
+		// different int size
+		RootNodeNumber: 36,
 	}); err != nil {
 		return nil, fmt.Errorf("failed to write superblock: %w", err)
 	}
 	return erofsWriter, nil
 }
 
-// allocateMetadata allocates metadata space of size bytes with a given alignment and seeks to the first byte of the
-// newly-allocated metadata space. It also returns the position of that first byte.
+// allocateMetadata allocates metadata space of size bytes with a given
+// alignment and seeks to the first byte of the newly-allocated metadata space.
+// It also returns the position of that first byte.
 func (w *Writer) allocateMetadata(size int, alignment uint16) (int64, error) {
 	if size > BlockSize {
 		panic("cannot allocate a metadata object bigger than BlockSize bytes")
@@ -90,9 +96,10 @@
 	return pos, nil
 }
 
-// allocateBlocks allocates n new BlockSize-sized block and seeks to the beginning of the first newly-allocated block.
-// It also returns the first newly-allocated block number.  The caller is expected to write these blocks completely
-// before calling allocateBlocks again.
+// allocateBlocks allocates n new BlockSize-sized blocks and seeks to the
+// beginning of the first newly-allocated block. It also returns the first
+// newly-allocated block number. The caller is expected to write these blocks
+// completely before calling allocateBlocks again.
 func (w *Writer) allocateBlocks(n uint32) (uint32, error) {
 	if _, err := w.w.Seek(int64(w.blockAllocatorIndex)*BlockSize, io.SeekStart); err != nil {
 		return 0, fmt.Errorf("cannot seek to end of last block, check write alignment: %w", err)
@@ -113,18 +120,20 @@
 	return i
 }
 
-// CreateFile adds a new file to the EROFS. It returns a WriteCloser to which the file contents should be written and
-// which then needs to be closed. The last writer obtained by calling CreateFile() needs to be closed first before
-// opening a new one. The given pathname needs to be referenced by a directory created using Create(), otherwise it will
-// not be accessible.
+// CreateFile adds a new file to the EROFS. It returns a WriteCloser to which
+// the file contents should be written and which then needs to be closed. The
+// last writer obtained by calling CreateFile() needs to be closed before
+// opening a new one. The given pathname needs to be referenced by a directory
+// created using Create(), otherwise it will not be accessible.
 func (w *Writer) CreateFile(pathname string, meta *FileMeta) io.WriteCloser {
 	return w.create(pathname, meta)
 }
 
-// Create adds a new non-file inode to the EROFS. This includes directories, device nodes, symlinks and FIFOs.
-// The first call to Create() needs to be with pathname "." and a directory inode.
-// The given pathname needs to be referenced by a directory, otherwise it will not be accessible (with the exception of
-// the directory ".").
+// Create adds a new non-file inode to the EROFS. This includes directories,
+// device nodes, symlinks and FIFOs. The first call to Create() needs to be
+// with pathname "." and a directory inode. The given pathname needs to be
+// referenced by a directory, otherwise it will not be accessible (with the
+// exception of the directory ".").
 func (w *Writer) Create(pathname string, inode Inode) error {
 	iw := w.create(pathname, inode)
 	switch i := inode.(type) {
@@ -140,8 +149,9 @@
 	return iw.Close()
 }
 
-// Close finishes writing an EROFS filesystem. Errors by this function need to be handled as they indicate if the
-// written filesystem is consistent (i.e. there are no directory entries pointing to nonexistent inodes).
+// Close finishes writing an EROFS filesystem. Errors from this function need
+// to be handled as they indicate whether the written filesystem is consistent
+// (i.e. there are no directory entries pointing to nonexistent inodes).
 func (w *Writer) Close() error {
 	for targetPath, entries := range w.fixDirectoryEntry {
 		for _, entry := range entries {
@@ -157,8 +167,9 @@
 	return nil
 }
 
-// uncompressedInodeMeta tracks enough metadata about a written inode to be able to point dirents to it and to provide
-// a WriteSeeker into the inode itself.
+// uncompressedInodeMeta tracks enough metadata about a written inode to be
+// able to point dirents to it and to provide a WriteSeeker into the inode
+// itself.
 type uncompressedInodeMeta struct {
 	nid   uint64
 	ftype uint8
@@ -188,8 +199,9 @@
 
 func (a *uncompressedInodeMeta) Write(p []byte) (int, error) {
 	if a.currentOffset < a.blockLength {
-		// TODO(lorenz): Handle the special case where a directory inode is spread across multiple
-		// blocks (depending on other factors this occurs around ~200 direct children).
+		// TODO(lorenz): Handle the special case where a directory inode is
+		// spread across multiple blocks (depending on other factors this
+		// occurs around ~200 direct children).
 		return 0, errors.New("relocating dirents in multi-block directory inodes is unimplemented")
 	}
 	if _, err := a.writer.w.Seek(a.inlineStart+a.currentOffset, io.SeekStart); err != nil {
@@ -204,8 +216,9 @@
 	entryIndex uint16
 }
 
-// direntFixup overrides nid and file type from the path the dirent is pointing to. The given iw is expected to be at
-// the start of the dirent inode to be fixed up.
+// direntFixup overrides nid and file type from the path the dirent is pointing
+// to. The given iw is expected to be at the start of the dirent inode to be
+// fixed up.
 func direntFixup(iw io.WriteSeeker, entryIndex int64, meta *uncompressedInodeMeta) error {
 	if _, err := iw.Seek(entryIndex*12, io.SeekStart); err != nil {
 		return fmt.Errorf("failed to seek to dirent: %w", err)
@@ -227,12 +240,14 @@
 	freeBytes   uint16
 }
 
-// metadataBlocksMeta contains metadata about all metadata blocks, most importantly the amount of free
-// bytes in each block. This is not a map for reproducibility (map ordering).
+// metadataBlocksMeta contains metadata about all metadata blocks, most
+// importantly the number of free bytes in each block. This is not a map in
+// order to keep output reproducible (map iteration order is randomized).
 type metadataBlocksMeta []metadataBlockMeta
 
-// findBlock returns the absolute position where `size` bytes with the specified alignment can still fit.
-// If there is not enough space in any metadata block it returns false as the second return value.
+// findBlock returns the absolute position where `size` bytes with the
+// specified alignment can still fit. If there is not enough space in any
+// metadata block it returns false as the second return value.
 func (m metadataBlocksMeta) findBlock(size uint16, alignment uint16) (int64, bool) {
 	for i, blockMeta := range m {
 		freeBytesAligned := blockMeta.freeBytes - (blockMeta.freeBytes % alignment)
diff --git a/metropolis/pkg/erofs/inode_types.go b/metropolis/pkg/erofs/inode_types.go
index 05b0f54..bac29c5 100644
--- a/metropolis/pkg/erofs/inode_types.go
+++ b/metropolis/pkg/erofs/inode_types.go
@@ -28,12 +28,14 @@
 	"golang.org/x/sys/unix"
 )
 
-// Inode specifies an interface that all inodes that can be written to an EROFS filesystem implement.
+// Inode is the interface implemented by all inodes that can be written to an
+// EROFS filesystem.
 type Inode interface {
 	inode() *inodeCompact
 }
 
-// Base contains generic inode metadata independent from the specific inode type.
+// Base contains generic inode metadata independent of the specific inode
+// type.
 type Base struct {
 	Permissions uint16
 	UID, GID    uint16
@@ -47,8 +49,8 @@
 	}
 }
 
-// Directory represents a directory inode. The Children property contains the directories' direct children (just the
-// name, not the full path).
+// Directory represents a directory inode. The Children property contains the
+// directory's direct children (just the name, not the full path).
 type Directory struct {
 	Base
 	Children []string
@@ -59,7 +61,8 @@
 }
 
 func (d *Directory) writeTo(w *uncompressedInodeWriter) error {
-	// children is d.Children with appended backrefs (. and ..), copied to not pollute source
+	// children is d.Children with appended backrefs (. and ..), copied so as
+	// not to pollute the source
 	children := make([]string, len(d.Children))
 	copy(children, d.Children)
 	children = append(children, ".", "..")
@@ -97,7 +100,8 @@
 	return nil
 }
 
-// CharacterDevice represents a Unix character device inode with major and minor numbers.
+// CharacterDevice represents a Unix character device inode with major and
+// minor numbers.
 type CharacterDevice struct {
 	Base
 	Major uint32
@@ -110,7 +114,8 @@
 	return i
 }
 
-// CharacterDevice represents a Unix block device inode with major and minor numbers.
+// BlockDevice represents a Unix block device inode with major and minor
+// numbers.
 type BlockDevice struct {
 	Base
 	Major uint32
@@ -141,7 +146,8 @@
 	return s.baseInode(unix.S_IFSOCK)
 }
 
-// SymbolicLink represents a symbolic link/symlink to another inode. Target is the literal string target of the symlink.
+// SymbolicLink represents a symbolic link/symlink to another inode. Target is
+// the literal string target of the symlink.
 type SymbolicLink struct {
 	Base
 	Target string
@@ -156,8 +162,9 @@
 	return err
 }
 
-// FileMeta represents the metadata of a regular file. In this case the contents are written to a Writer returned by the
-// CreateFile function on the EROFS Writer and not included in the structure itself.
+// FileMeta represents the metadata of a regular file. In this case the
+// contents are written to a Writer returned by the CreateFile function on the
+// EROFS Writer and not included in the structure itself.
 type FileMeta struct {
 	Base
 }
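
Putting the Writer and the inode types together, a minimal usage sketch based
only on the API visible in this diff (file name and contents are placeholders,
error handling mostly elided):

    f, _ := os.Create("image.erofs")
    w, _ := erofs.NewWriter(f)
    // The first Create() call must be the root directory ".".
    w.Create(".", &erofs.Directory{
        Base:     erofs.Base{Permissions: 0755},
        Children: []string{"hello.txt"},
    })
    fw := w.CreateFile("hello.txt", &erofs.FileMeta{
        Base: erofs.Base{Permissions: 0644},
    })
    fw.Write([]byte("hello world\n"))
    fw.Close() // must be closed before creating another inode
    // Close verifies that all dirents point to existing inodes.
    if err := w.Close(); err != nil {
        // the written filesystem is inconsistent
    }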
diff --git a/metropolis/pkg/erofs/uncompressed_inode_writer.go b/metropolis/pkg/erofs/uncompressed_inode_writer.go
index df89fec..97aefc0 100644
--- a/metropolis/pkg/erofs/uncompressed_inode_writer.go
+++ b/metropolis/pkg/erofs/uncompressed_inode_writer.go
@@ -24,9 +24,10 @@
 	"math"
 )
 
-// uncompressedInodeWriter exposes a io.Write-style interface for a single uncompressed inode. It splits the Write-calls
-// into blocks and writes both the blocks and inode metadata. It is required to call Close() to ensure everything is
-// properly written down before writing another inode.
+// uncompressedInodeWriter exposes an io.Write-style interface for a single
+// uncompressed inode. It splits the Write calls into blocks and writes both
+// the blocks and inode metadata. It is required to call Close() to ensure
+// everything is properly written out before writing another inode.
 type uncompressedInodeWriter struct {
 	buf               bytes.Buffer
 	writer            *Writer
diff --git a/metropolis/pkg/fileargs/fileargs.go b/metropolis/pkg/fileargs/fileargs.go
index 26c054b..bec8fca 100644
--- a/metropolis/pkg/fileargs/fileargs.go
+++ b/metropolis/pkg/fileargs/fileargs.go
@@ -31,8 +31,9 @@
 // DefaultSize is the default size limit for FileArgs
 const DefaultSize = 4 * 1024 * 1024
 
-// TempDirectory is the directory where FileArgs will mount the actual files to. Defaults to
-// os.TempDir() but can be globally overridden by the application before any FileArgs are used.
+// TempDirectory is the directory where FileArgs will mount the actual files
+// to. Defaults to os.TempDir() but can be globally overridden by the
+// application before any FileArgs are used.
 var TempDirectory = os.TempDir()
 
 type FileArgs struct {
@@ -40,14 +41,15 @@
 	lastError error
 }
 
-// New initializes a new set of file-based arguments. Remember to call Close() if you're done
-// using it, otherwise this leaks memory and mounts.
+// New initializes a new set of file-based arguments. Remember to call Close()
+// if you're done using it, otherwise this leaks memory and mounts.
 func New() (*FileArgs, error) {
 	return NewWithSize(DefaultSize)
 }
 
-// NewWthSize is the same as new, but with a custom size limit. Please be aware that this data
-// cannot be swapped out and using a size limit that's too high can deadlock your kernel.
+// NewWithSize is the same as New, but with a custom size limit. Please be
+// aware that this data cannot be swapped out and using a size limit that's
+// too high can deadlock your kernel.
 func NewWithSize(size uint64) (*FileArgs, error) {
 	randomNameRaw := make([]byte, 128/8)
 	if _, err := io.ReadFull(rand.Reader, randomNameRaw); err != nil {
@@ -57,7 +59,8 @@
 	if err := os.MkdirAll(tmpPath, 0700); err != nil {
 		return nil, err
 	}
-	// This uses ramfs instead of tmpfs because we never want to swap this for security reasons
+	// This uses ramfs instead of tmpfs because we never want this data to be
+	// swapped out, for security reasons
 	if err := unix.Mount("none", tmpPath, "ramfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, fmt.Sprintf("size=%v", size)); err != nil {
 		return nil, err
 	}
@@ -66,8 +69,8 @@
 	}, nil
 }
 
-// ArgPath returns the path of the temporary file for this argument. It names the temporary
-// file according to name.
+// ArgPath returns the path of the temporary file for this argument. It names
+// the temporary file according to name.
 func (f *FileArgs) ArgPath(name string, content []byte) string {
 	if f.lastError != nil {
 		return ""
@@ -83,8 +86,11 @@
 	return path
 }
 
-// FileOpt returns a full option with the temporary file name already filled in.
-// Example: `FileOpt("--testopt", "test.txt", []byte("hello")) == "--testopt=/tmp/daf8ed.../test.txt"`
+// FileOpt returns a full option with the temporary file name already filled
+// in. Example:
+//
+//   option := FileOpt("--testopt", "test.txt", []byte("hello"))
+//   option == "--testopt=/tmp/daf8ed.../test.txt"
 func (f *FileArgs) FileOpt(optName, fileName string, content []byte) string {
 	return fmt.Sprintf("%v=%v", optName, f.ArgPath(fileName, content))
 }
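
A usage sketch for FileArgs; the daemon name and config contents are
placeholders:

    args, err := fileargs.New()
    if err != nil {
        return err
    }
    // Close() unmounts the ramfs; without it, memory and mounts leak.
    defer args.Close()
    cmd := exec.Command("somedaemon",
        args.FileOpt("--config", "config.json", []byte(`{"key": "value"}`)),
    )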
diff --git a/metropolis/pkg/freeport/freeport.go b/metropolis/pkg/freeport/freeport.go
index bd047b5..da52311 100644
--- a/metropolis/pkg/freeport/freeport.go
+++ b/metropolis/pkg/freeport/freeport.go
@@ -21,10 +21,12 @@
 	"net"
 )
 
-// AllocateTCPPort allocates a TCP port on the looopback address, and starts a temporary listener on it. That listener
-// is returned to the caller alongside with the allocated port number. The listener must be closed right before
-// the port is used by the caller. This naturally still leaves a race condition window where that port number
-// might be snatched up by some other process, but there doesn't seem to be a better way to do this.
+// AllocateTCPPort allocates a TCP port on the loopback address, and starts a
+// temporary listener on it. That listener is returned to the caller along
+// with the allocated port number. The listener must be closed right before
+// the port is used by the caller. This naturally still leaves a race
+// condition window where that port number might be snatched up by some other
+// process, but there doesn't seem to be a better way to do this.
 func AllocateTCPPort() (uint16, io.Closer, error) {
 	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
 	if err != nil {
@@ -38,8 +40,9 @@
 	return uint16(l.Addr().(*net.TCPAddr).Port), l, nil
 }
 
-// MustConsume takes the result of AllocateTCPPort, closes the listener and returns the allocated port.
-// If anything goes wrong (port could not be allocated or closed) it will panic.
+// MustConsume takes the result of AllocateTCPPort, closes the listener and
+// returns the allocated port. If anything goes wrong (port could not be
+// allocated or closed) it will panic.
 func MustConsume(port uint16, lis io.Closer, err error) int {
 	if err != nil {
 		panic(err)
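
Typical use, per the comments above:

    // Allocate a port and immediately consume it: the listener is closed and
    // the bare port number returned. A small race window remains by design.
    port := freeport.MustConsume(freeport.AllocateTCPPort())
    fmt.Printf("using 127.0.0.1:%d\n", port)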
diff --git a/metropolis/pkg/fsquota/fsinfo.go b/metropolis/pkg/fsquota/fsinfo.go
index f885d51..ecbaecf 100644
--- a/metropolis/pkg/fsquota/fsinfo.go
+++ b/metropolis/pkg/fsquota/fsinfo.go
@@ -24,9 +24,9 @@
 	"golang.org/x/sys/unix"
 )
 
-// This requires fsinfo() support, which is not yet in any stable kernel.
-// Our kernel has that syscall backported. This would otherwise be an extremely expensive
-// operation and also involve lots of logic from our side.
+// This requires fsinfo() support, which is not yet in any stable kernel. Our
+// kernel has that syscall backported. This would otherwise be an extremely
+// expensive operation and also involve lots of logic from our side.
 
 // From syscall_64.tbl
 const sys_fsinfo = 441
diff --git a/metropolis/pkg/fsquota/fsquota.go b/metropolis/pkg/fsquota/fsquota.go
index 3c0c578..263dd48 100644
--- a/metropolis/pkg/fsquota/fsquota.go
+++ b/metropolis/pkg/fsquota/fsquota.go
@@ -14,11 +14,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package fsquota provides a simplified interface to interact with Linux's filesystem qouta API.
-// It only supports setting quotas on directories, not groups or users.
-// Quotas need to be already enabled on the filesystem to be able to use them using this package.
-// See the quotactl package if you intend to use this on a filesystem where quotas need to be
-// enabled manually.
+// Package fsquota provides a simplified interface to interact with Linux's
+// filesystem quota API. It only supports setting quotas on directories, not
+// groups or users. Quotas need to be already enabled on the filesystem to be
+// able to use them using this package. See the quotactl package if you intend
+// to use this on a filesystem where quotas need to be enabled manually.
 package fsquota
 
 import (
@@ -32,10 +32,11 @@
 	"source.monogon.dev/metropolis/pkg/fsquota/quotactl"
 )
 
-// SetQuota sets the quota of bytes and/or inodes in a given path. To not set a limit, set the
-// corresponding argument to zero. Setting both arguments to zero removes the quota entirely.
-// This function can only be called on an empty directory. It can't be used to create a quota
-// below a directory which already has a quota since Linux doesn't offer hierarchical quotas.
+// SetQuota sets the quota of bytes and/or inodes in a given path. To not set
+// a limit, set the corresponding argument to zero. Setting both arguments to
+// zero removes the quota entirely. This function can only be called on an
+// empty directory. It can't be used to create a quota below a directory which
+// already has a quota since Linux doesn't offer hierarchical quotas.
 func SetQuota(path string, maxBytes uint64, maxInodes uint64) error {
 	dir, err := os.Open(path)
 	if err != nil {
@@ -61,10 +62,12 @@
 
 	var lastID uint32 = attrs.ProjectID
 	if lastID == 0 {
-		// No project/quota exists for this directory, assign a new project quota
-		// TODO(lorenz): This is racy, but the kernel does not support atomically assigning
-		// quotas. So this needs to be added to the kernels setquota interface. Due to the short
-		// time window and infrequent calls this should not be an immediate issue.
+		// No project/quota exists for this directory, assign a new project
+		// quota.
+		// TODO(lorenz): This is racy, but the kernel does not support
+		// atomically assigning quotas. So this needs to be added to the
+		// kernel's setquota interface. Due to the short time window and
+		// infrequent calls this should not be an immediate issue.
 		for {
 			quota, err := quotactl.GetNextQuota(source, quotactl.QuotaTypeProject, lastID)
 			if err == unix.ENOENT || err == unix.ESRCH {
@@ -115,7 +118,8 @@
 	InodesUsed uint64
 }
 
-// GetQuota returns the current active quota and its utilization at the given path
+// GetQuota returns the current active quota and its utilization at the given
+// path.
 func GetQuota(path string) (*Quota, error) {
 	dir, err := os.Open(path)
 	if err != nil {
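
A usage sketch of the package API; the path and the limits are illustrative:

    // Limit the (empty) directory to 1 GiB and 100k inodes.
    if err := fsquota.SetQuota("/data/vol1", 1<<30, 100000); err != nil {
        return err
    }
    q, err := fsquota.GetQuota("/data/vol1")
    if err != nil {
        return err
    }
    fmt.Printf("used %d of %d bytes\n", q.BytesUsed, q.Bytes)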
diff --git a/metropolis/pkg/fsquota/fsquota_test.go b/metropolis/pkg/fsquota/fsquota_test.go
index 4729dac..392a0e9 100644
--- a/metropolis/pkg/fsquota/fsquota_test.go
+++ b/metropolis/pkg/fsquota/fsquota_test.go
@@ -29,9 +29,9 @@
 	"golang.org/x/sys/unix"
 )
 
-// withinTolerance is a helper for asserting that a value is within a certain percentage of the
-// expected value. The tolerance is specified as a float between 0 (exact match)
-// and 1 (between 0 and twice the expected value).
+// withinTolerance is a helper for asserting that a value is within a certain
+// percentage of the expected value. The tolerance is specified as a float
+// between 0 (exact match) and 1 (between 0 and twice the expected value).
 func withinTolerance(t *testing.T, expected uint64, actual uint64, tolerance float64, name string) {
 	t.Helper()
 	delta := uint64(math.Round(float64(expected) * tolerance))
@@ -131,8 +131,9 @@
 		require.Equal(t, uint64(bytesQuota), quotaUtil.Bytes, "bytes quota readback incorrect")
 		require.Equal(t, uint64(inodesQuota), quotaUtil.Inodes, "inodes quota readback incorrect")
 
-		// Give 10% tolerance for quota used values to account for metadata overhead and internal
-		// structures that are also in there. If it's out by more than that it's an issue anyways.
+		// Give 10% tolerance for quota used values to account for metadata
+		// overhead and internal structures that are also in there. If it's out
+		// by more than that, it's an issue anyway.
 		withinTolerance(t, uint64(len(sizeFileData)), quotaUtil.BytesUsed, 0.1, "BytesUsed")
 
 		// Write 50 inodes for a total of 51 (with the 512K file)
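As a worked example of the tolerance check above (values hypothetical,
mirroring the delta computation in withinTolerance):

package main

import (
	"fmt"
	"math"
)

func main() {
	expected, tolerance := uint64(524288), 0.1 // 512 KiB file, 10% tolerance
	delta := uint64(math.Round(float64(expected) * tolerance))
	// Accepts BytesUsed anywhere in [471859, 576717].
	fmt.Printf("accepting [%d, %d]\n", expected-delta, expected+delta)
}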
diff --git a/metropolis/pkg/fsquota/quotactl/quotactl.go b/metropolis/pkg/fsquota/quotactl/quotactl.go
index a2edfa7..337daaa 100644
--- a/metropolis/pkg/fsquota/quotactl/quotactl.go
+++ b/metropolis/pkg/fsquota/quotactl/quotactl.go
@@ -14,9 +14,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package quotactl implements a low-level wrapper around the modern portion of Linux's
-// quotactl() syscall. See the fsquota package for a nicer interface to the most common part
-// of this API.
+// Package quotactl implements a low-level wrapper around the modern portion of
+// Linux's quotactl() syscall. See the fsquota package for a nicer interface to
+// the most common part of this API.
 package quotactl
 
 import (
@@ -212,7 +212,8 @@
 	return nil
 }
 
-// Sync syncs disk copy of filesystems quotas. If device is empty it syncs all filesystems.
+// Sync syncs the disk copy of the filesystem's quotas. If device is empty, it
+// syncs all filesystems.
 func Sync(device string) error {
 	if device != "" {
 		devArg, err := unix.BytePtrFromString(device)
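A small call sketch for Sync as documented above (hedged; error handling is
illustrative):

package main

import (
	"log"

	"source.monogon.dev/metropolis/pkg/fsquota/quotactl"
)

func main() {
	// An empty device string syncs quota state for all filesystems; a device
	// path such as "/dev/sda1" would sync only that filesystem.
	if err := quotactl.Sync(""); err != nil {
		log.Fatalf("quota sync: %v", err)
	}
}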
diff --git a/metropolis/pkg/jsonpatch/jsonpatch.go.go b/metropolis/pkg/jsonpatch/jsonpatch.go.go
index 9682980..be3d302 100644
--- a/metropolis/pkg/jsonpatch/jsonpatch.go.go
+++ b/metropolis/pkg/jsonpatch/jsonpatch.go.go
@@ -14,7 +14,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package jsonpatch contains data structures and encoders for JSON Patch (RFC 6902) and JSON Pointers (RFC 6901)
+// Package jsonpatch contains data structures and encoders for JSON Patch
+// (RFC 6902) and JSON Pointers (RFC 6901).
 package jsonpatch
 
 import "strings"
@@ -27,7 +28,8 @@
 	Value     interface{} `json:"value,omitempty"`
 }
 
-// EncodeJSONRefToken encodes a JSON reference token as part of a JSON Pointer (RFC 6901 Section 2)
+// EncodeJSONRefToken encodes a JSON reference token as part of a JSON Pointer
+// (RFC 6901 Section 2)
 func EncodeJSONRefToken(token string) string {
 	x := strings.ReplaceAll(token, "~", "~0")
 	return strings.ReplaceAll(x, "/", "~1")
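Since the escaping order matters ("~" must be escaped before "/"), a quick
illustration of EncodeJSONRefToken's documented behaviour:

package main

import (
	"fmt"

	"source.monogon.dev/metropolis/pkg/jsonpatch"
)

func main() {
	// "~" becomes "~0" and "/" becomes "~1", per RFC 6901 Section 2.
	fmt.Println(jsonpatch.EncodeJSONRefToken("a/b~c")) // prints "a~1b~0c"
}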
diff --git a/metropolis/pkg/logbuffer/linebuffer.go b/metropolis/pkg/logbuffer/linebuffer.go
index 8048604..6fd9a62 100644
--- a/metropolis/pkg/logbuffer/linebuffer.go
+++ b/metropolis/pkg/logbuffer/linebuffer.go
@@ -25,7 +25,8 @@
 	apb "source.monogon.dev/metropolis/proto/api"
 )
 
-// Line is a line stored in the log buffer - a string, that has been perhaps truncated (due to exceeded limits).
+// Line is a line stored in the log buffer - a string that has perhaps been
+// truncated (due to exceeded limits).
 type Line struct {
 	Data           string
 	OriginalLength int
@@ -36,8 +37,8 @@
 	return l.OriginalLength > len(l.Data)
 }
 
-// String returns the line with an ellipsis at the end (...) if the line has been truncated, or the original line
-// otherwise.
+// String returns the line with an ellipsis at the end (...) if the line has been
+// truncated, or the original line otherwise.
 func (l *Line) String() string {
 	if l.Truncated() {
 		return l.Data + "..."
@@ -68,24 +69,27 @@
 	}, nil
 }
 
-// LineBuffer is a io.WriteCloser that will call a given callback every time a line is completed.
+// LineBuffer is an io.WriteCloser that will call a given callback every time
+// a line is completed.
 type LineBuffer struct {
 	maxLineLength int
 	cb            LineBufferCallback
 
 	mu  sync.Mutex
 	cur strings.Builder
-	// length is the length of the line currently being written - this will continue to increase, even if the string
-	// exceeds maxLineLength.
+	// length is the length of the line currently being written - this will continue to
+	// increase, even if the string exceeds maxLineLength.
 	length int
 	closed bool
 }
 
-// LineBufferCallback is a callback that will get called any time the line is completed. The function must not cause another
-// write to the LineBuffer, or the program will deadlock.
+// LineBufferCallback is a callback that will get called any time the line is
+// completed. The function must not cause another write to the LineBuffer, or the
+// program will deadlock.
 type LineBufferCallback func(*Line)
 
-// NewLineBuffer creates a new LineBuffer with a given line length limit and callback.
+// NewLineBuffer creates a new LineBuffer with a given line length limit and
+// callback.
 func NewLineBuffer(maxLineLength int, cb LineBufferCallback) *LineBuffer {
 	return &LineBuffer{
 		maxLineLength: maxLineLength,
@@ -93,7 +97,8 @@
 	}
 }
 
-// writeLimited writes to the internal buffer, making sure that its size does not exceed the maxLineLength.
+// writeLimited writes to the internal buffer, making sure that its size does not
+// exceed the maxLineLength.
 func (l *LineBuffer) writeLimited(data []byte) {
 	l.length += len(data)
 	if l.cur.Len()+len(data) > l.maxLineLength {
@@ -144,8 +149,8 @@
 	return len(data), nil
 }
 
-// Close will emit any leftover data in the buffer to the callback. Subsequent calls to Write will fail. Subsequent calls to Close
-// will also fail.
+// Close will emit any leftover data in the buffer to the callback. Subsequent
+// calls to Write will fail. Subsequent calls to Close will also fail.
 func (l *LineBuffer) Close() error {
 	if l.closed {
 		return fmt.Errorf("already closed")
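A minimal usage sketch of LineBuffer per the comments above (names and limits
hypothetical); note the documented constraint that the callback must not write
back into the buffer:

package main

import (
	"fmt"
	"io"

	"source.monogon.dev/metropolis/pkg/logbuffer"
)

func main() {
	lb := logbuffer.NewLineBuffer(1024, func(l *logbuffer.Line) {
		// String() appends "..." if the line exceeded the 1024-byte limit.
		fmt.Println(l.String())
	})
	io.WriteString(lb, "hello ")
	io.WriteString(lb, "world\nsecond line, no newline yet")
	lb.Close() // emits the leftover partial line as a final Line
}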
diff --git a/metropolis/pkg/logbuffer/logbuffer.go b/metropolis/pkg/logbuffer/logbuffer.go
index ce47816..cd18420 100644
--- a/metropolis/pkg/logbuffer/logbuffer.go
+++ b/metropolis/pkg/logbuffer/logbuffer.go
@@ -14,10 +14,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package logbuffer implements a fixed-size in-memory ring buffer for line-separated logs.
-// It implements io.Writer and splits the data into lines. The lines are kept in a ring where the
-// oldest are overwritten once it's full. It allows retrieval of the last n lines. There is a built-in
-// line length limit to bound the memory usage at maxLineLength * size.
+// Package logbuffer implements a fixed-size in-memory ring buffer for
+// line-separated logs. It implements io.Writer and splits the data into lines.
+// The lines are kept in a ring where the oldest are overwritten once it's
+// full. It allows retrieval of the last n lines. There is a built-in line
+// length limit to bound the memory usage at maxLineLength * size.
 package logbuffer
 
 import (
@@ -32,7 +33,8 @@
 	*LineBuffer
 }
 
-// New creates a new LogBuffer with a given ringbuffer size and maximum line length.
+// New creates a new LogBuffer with a given ringbuffer size and maximum line
+// length.
 func New(size, maxLineLength int) *LogBuffer {
 	lb := &LogBuffer{
 		content: make([]Line, size),
@@ -49,7 +51,8 @@
 	b.length++
 }
 
-// capToContentLength caps the number of requested lines to what is actually available
+// capToContentLength caps the number of requested lines to what is actually
+// available.
 func (b *LogBuffer) capToContentLength(n int) int {
 	// If there aren't enough lines to read, reduce the request size
 	if n > b.length {
@@ -62,8 +65,9 @@
 	return n
 }
 
-// ReadLines reads the last n lines from the buffer in chronological order. If n is bigger than the
-// ring buffer or the number of available lines only the number of stored lines are returned.
+// ReadLines reads the last n lines from the buffer in chronological order. If
+// n is bigger than the ring buffer or the number of available lines, only the
+// stored lines are returned.
 func (b *LogBuffer) ReadLines(n int) []Line {
 	b.mu.RLock()
 	defer b.mu.RUnlock()
@@ -78,13 +82,14 @@
 	return outArray
 }
 
-// ReadLinesTruncated works exactly the same as ReadLines, but adds an ellipsis at the end of every
-// line that was truncated because it was over MaxLineLength
+// ReadLinesTruncated works exactly the same as ReadLines, but adds an ellipsis
+// at the end of every line that was truncated because it was over
+// MaxLineLength.
 func (b *LogBuffer) ReadLinesTruncated(n int, ellipsis string) []string {
 	b.mu.RLock()
 	defer b.mu.RUnlock()
-	// This does not use ReadLines() to prevent excessive reference copying and associated GC pressure
-	// since it could process a lot of lines.
+	// This does not use ReadLines() to prevent excessive reference copying and
+	// associated GC pressure since it could process a lot of lines.
 
 	n = b.capToContentLength(n)
 
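Putting LogBuffer together per the package comment, a hedged usage sketch
(sizes hypothetical):

package main

import (
	"fmt"

	"source.monogon.dev/metropolis/pkg/logbuffer"
)

func main() {
	// Keep the last 1000 lines, each capped at 1 KiB: memory is bounded at
	// roughly size * maxLineLength.
	buf := logbuffer.New(1000, 1024)
	fmt.Fprintln(buf, "hello")
	fmt.Fprintln(buf, "world")
	for _, l := range buf.ReadLines(2) { // chronological order
		fmt.Println(l.Data)
	}
}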
diff --git a/metropolis/pkg/logtree/journal.go b/metropolis/pkg/logtree/journal.go
index 78c55a1..d29fdaa 100644
--- a/metropolis/pkg/logtree/journal.go
+++ b/metropolis/pkg/logtree/journal.go
@@ -22,18 +22,20 @@
 	"sync"
 )
 
-// DN is the Distinguished Name, a dot-delimited path used to address loggers within a LogTree. For example, "foo.bar"
-// designates the 'bar' logger node under the 'foo' logger node under the root node of the logger. An empty string is
-// the root node of the tree.
+// DN is the Distinguished Name, a dot-delimited path used to address loggers
+// within a LogTree. For example, "foo.bar" designates the 'bar' logger node
+// under the 'foo' logger node under the root node of the logger. An empty
+// string is the root node of the tree.
 type DN string
 
 var (
 	ErrInvalidDN = errors.New("invalid DN")
 )
 
-// Path return the parts of a DN, ie. all the elements of the dot-delimited DN path. For the root node, an empty list
-// will be returned. An error will be returned if the DN is invalid (contains empty parts, eg. `foo..bar`, `.foo` or
-// `foo.`.
+// Path returns the parts of a DN, ie. all the elements of the dot-delimited
+// DN path.  For the root node, an empty list will be returned. An error will
+// be returned if the DN is invalid (contains empty parts, eg. `foo..bar`,
+// `.foo` or `foo.`).
 func (d DN) Path() ([]string, error) {
 	if d == "" {
 		return nil, nil
@@ -47,12 +49,14 @@
 	return parts, nil
 }
 
-// journal is the main log recording structure of logtree. It manages linked lists containing the actual log entries,
-// and implements scans across them. It does not understand the hierarchical nature of logtree, and instead sees all
-// entries as part of a global linked list and a local linked list for a given DN.
+// journal is the main log recording structure of logtree. It manages linked lists
+// containing the actual log entries, and implements scans across them. It does not
+// understand the hierarchical nature of logtree, and instead sees all entries as
+// part of a global linked list and a local linked list for a given DN.
 //
-// The global linked list is represented by the head/tail pointers in journal and nextGlobal/prevGlobal pointers in
-// entries. The local linked lists are represented by heads[DN]/tails[DN] pointers in journal and nextLocal/prevLocal
+// The global linked list is represented by the head/tail pointers in journal and
+// nextGlobal/prevGlobal pointers in entries. The local linked lists are
+// represented by heads[DN]/tails[DN] pointers in journal and nextLocal/prevLocal
 // pointers in entries:
 //
 //       .------------.        .------------.        .------------.
@@ -70,37 +74,42 @@
 //            |                      |                     |
 //         ( head )             ( tails[Z] )            ( tail )
 //      ( heads[A.B] )          ( heads[Z] )         ( tails[A.B] )
-//
 type journal struct {
-	// mu locks the rest of the structure. It must be taken during any operation on the journal.
+	// mu locks the rest of the structure. It must be taken during any operation on the
+	// journal.
 	mu sync.RWMutex
 
-	// tail is the side of the global linked list that contains the newest log entry, ie. the one that has been pushed
-	// the most recently. It can be nil when no log entry has yet been pushed. The global linked list contains all log
-	// entries pushed to the journal.
+	// tail is the side of the global linked list that contains the newest log entry,
+	// ie. the one that has been pushed the most recently. It can be nil when no log
+	// entry has yet been pushed. The global linked list contains all log entries
+	// pushed to the journal.
 	tail *entry
-	// head is the side of the global linked list that contains the oldest log entry. It can be nil when no log entry
-	// has yet been pushed.
+	// head is the side of the global linked list that contains the oldest log entry.
+	// It can be nil when no log entry has yet been pushed.
 	head *entry
 
-	// tails are the tail sides of a local linked list for a given DN, ie. the sides that contain the newest entry. They
-	// are nil if there are no log entries for that DN.
+	// tails are the tail sides of a local linked list for a given DN, ie. the sides
+	// that contain the newest entry. They are nil if there are no log entries for that
+	// DN.
 	tails map[DN]*entry
-	// heads are the head sides of a local linked list for a given DN, ie. the sides that contain the oldest entry. They
-	// are nil if there are no log entries for that DN.
+	// heads are the head sides of a local linked list for a given DN, ie. the sides
+	// that contain the oldest entry. They are nil if there are no log entries for that
+	// DN.
 	heads map[DN]*entry
 
-	// quota is a map from DN to quota structure, representing the quota policy of a particular DN-designated logger.
+	// quota is a map from DN to quota structure, representing the quota policy of a
+	// particular DN-designated logger.
 	quota map[DN]*quota
 
-	// subscribers are observer to logs. New log entries get emitted to channels present in the subscriber structure,
-	// after filtering them through subscriber-provided filters (eg. to limit events to subtrees that interest that
-	// particular subscriber).
+	// subscribers are observers of logs. New log entries get emitted to
+	// channels present in the subscriber structure, after filtering them
+	// through subscriber-provided filters (eg. to limit events to subtrees
+	// that interest that particular subscriber).
 	subscribers []*subscriber
 }
 
-// newJournal creates a new empty journal. All journals are independent from eachother, and as such, all LogTrees are
-// also independent.
+// newJournal creates a new empty journal. All journals are independent from
+// each other, and as such, all LogTrees are also independent.
 func newJournal() *journal {
 	return &journal{
 		tails: make(map[DN]*entry),
@@ -110,7 +119,8 @@
 	}
 }
 
-// filter is a predicate that returns true if a log subscriber or reader is interested in a given log entry.
+// filter is a predicate that returns true if a log subscriber or reader is
+// interested in a given log entry.
 type filter func(*entry) bool
 
 // filterAll returns a filter that accepts all log entries.
@@ -118,16 +128,18 @@
 	return func(*entry) bool { return true }
 }
 
-// filterExact returns a filter that accepts only log entries at a given exact DN. This filter should not be used in
-// conjunction with journal.scanEntries - instead, journal.getEntries should be used, as it is much faster.
+// filterExact returns a filter that accepts only log entries at a given exact
+// DN.  This filter should not be used in conjunction with
+// journal.scanEntries - instead, journal.getEntries should be used, as it is
+// much faster.
 func filterExact(dn DN) filter {
 	return func(e *entry) bool {
 		return e.origin == dn
 	}
 }
 
-// filterSubtree returns a filter that accepts all log entries at a given DN and sub-DNs. For example, filterSubtree at
-// "foo.bar" would allow entries at "foo.bar", "foo.bar.baz", but not "foo" or "foo.barr".
+// filterSubtree returns a filter that accepts all log entries at a given DN and
+// sub-DNs. For example, filterSubtree at "foo.bar" would allow entries at
+// "foo.bar", "foo.bar.baz", but not "foo" or "foo.barr".
 func filterSubtree(root DN) filter {
 	if root == "" {
 		return filterAll()
@@ -150,8 +162,9 @@
 	}
 }
 
-// filterSeverity returns a filter that accepts log entries at a given severity level or above. See the Severity type
-// for more information about severity levels.
+// filterSeverity returns a filter that accepts log entries at a given severity
+// level or above. See the Severity type for more information about severity
+// levels.
 func filterSeverity(atLeast Severity) filter {
 	return func(e *entry) bool {
 		return e.leveled != nil && e.leveled.severity.AtLeast(atLeast)
@@ -166,10 +179,11 @@
 	return e.leveled != nil
 }
 
-// scanEntries does a linear scan through the global entry list and returns all entries that match the given filters. If
-// retrieving entries for an exact event, getEntries should be used instead, as it will leverage DN-local linked lists
-// to retrieve them faster.
-// journal.mu must be taken at R or RW level when calling this function.
+// scanEntries does a linear scan through the global entry list and returns all
+// entries that match the given filters. If retrieving entries for an exact DN,
+// getEntries should be used instead, as it will leverage DN-local linked lists
+// to retrieve them faster. journal.mu must be taken at R or RW level when
+// calling this function.
 func (j *journal) scanEntries(filters ...filter) (res []*entry) {
 	cur := j.tail
 	for {
@@ -191,10 +205,12 @@
 	}
 }
 
-// getEntries returns all entries at a given DN. This is faster than a scanEntries(filterExact), as it uses the special
-// local linked list pointers to traverse the journal. Additional filters can be passed to further limit the entries
-// returned, but a scan through this DN's local linked list will be performed regardless.
-// journal.mu must be taken at R or RW level when calling this function.
+// getEntries returns all entries at a given DN. This is faster than a
+// scanEntries(filterExact), as it uses the special local linked list pointers to
+// traverse the journal. Additional filters can be passed to further limit the
+// entries returned, but a scan through this DN's local linked list will be
+// performed regardless. journal.mu must be taken at R or RW level when calling
+// this function.
 func (j *journal) getEntries(exact DN, filters ...filter) (res []*entry) {
 	cur := j.tails[exact]
 	for {
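To make the DN addressing above concrete, a small sketch of the documented
Path semantics (a standalone illustration, not part of this change):

package main

import (
	"fmt"

	"source.monogon.dev/metropolis/pkg/logtree"
)

func main() {
	parts, _ := logtree.DN("foo.bar").Path() // ["foo" "bar"]
	root, _ := logtree.DN("").Path()         // nil: the root node
	_, err := logtree.DN(".foo").Path()      // ErrInvalidDN: empty part
	fmt.Println(parts, root, err)
}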
diff --git a/metropolis/pkg/logtree/journal_entry.go b/metropolis/pkg/logtree/journal_entry.go
index d81b687..d51d406 100644
--- a/metropolis/pkg/logtree/journal_entry.go
+++ b/metropolis/pkg/logtree/journal_entry.go
@@ -18,43 +18,49 @@
 
 import "source.monogon.dev/metropolis/pkg/logbuffer"
 
-// entry is a journal entry, representing a single log event (encompassed in a Payload) at a given DN.
-// See the journal struct for more information about the global/local linked lists.
+// entry is a journal entry, representing a single log event (encompassed in a
+// Payload) at a given DN. See the journal struct for more information about the
+// global/local linked lists.
 type entry struct {
-	// origin is the DN at which the log entry was recorded, or conversely, in which DN it will be available at.
+	// origin is the DN at which the log entry was recorded, or conversely, the
+	// DN at which it will be available.
 	origin DN
-	// journal is the parent journal of this entry. An entry can belong only to a single journal. This pointer is used
-	// to mutate the journal's head/tail pointers when unlinking an entry.
+	// journal is the parent journal of this entry. An entry can belong only to a
+	// single journal. This pointer is used to mutate the journal's head/tail pointers
+	// when unlinking an entry.
 	journal *journal
-	// leveled is the leveled log entry for this entry, if this log entry was emitted by leveled logging. Otherwise it
-	// is nil.
+	// leveled is the leveled log entry for this entry, if this log entry was emitted
+	// by leveled logging. Otherwise it is nil.
 	leveled *LeveledPayload
-	// raw is the raw log entry for this entry, if this log entry was emitted by raw logging. Otherwise it is nil.
+	// raw is the raw log entry for this entry, if this log entry was emitted by raw
+	// logging. Otherwise it is nil.
 	raw *logbuffer.Line
 
-	// prevGlobal is the previous entry in the global linked list, or nil if this entry is the oldest entry in the
-	// global linked list.
+	// prevGlobal is the previous entry in the global linked list, or nil if this entry
+	// is the oldest entry in the global linked list.
 	prevGlobal *entry
-	// nextGlobal is the next entry in the global linked list, or nil if this entry is the newest entry in the global
-	// linked list.
+	// nextGlobal is the next entry in the global linked list, or nil if this entry is
+	// the newest entry in the global linked list.
 	nextGlobal *entry
 
-	// prevLocal is the previous entry in this entry DN's local linked list, or nil if this entry is the oldest entry in
-	// this local linked list.
+	// prevLocal is the previous entry in this entry DN's local linked list, or nil if
+	// this entry is the oldest entry in this local linked list.
 	prevLocal *entry
-	// prevLocal is the next entry in this entry DN's local linked list, or nil if this entry is the newest entry in
-	// this local linked list.
+	// nextLocal is the next entry in this entry DN's local linked list, or nil
+	// if this entry is the newest entry in this local linked list.
 	nextLocal *entry
 
-	// seqLocal is a counter within a local linked list that increases by one each time a new log entry is added. It is
-	// used to quickly establish local linked list sizes (by subtracting seqLocal from both ends). This setup allows for
-	// O(1) length calculation for local linked lists as long as entries are only unlinked from the head or tail (which
-	// is the case in the current implementation).
+	// seqLocal is a counter within a local linked list that increases by one each time
+	// a new log entry is added. It is used to quickly establish local linked list
+	// sizes (by subtracting seqLocal from both ends). This setup allows for O(1)
+	// length calculation for local linked lists as long as entries are only unlinked
+	// from the head or tail (which is the case in the current implementation).
 	seqLocal uint64
 }
 
-// external returns a LogEntry object for this entry, ie. the public version of this object, without fields relating to
-// the parent journal, linked lists, sequences, etc. These objects are visible to library consumers.
+// external returns a LogEntry object for this entry, ie. the public version of
+// this object, without fields relating to the parent journal, linked lists,
+// sequences, etc. These objects are visible to library consumers.
 func (e *entry) external() *LogEntry {
 	return &LogEntry{
 		DN:      e.origin,
@@ -63,9 +69,8 @@
 	}
 }
 
-// unlink removes this entry from both global and local linked lists, updating the journal's head/tail pointers if
-// needed.
-// journal.mu must be taken as RW
+// unlink removes this entry from both global and local linked lists, updating
+// the journal's head/tail pointers if needed. journal.mu must be taken as RW.
 func (e *entry) unlink() {
 	// Unlink from the global linked list.
 	if e.prevGlobal != nil {
@@ -102,7 +107,8 @@
 type quota struct {
 	// origin is the exact DN that this quota applies to.
 	origin DN
-	// max is the maximum count of log entries permitted for this DN - ie, the maximum size of the local linked list.
+	// max is the maximum count of log entries permitted for this DN - ie, the maximum
+	// size of the local linked list.
 	max uint64
 }
 
@@ -143,12 +149,13 @@
 		j.tails[e.origin] = e
 	}
 
-	// Apply quota to the local linked list that this entry got inserted to, ie. remove elements in excess of the
-	// quota.max count.
+	// Apply quota to the local linked list that this entry got inserted to, ie. remove
+	// elements in excess of the quota.max count.
 	quota := j.quota[e.origin]
 	count := (j.heads[e.origin].seqLocal - j.tails[e.origin].seqLocal) + 1
 	if count > quota.max {
-		// Keep popping elements off the tail of the local linked list until quota is not violated.
+		// Keep popping elements off the tail of the local linked list until quota is not
+		// violated.
 		left := count - quota.max
 		cur := j.tails[e.origin]
 		for {
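The seqLocal trick described above makes the local list length an O(1)
subtraction; a worked example with hypothetical sequence numbers:

package main

import "fmt"

func main() {
	// The two ends of a local list carry seqLocal 30 and 41: the list holds
	// (41 - 30) + 1 = 12 entries, with no traversal needed.
	oldest, newest := uint64(30), uint64(41)
	fmt.Println((newest - oldest) + 1)
}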
diff --git a/metropolis/pkg/logtree/journal_subscriber.go b/metropolis/pkg/logtree/journal_subscriber.go
index e6c7c62..dc9750f 100644
--- a/metropolis/pkg/logtree/journal_subscriber.go
+++ b/metropolis/pkg/logtree/journal_subscriber.go
@@ -24,12 +24,15 @@
 type subscriber struct {
 	// filters that entries need to pass through in order to be sent to the subscriber.
 	filters []filter
-	// dataC is the channel to which entries that pass filters will be sent. The channel must be drained regularly in
-	// order to prevent accumulation of goroutines and possible reordering of messages.
+	// dataC is the channel to which entries that pass filters will be sent. The
+	// channel must be drained regularly in order to prevent accumulation of goroutines
+	// and possible reordering of messages.
 	dataC chan *LogEntry
-	// doneC is a channel that is closed once the subscriber wishes to stop receiving notifications.
+	// doneC is a channel that is closed once the subscriber wishes to stop receiving
+	// notifications.
 	doneC chan struct{}
-	// missed is the amount of messages missed by the subscriber by not receiving from dataC fast enough
+	// missed is the number of messages missed by the subscriber by not
+	// receiving from dataC fast enough.
 	missed uint64
 }
 
diff --git a/metropolis/pkg/logtree/klog.go b/metropolis/pkg/logtree/klog.go
index 8755286..3dd040e 100644
--- a/metropolis/pkg/logtree/klog.go
+++ b/metropolis/pkg/logtree/klog.go
@@ -61,9 +61,8 @@
 	return k
 }
 
-
 type klogParser struct {
-	n *node
+	n      *node
 	buffer *logbuffer.LineBuffer
 }
 
@@ -90,7 +89,7 @@
 	// we permit library users to 'fake' logs? This would also permit us to get rid
 	// of the type assertion in KLogParser().
 	e := &entry{
-		origin: k.n.dn,
+		origin:  k.n.dn,
 		leveled: p,
 	}
 	k.n.tree.journal.append(e)
@@ -98,14 +97,15 @@
 }
 
 var (
-	// reKLog matches and parses klog/glog-formatted log lines.
-	// Format: I0312 14:20:04.240540     204 shared_informer.go:247] Caches are synced for attach detach
+	// reKLog matches and parses klog/glog-formatted log lines. Format:
+	//   I0312 14:20:04.240540     204 shared_informer.go:247] Caches are
+	//   synced for attach detach
 	reKLog = regexp.MustCompile(`^([IEWF])(\d{4})\s+(\d{2}:\d{2}:\d{2}(\.\d+)?)\s+(\d+)\s+([^:]+):(\d+)]\s+(.+)$`)
 )
 
 // parse attempts to parse a klog-formatted line. Returns nil if the line
 // couldn't have been parsed successfully.
-func parse(now time.Time, s string) (*LeveledPayload) {
+func parse(now time.Time, s string) *LeveledPayload {
 	parts := reKLog.FindStringSubmatch(s)
 	if parts == nil {
 		return nil
@@ -184,13 +184,14 @@
 	// The PID is discarded.
 	_ = pid
 
-	// Finally we have extracted all the data from the line. Inject into the log publisher.
+	// Finally we have extracted all the data from the line. Inject into the log
+	// publisher.
 	return &LeveledPayload{
 		timestamp: ts,
-		severity: severity,
-		messages: []string{message},
-		file: file,
-		line: line,
+		severity:  severity,
+		messages:  []string{message},
+		file:      file,
+		line:      line,
 	}
 }
 
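To sanity-check the reKLog pattern quoted above, a standalone snippet running
it against the example line from the comment (the pattern is copied verbatim;
everything else is illustrative):

package main

import (
	"fmt"
	"regexp"
)

var reKLog = regexp.MustCompile(`^([IEWF])(\d{4})\s+(\d{2}:\d{2}:\d{2}(\.\d+)?)\s+(\d+)\s+([^:]+):(\d+)]\s+(.+)$`)

func main() {
	line := "I0312 14:20:04.240540     204 shared_informer.go:247] Caches are synced for attach detach"
	p := reKLog.FindStringSubmatch(line)
	// p[1]="I" (severity), p[2]="0312" (date), p[3]="14:20:04.240540",
	// p[5]="204" (PID), p[6]="shared_informer.go", p[7]="247", p[8]=message.
	fmt.Println(p[1], p[6], p[7], p[8])
}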
diff --git a/metropolis/pkg/logtree/leveled.go b/metropolis/pkg/logtree/leveled.go
index c0d2aff..a4220f9 100644
--- a/metropolis/pkg/logtree/leveled.go
+++ b/metropolis/pkg/logtree/leveled.go
@@ -22,64 +22,70 @@
 	apb "source.monogon.dev/metropolis/proto/api"
 )
 
-// LeveledLogger is a generic interface for glog-style logging. There are four hardcoded log severities, in increasing
-// order: INFO, WARNING, ERROR, FATAL. Logging at a certain severity level logs not only to consumers expecting data
-// at that severity level, but also all lower severity levels. For example, an ERROR log will also be passed to
-// consumers looking at INFO or WARNING logs.
+// LeveledLogger is a generic interface for glog-style logging. There are four
+// hardcoded log severities, in increasing order: INFO, WARNING, ERROR, FATAL.
+// Logging at a certain severity level logs not only to consumers expecting data at
+// that severity level, but also all lower severity levels. For example, an ERROR
+// log will also be passed to consumers looking at INFO or WARNING logs.
 type LeveledLogger interface {
-	// Info logs at the INFO severity. Arguments are handled in the manner of fmt.Print, a terminating newline is added
-	// if missing.
+	// Info logs at the INFO severity. Arguments are handled in the manner of
+	// fmt.Print, a terminating newline is added if missing.
 	Info(args ...interface{})
-	// Infof logs at the INFO severity. Arguments are handled in the manner of fmt.Printf, a terminating newline is
-	// added if missing.
+	// Infof logs at the INFO severity. Arguments are handled in the manner of
+	// fmt.Printf, a terminating newline is added if missing.
 	Infof(format string, args ...interface{})
 
-	// Warning logs at the WARNING severity. Arguments are handled in the manner of fmt.Print, a terminating newline is
-	// added if missing.
+	// Warning logs at the WARNING severity. Arguments are handled in the manner of
+	// fmt.Print, a terminating newline is added if missing.
 	Warning(args ...interface{})
-	// Warningf logs at the WARNING severity. Arguments are handled in the manner of fmt.Printf, a terminating newline
-	// is added if missing.
+	// Warningf logs at the WARNING severity. Arguments are handled in the manner of
+	// fmt.Printf, a terminating newline is added if missing.
 	Warningf(format string, args ...interface{})
 
-	// Error logs at the ERROR severity. Arguments are handled in the manner of fmt.Print, a terminating newline is
-	// added if missing.
+	// Error logs at the ERROR severity. Arguments are handled in the manner of
+	// fmt.Print, a terminating newline is added if missing.
 	Error(args ...interface{})
-	// Errorf logs at the ERROR severity. Arguments are handled in the manner of fmt.Printf, a terminating newline is
-	// added if missing.
+	// Errorf logs at the ERROR severity. Arguments are handled in the manner of
+	// fmt.Printf, a terminating newline is added if missing.
 	Errorf(format string, args ...interface{})
 
-	// Fatal logs at the FATAL severity and aborts the current program. Arguments are handled in the manner of
-	// fmt.Print, a terminating newline is added if missing.
+	// Fatal logs at the FATAL severity and aborts the current program. Arguments are
+	// handled in the manner of fmt.Print, a terminating newline is added if missing.
 	Fatal(args ...interface{})
-	// Fatalf logs at the FATAL severity and aborts the current program. Arguments are handled in the manner of
-	// fmt.Printf, a terminating newline is added if missing.
+	// Fatalf logs at the FATAL severity and aborts the current program. Arguments are
+	// handled in the manner of fmt.Printf, a terminating newline is added if missing.
 	Fatalf(format string, args ...interface{})
 
-	// V returns a VerboseLeveledLogger at a given verbosity level. These verbosity levels can be dynamically set and
-	// unset on a package-granular level by consumers of the LeveledLogger logs. The returned value represents whether
-	// logging at the given verbosity level was active at that time, and as such should not be a long-lived object
-	// in programs.
-	// This construct is further refered to as 'V-logs'.
+	// V returns a VerboseLeveledLogger at a given verbosity level. These
+	// verbosity levels can be dynamically set and unset on a package-granular
+	// level by consumers of the LeveledLogger logs. The returned value
+	// represents whether logging at the given verbosity level was active at
+	// that time, and as such should not be a long-lived object in programs.
+	// This construct is further referred to as 'V-logs'.
 	V(level VerbosityLevel) VerboseLeveledLogger
 }
 
-// VerbosityLevel is a verbosity level defined for V-logs. This can be changed programmatically per Go package. When
-// logging at a given VerbosityLevel V, the current level must be equal or higher to V for the logs to be recorded.
-// Conversely, enabling a V-logging at a VerbosityLevel V also enables all logging at lower levels [Int32Min .. (V-1)].
+// VerbosityLevel is a verbosity level defined for V-logs. This can be changed
+// programmatically per Go package. When logging at a given VerbosityLevel V,
+// the current level must be equal to or higher than V for the logs to be
+// recorded. Conversely, enabling V-logging at a VerbosityLevel V also enables
+// all logging at lower levels [Int32Min .. (V-1)].
 type VerbosityLevel int32
 
 type VerboseLeveledLogger interface {
-	// Enabled returns if this level was enabled. If not enabled, all logging into this logger will be discarded
-	// immediately.
-	// Thus, Enabled() can be used to check the verbosity level before performing any logging:
+	// Enabled returns whether this level is enabled. If not enabled, all
+	// logging into this logger will be discarded immediately. Thus, Enabled()
+	// can be used to check the verbosity level before performing any logging:
 	//    if l.V(3).Enabled() { l.Info("V3 is enabled") }
 	// or, in simple cases, the convenience function .Info can be used:
 	//    l.V(3).Info("V3 is enabled")
-	// The second form is shorter and more convenient, but more expensive, as its arguments are always evaluated.
+	// The second form is shorter and more convenient, but more expensive, as its
+	// arguments are always evaluated.
 	Enabled() bool
-	// Info is the equivalent of a LeveledLogger's Info call, guarded by whether this VerboseLeveledLogger is enabled.
+	// Info is the equivalent of a LeveledLogger's Info call, guarded by whether this
+	// VerboseLeveledLogger is enabled.
 	Info(args ...interface{})
-	// Infof is the equivalent of a LeveledLogger's Infof call, guarded by whether this VerboseLeveledLogger is enabled.
+	// Infof is the equivalent of a LeveledLogger's Infof call, guarded by whether this
+	// VerboseLeveledLogger is enabled.
 	Infof(format string, args ...interface{})
 }
 
@@ -94,8 +100,9 @@
 )
 
 var (
-	// SeverityAtLeast maps a given severity to a list of severities that at that severity or higher. In other words,
-	// SeverityAtLeast[X] returns a list of severities that might be seen in a log at severity X.
+	// SeverityAtLeast maps a given severity to a list of severities that are
+	// at that severity or higher. In other words, SeverityAtLeast[X] returns a
+	// list of severities that might be seen in a log at severity X.
 	SeverityAtLeast = map[Severity][]Severity{
 		INFO:    {INFO, WARNING, ERROR, FATAL},
 		WARNING: {WARNING, ERROR, FATAL},
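A hedged sketch of the V-log guard pattern the interface comments describe;
how the LeveledLogger is obtained is up to the caller, and dump is a
hypothetical expensive function:

package logdemo

import "source.monogon.dev/metropolis/pkg/logtree"

func logExpensive(l logtree.LeveledLogger, dump func() string) {
	// Guard first: dump() only runs if verbosity level 3 is active.
	if l.V(3).Enabled() {
		l.Infof("debug state: %s", dump())
	}
	// Convenience form: shorter, but arguments are always evaluated.
	l.V(3).Info("simple message")
}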
diff --git a/metropolis/pkg/logtree/leveled_payload.go b/metropolis/pkg/logtree/leveled_payload.go
index 0ceee4d..ed3ed7e 100644
--- a/metropolis/pkg/logtree/leveled_payload.go
+++ b/metropolis/pkg/logtree/leveled_payload.go
@@ -25,12 +25,13 @@
 	apb "source.monogon.dev/metropolis/proto/api"
 )
 
-// LeveledPayload is a log entry for leveled logs (as per leveled.go). It contains the input to these calls (severity and
-// message split into newline-delimited messages) and additional metadata that would be usually seen in a text
+// LeveledPayload is a log entry for leveled logs (as per leveled.go). It contains
+// the input to these calls (severity and message split into newline-delimited
+// messages) and additional metadata that would be usually seen in a text
 // representation of a leveled log entry.
 type LeveledPayload struct {
-	// messages is the list of messages contained in this payload. This list is built from splitting up the given message
-	// from the user by newline.
+	// messages is the list of messages contained in this payload. This list is built
+	// from splitting up the given message from the user by newline.
 	messages []string
 	// timestamp is the time at which this message was emitted.
 	timestamp time.Time
@@ -42,10 +43,11 @@
 	line int
 }
 
-// String returns a canonical representation of this payload as a single string prefixed with metadata. If the original
-// message was logged with newlines, this representation will also contain newlines, with each original message part
-// prefixed by the metadata.
-// For an alternative call that will instead return a canonical prefix and a list of lines in the message, see Strings().
+// String returns a canonical representation of this payload as a single string
+// prefixed with metadata. If the original message was logged with newlines, this
+// representation will also contain newlines, with each original message part
+// prefixed by the metadata. For an alternative call that will instead return a
+// canonical prefix and a list of lines in the message, see Strings().
 func (p *LeveledPayload) String() string {
 	prefix, lines := p.Strings()
 	res := make([]string, len(p.messages))
@@ -55,9 +57,11 @@
 	return strings.Join(res, "\n")
 }
 
-// Strings returns the canonical representation of this payload split into a prefix and all lines that were contained in
-// the original message. This is meant to be displayed to the user by showing the prefix before each line, concatenated
-// together - possibly in a table form with the prefixes all unified with a rowspan-like mechanism.
+// Strings returns the canonical representation of this payload split into a
+// prefix and all lines that were contained in the original message. This is
+// meant to be displayed to the user by showing the prefix before each line,
+// concatenated together - possibly in a table form with the prefixes all
+// unified with a rowspan-like mechanism.
 //
 // For example, this function can return:
 //   prefix = "I1102 17:20:06.921395 foo.go:42] "
@@ -76,7 +80,6 @@
 // |                                        :------------------|
 // |                                        :  - two           |
 // '-----------------------------------------------------------'
-//
 func (p *LeveledPayload) Strings() (prefix string, lines []string) {
 	_, month, day := p.timestamp.Date()
 	hour, minute, second := p.timestamp.Clock()
@@ -91,8 +94,8 @@
 	return
 }
 
-// Message returns the inner message lines of this entry, ie. what was passed to the actual logging method, but split by
-// newlines.
+// Messages returns the inner message lines of this entry, ie. what was passed
+// to the actual logging method, but split by newlines.
 func (p *LeveledPayload) Messages() []string { return p.messages }
 
 func (p *LeveledPayload) MessagesJoined() string { return strings.Join(p.messages, "\n") }
@@ -100,8 +103,8 @@
 // Timestamp returns the time at which this entry was logged.
 func (p *LeveledPayload) Timestamp() time.Time { return p.timestamp }
 
-// Location returns a string in the form of file_name:line_number that shows the origin of the log entry in the
-// program source.
+// Location returns a string in the form of file_name:line_number that shows the
+// origin of the log entry in the program source.
 func (p *LeveledPayload) Location() string { return fmt.Sprintf("%s:%d", p.file, p.line) }
 
 // Severity returns the Severity with which this entry was logged.
diff --git a/metropolis/pkg/logtree/logtree.go b/metropolis/pkg/logtree/logtree.go
index a21545f..968a5a9 100644
--- a/metropolis/pkg/logtree/logtree.go
+++ b/metropolis/pkg/logtree/logtree.go
@@ -24,12 +24,13 @@
 	"source.monogon.dev/metropolis/pkg/logbuffer"
 )
 
-// LogTree is a tree-shaped logging system. For more information, see the package-level documentation.
+// LogTree is a tree-shaped logging system. For more information, see the
+// package-level documentation.
 type LogTree struct {
 	// journal is the tree's journal, storing all log data and managing subscribers.
 	journal *journal
-	// root is the root node of the actual tree of the log tree. The nodes contain per-DN configuration options, notably
-	// the current verbosity level of that DN.
+	// root is the root node of the actual tree of the log tree. The nodes
+	// contain per-DN configuration options, notably the current verbosity
+	// level of that DN.
 	root *node
 }
 
@@ -41,25 +42,29 @@
 	return lt
 }
 
-// node represents a given DN as a discrete 'logger'. It implements the LeveledLogger interface for log publishing,
-// entries from which it passes over to the logtree's journal.
+// node represents a given DN as a discrete 'logger'. It implements the
+// LeveledLogger interface for log publishing, entries from which it passes over to
+// the logtree's journal.
 type node struct {
 	// dn is the DN which this node represents (or "" if this is the root node).
 	dn DN
 	// tree is the LogTree to which this node belongs.
 	tree *LogTree
-	// verbosity is the current verbosity level of this DN/node, affecting .V(n) LeveledLogger calls
+	// verbosity is the current verbosity level of this DN/node, affecting .V(n)
+	// LeveledLogger calls
 	verbosity     VerbosityLevel
 	rawLineBuffer *logbuffer.LineBuffer
 
 	// mu guards children.
 	mu sync.Mutex
-	// children is a map of DN-part to a children node in the logtree. A DN-part is a string representing a part of the
-	// DN between the deliming dots, as returned by DN.Path.
+	// children is a map of DN-part to a child node in the logtree. A DN-part
+	// is a string representing a part of the DN between the delimiting dots,
+	// as returned by DN.Path.
 	children map[string]*node
 }
 
-// newNode returns a node at a given DN in the LogTree - but doesn't set up the LogTree to insert it accordingly.
+// newNode returns a node at a given DN in the LogTree - but doesn't set up the
+// LogTree to insert it accordingly.
 func newNode(tree *LogTree, dn DN) *node {
 	n := &node{
 		dn:       dn,
@@ -72,8 +77,8 @@
 	return n
 }
 
-// nodeByDN returns the LogTree node corresponding to a given DN. If either the node or some of its parents do not
-// exist they will be created as needed.
+// nodeByDN returns the LogTree node corresponding to a given DN. If either the
+// node or some of its parents do not exist they will be created as needed.
 func (l *LogTree) nodeByDN(dn DN) (*node, error) {
 	traversal, err := newTraversal(dn)
 	if err != nil {
@@ -82,22 +87,27 @@
 	return traversal.execute(l.root), nil
 }
 
-// nodeTraversal represents a request to traverse the LogTree in search of a given node by DN.
+// nodeTraversal represents a request to traverse the LogTree in search of a given
+// node by DN.
 type nodeTraversal struct {
 	// want is the DN of the node's that requested to be found.
 	want DN
-	// current is the path already taken to find the node, in the form of DN parts. It starts out as want.Parts() and
-	// progresses to become empty as the traversal continues.
+	// current is the path already taken to find the node, in the form of DN parts. It
+	// starts out as want.Parts() and progresses to become empty as the traversal
+	// continues.
 	current []string
-	// left is the path that's still needed to be taken in order to find the node, in the form of DN parts. It starts
-	// out empty and progresses to become wants.Parts() as the traversal continues.
+	// left is the path that's still needed to be taken in order to find the
+	// node, in the form of DN parts. It starts out empty and progresses to
+	// become want.Parts() as the traversal continues.
 	left []string
 }
 
-// next adjusts the traversal's current/left slices to the next element of the traversal, returns the part that's now
-// being looked for (or "" if the traveral is done) and the full DN of the element that's being looked for.
+// next adjusts the traversal's current/left slices to the next element of the
+// traversal, returns the part that's now being looked for (or "" if the
+// traversal is done) and the full DN of the element that's being looked for.
 //
-// For example, a traversal of foo.bar.baz will cause .next() to return the following on each invocation:
+// For example, a traversal of foo.bar.baz will cause .next() to return the
+// following on each invocation:
 //  - part: foo, full: foo
 //  - part: bar, full: foo.bar
 //  - part: baz, full: foo.bar.baz
@@ -125,9 +135,10 @@
 	}, nil
 }
 
-// execute the traversal in order to find the node. This can only be called once per traversal.
-// Nodes will be created within the tree until the target node is reached. Existing nodes will be reused.
-// This is effectively an idempotent way of accessing a node in the tree based on a traversal.
+// execute the traversal in order to find the node. This can only be called once
+// per traversal. Nodes will be created within the tree until the target node is
+// reached. Existing nodes will be reused. This is effectively an idempotent way of
+// accessing a node in the tree based on a traversal.
 func (t *nodeTraversal) execute(n *node) *node {
 	cur := n
 	for {
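The enumeration that next() is documented to produce can be reproduced with a
few lines of standalone Go (an illustration of the documented behaviour, not
the actual implementation):

package main

import (
	"fmt"
	"strings"
)

func main() {
	parts := strings.Split("foo.bar.baz", ".")
	for i, part := range parts {
		fmt.Printf("part: %s, full: %s\n", part, strings.Join(parts[:i+1], "."))
	}
	// part: foo, full: foo
	// part: bar, full: foo.bar
	// part: baz, full: foo.bar.baz
}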
diff --git a/metropolis/pkg/logtree/logtree_access.go b/metropolis/pkg/logtree/logtree_access.go
index fed202e..1babe1e 100644
--- a/metropolis/pkg/logtree/logtree_access.go
+++ b/metropolis/pkg/logtree/logtree_access.go
@@ -31,46 +31,54 @@
 	leveledWithMinimumSeverity Severity
 }
 
-// WithChildren makes Read return/stream data for both a given DN and all its children.
+// WithChildren makes Read return/stream data for both a given DN and all its
+// children.
 func WithChildren() LogReadOption { return LogReadOption{withChildren: true} }
 
-// WithStream makes Read return a stream of data. This works alongside WithBacklog to create a read-and-stream
-// construct.
+// WithStream makes Read return a stream of data. This works alongside WithBacklog
+// to create a read-and-stream construct.
 func WithStream() LogReadOption { return LogReadOption{withStream: true} }
 
-// WithBacklog makes Read return already recorded log entries, up to count elements.
+// WithBacklog makes Read return already recorded log entries, up to count
+// elements.
 func WithBacklog(count int) LogReadOption { return LogReadOption{withBacklog: count} }
 
-// BacklogAllAvailable makes WithBacklog return all backlogged log data that logtree possesses.
+// BacklogAllAvailable makes WithBacklog return all backlogged log data that
+// logtree possesses.
 const BacklogAllAvailable int = -1
 
 func OnlyRaw() LogReadOption { return LogReadOption{onlyRaw: true} }
 
 func OnlyLeveled() LogReadOption { return LogReadOption{onlyLeveled: true} }
 
-// LeveledWithMinimumSeverity makes Read return only log entries that are at least at a given Severity. If only leveled
-// entries are needed, OnlyLeveled must be used. This is a no-op when OnlyRaw is used.
+// LeveledWithMinimumSeverity makes Read return only log entries that are at least
+// at a given Severity. If only leveled entries are needed, OnlyLeveled must be
+// used. This is a no-op when OnlyRaw is used.
 func LeveledWithMinimumSeverity(s Severity) LogReadOption {
 	return LogReadOption{leveledWithMinimumSeverity: s}
 }
 
-// LogReader permits reading an already existing backlog of log entries and to stream further ones.
+// LogReader permits reading an already existing backlog of log entries and
+// streaming further ones.
 type LogReader struct {
-	// Backlog are the log entries already logged by LogTree. This will only be set if WithBacklog has been passed to
-	// Read.
+	// Backlog are the log entries already logged by LogTree. This will only be set if
+	// WithBacklog has been passed to Read.
 	Backlog []*LogEntry
-	// Stream is a channel of new entries as received live by LogTree. This will only be set if WithStream has been
-	// passed to Read. In this case, entries from this channel must be read as fast as possible by the consumer in order
-	// to prevent missing entries.
+	// Stream is a channel of new entries as received live by LogTree. This will only
+	// be set if WithStream has been passed to Read. In this case, entries from this
+	// channel must be read as fast as possible by the consumer in order to prevent
+	// missing entries.
 	Stream <-chan *LogEntry
-	// done is channel used to signal (by closing) that the log consumer is not interested in more Stream data.
+	// done is a channel used to signal (by closing) that the log consumer is
+	// not interested in more Stream data.
 	done chan<- struct{}
-	// missed is an atomic integer pointer that tells the subscriber how many messages in Stream they missed. This
-	// pointer is nil if no streaming has been requested.
+	// missed is an atomic integer pointer that tells the subscriber how many messages
+	// in Stream they missed. This pointer is nil if no streaming has been requested.
 	missed *uint64
 }
 
-// Missed returns the amount of entries that were missed from Stream (as the channel was not drained fast enough).
+// Missed returns the number of entries that were missed from Stream (as the
+// channel was not drained fast enough).
 func (l *LogReader) Missed() uint64 {
 	// No Stream.
 	if l.missed == nil {
@@ -79,8 +87,8 @@
 	return atomic.LoadUint64(l.missed)
 }
 
-// Close closes the LogReader's Stream. This must be called once the Reader does not wish to receive streaming messages
-// anymore.
+// Close closes the LogReader's Stream. This must be called once the Reader does
+// not wish to receive streaming messages anymore.
 func (l *LogReader) Close() {
 	if l.done != nil {
 		close(l.done)
@@ -91,9 +99,11 @@
 	ErrRawAndLeveled = errors.New("cannot return logs that are simultaneously OnlyRaw and OnlyLeveled")
 )
 
-// Read and/or stream entries from a LogTree. The returned LogReader is influenced by the LogReadOptions passed, which
-// influence whether the Read will return existing entries, a stream, or both. In addition the options also dictate
-// whether only entries for that particular DN are returned, or for all sub-DNs as well.
+// Read and/or stream entries from a LogTree. The returned LogReader is
+// influenced by the LogReadOptions passed, which determine whether the Read
+// will return existing entries, a stream, or both. In addition, the options
+// also dictate whether only entries for that particular DN are returned, or
+// for all sub-DNs as well.
 func (l *LogTree) Read(dn DN, opts ...LogReadOption) (*LogReader, error) {
 	l.journal.mu.RLock()
 	defer l.journal.mu.RUnlock()
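Tying the options together, a hedged consumer sketch against the Read API
documented above (tree acquisition and the DN are illustrative):

package logdemo

import (
	"fmt"

	"source.monogon.dev/metropolis/pkg/logtree"
)

func tailLogs(tree *logtree.LogTree) error {
	reader, err := tree.Read("foo.bar",
		logtree.WithChildren(),
		logtree.WithBacklog(logtree.BacklogAllAvailable),
		logtree.WithStream())
	if err != nil {
		return err
	}
	defer reader.Close()
	for _, e := range reader.Backlog { // already-recorded entries
		fmt.Println(e.String())
	}
	for e := range reader.Stream { // live entries; drain promptly (see Missed)
		fmt.Println(e.String())
	}
	return nil
}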
diff --git a/metropolis/pkg/logtree/logtree_entry.go b/metropolis/pkg/logtree/logtree_entry.go
index a1c2d62..442d456 100644
--- a/metropolis/pkg/logtree/logtree_entry.go
+++ b/metropolis/pkg/logtree/logtree_entry.go
@@ -24,8 +24,9 @@
 	apb "source.monogon.dev/metropolis/proto/api"
 )
 
-// LogEntry contains a log entry, combining both leveled and raw logging into a single stream of events. A LogEntry
-// will contain exactly one of either LeveledPayload or RawPayload.
+// LogEntry contains a log entry, combining both leveled and raw logging into a
+// single stream of events. A LogEntry will contain exactly one of either
+// LeveledPayload or RawPayload.
 type LogEntry struct {
 	// If non-nil, this is a leveled logging entry.
 	Leveled *LeveledPayload
@@ -35,10 +36,12 @@
 	DN DN
 }
 
-// String returns a canonical representation of this payload as a single string prefixed with metadata. If the entry is
-// a leveled log entry that originally was logged with newlines this representation will also contain newlines, with
-// each original message part prefixed by the metadata.
-// For an alternative call that will instead return a canonical prefix and a list of lines in the message, see Strings().
+// String returns a canonical representation of this payload as a single string
+// prefixed with metadata. If the entry is a leveled log entry that originally was
+// logged with newlines this representation will also contain newlines, with each
+// original message part prefixed by the metadata. For an alternative call that
+// will instead return a canonical prefix and a list of lines in the message, see
+// Strings().
 func (l *LogEntry) String() string {
 	if l.Leveled != nil {
 		prefix, messages := l.Leveled.Strings()
@@ -54,9 +57,11 @@
 	return "INVALID"
 }
 
-// Strings returns the canonical representation of this payload split into a prefix and all lines that were contained in
-// the original message. This is meant to be displayed to the user by showing the prefix before each line, concatenated
-// together - possibly in a table form with the prefixes all unified with a rowspan-like mechanism.
+// Strings returns the canonical representation of this payload split into a
+// prefix and all lines that were contained in the original message. This is
+// meant to be displayed to the user by showing the prefix before each line,
+// concatenated together - possibly in a table form with the prefixes all
+// unified with a rowspan-like mechanism.
 //
 // For example, this function can return:
 //   prefix = "root.foo.bar                    I1102 17:20:06.921395     0 foo.go:42] "
@@ -68,14 +73,13 @@
 // root.foo.bar                    I1102 17:20:06.921395 foo.go:42]  - two
 //
 // Or, in a table layout:
-// .-------------------------------------------------------------------------------------.
-// | root.foo.bar                    I1102 17:20:06.921395 foo.go:42] : current tags:    |
-// |                                                                  :------------------|
-// |                                                                  :  - one           |
-// |                                                                  :------------------|
-// |                                                                  :  - two           |
-// '-------------------------------------------------------------------------------------'
-//
+// .----------------------------------------------------------------------.
+// | root.foo.bar     I1102 17:20:06.921395 foo.go:42] : current tags:    |
+// |                                                   :------------------|
+// |                                                   :  - one           |
+// |                                                   :------------------|
+// |                                                   :  - two           |
+// '----------------------------------------------------------------------'
 func (l *LogEntry) Strings() (prefix string, lines []string) {
 	if l.Leveled != nil {
 		prefix, messages := l.Leveled.Strings()
@@ -88,8 +93,8 @@
 	return "INVALID ", []string{"INVALID"}
 }
 
-// Convert this LogEntry to proto. Returned value may be nil if given LogEntry is invalid, eg. contains neither a Raw
-// nor Leveled entry.
+// Convert this LogEntry to proto. Returned value may be nil if the given
+// LogEntry is invalid, eg. contains neither a Raw nor Leveled entry.
 func (l *LogEntry) Proto() *apb.LogEntry {
 	p := &apb.LogEntry{
 		Dn: string(l.DN),
@@ -111,8 +116,8 @@
 	return p
 }
 
-// Parse a proto LogEntry back into internal structure. This can be used in log proto API consumers to easily print
-// received log entries.
+// Parse a proto LogEntry back into internal structure. This can be used in log
+// proto API consumers to easily print received log entries.
 func LogEntryFromProto(l *apb.LogEntry) (*LogEntry, error) {
 	dn := DN(l.Dn)
 	if _, err := dn.Path(); err != nil {
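A hedged round-trip sketch for the proto conversion pair documented above (how
the entry is obtained is illustrative):

package logdemo

import (
	"fmt"

	"source.monogon.dev/metropolis/pkg/logtree"
)

func roundTrip(e *logtree.LogEntry) (*logtree.LogEntry, error) {
	p := e.Proto() // nil if the entry has neither a Raw nor a Leveled payload
	if p == nil {
		return nil, fmt.Errorf("invalid log entry")
	}
	return logtree.LogEntryFromProto(p)
}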
diff --git a/metropolis/pkg/logtree/logtree_publisher.go b/metropolis/pkg/logtree/logtree_publisher.go
index d4d35ff..6106b19 100644
--- a/metropolis/pkg/logtree/logtree_publisher.go
+++ b/metropolis/pkg/logtree/logtree_publisher.go
@@ -26,8 +26,8 @@
 	"source.monogon.dev/metropolis/pkg/logbuffer"
 )
 
-// LeveledFor returns a LeveledLogger publishing interface for a given DN. An error may be returned if the DN is
-// malformed.
+// LeveledFor returns a LeveledLogger publishing interface for a given DN. An
+// error may be returned if the DN is malformed.
 func (l *LogTree) LeveledFor(dn DN) (LeveledLogger, error) {
 	return l.nodeByDN(dn)
 }
@@ -40,7 +40,8 @@
 	return node.rawLineBuffer, nil
 }
 
-// MustLeveledFor returns a LeveledLogger publishing interface for a given DN, or panics if the given DN is invalid.
+// MustLeveledFor returns a LeveledLogger publishing interface for a given DN,
+// or panics if the given DN is invalid.
 func (l *LogTree) MustLeveledFor(dn DN) LeveledLogger {
 	leveled, err := l.LeveledFor(dn)
 	if err != nil {
@@ -57,7 +58,8 @@
 	return raw
 }
 
-// SetVerbosity sets the verbosity for a given DN (non-recursively, ie. for that DN only, not its children).
+// SetVerbosity sets the verbosity for a given DN (non-recursively, ie. for
+// that DN only, not its children).
 func (l *LogTree) SetVerbosity(dn DN, level VerbosityLevel) error {
 	node, err := l.nodeByDN(dn)
 	if err != nil {
@@ -67,8 +69,9 @@
 	return nil
 }
 
-// logRaw is called by this node's LineBuffer any time a raw log line is completed. It will create a new entry, append
-// it to the journal, and notify all pertinent subscribers.
+// logRaw is called by this node's LineBuffer any time a raw log line is
+// completed. It will create a new entry, append it to the journal, and notify
+// all pertinent subscribers.
 func (n *node) logRaw(line *logbuffer.Line) {
 	e := &entry{
 		origin: n.dn,
@@ -78,8 +81,9 @@
 	n.tree.journal.notify(e)
 }
 
-// log builds a LeveledPayload and entry for a given message, including all related metadata. It will create a new
-// entry append it to the journal, and notify all pertinent subscribers.
+// log builds a LeveledPayload and entry for a given message, including all
+// related metadata. It will create a new entry, append it to the journal, and
+// notify all pertinent subscribers.
 func (n *node) logLeveled(depth int, severity Severity, msg string) {
 	_, file, line, ok := runtime.Caller(2 + depth)
 	if !ok {
@@ -158,9 +162,10 @@
 	}
 }
 
-// verbose implements the VerboseLeveledLogger interface. It is a thin wrapper around node, with an 'enabled' bool. This
-// means that V(n)-returned VerboseLeveledLoggers must be short lived, as a changed in verbosity will not affect all
-// already existing VerboseLeveledLoggers.
+// verbose implements the VerboseLeveledLogger interface. It is a thin wrapper
+// around node, with an 'enabled' bool. This means that V(n)-returned
+// VerboseLeveledLoggers must be short-lived, as a change in verbosity will
+// not affect already existing VerboseLeveledLoggers.
 type verbose struct {
 	node    *node
 	enabled bool
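As a usage sketch for the publishing side (the LeveledLogger and
VerboseLeveledLogger method names used below - Infof, V, Enabled - follow the
usual klog-like shape and should be treated as assumptions here):

    func publishExample(lt *logtree.LogTree) {
        // MustLeveledFor only panics on malformed DNs, so it is fine for
        // compile-time constant DNs.
        lt.MustLeveledFor("root.foo.bar").Infof("starting up")

        // Raise verbosity for this DN only; children keep their own levels.
        if err := lt.SetVerbosity("root.foo.bar", 3); err != nil {
            panic(err) // only possible for malformed DNs
        }

        // Per the 'verbose' comment above, V(n) results are short-lived:
        // grab, use, drop.
        if v := lt.MustLeveledFor("root.foo.bar").V(1); v.Enabled() {
            v.Infof("verbose detail")
        }
    }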
diff --git a/metropolis/pkg/loop/loop.go b/metropolis/pkg/loop/loop.go
index 64b533b..c338f04 100644
--- a/metropolis/pkg/loop/loop.go
+++ b/metropolis/pkg/loop/loop.go
@@ -16,10 +16,13 @@
 
 // Package loop implements an interface to configure Linux loop devices.
 //
-// This package requires Linux 5.8 or higher because it uses the newer LOOP_CONFIGURE ioctl, which is better-behaved
-// and twice as fast as the old approach. It doesn't support all of the cryptloop functionality as it has been
-// superseded by dm-crypt and has known vulnerabilities. It also doesn't support on-the-fly reconfiguration of loop
-// devices as this is rather unusual, works only under very specific circumstances and would make the API less clean.
+// This package requires Linux 5.8 or higher because it uses the newer
+// LOOP_CONFIGURE ioctl, which is better-behaved and twice as fast as the old
+// approach. It doesn't support all of the cryptloop functionality as it has
+// been superseded by dm-crypt and has known vulnerabilities. It also doesn't
+// support on-the-fly reconfiguration of loop devices as this is rather
+// unusual, works only under very specific circumstances and would make the API
+// less clean.
 package loop
 
 import (
@@ -35,7 +38,8 @@
 	"golang.org/x/sys/unix"
 )
 
-// Lazily-initialized file descriptor for the control device /dev/loop-control (singleton)
+// Lazily-initialized file descriptor for the control device /dev/loop-control
+// (singleton)
 var (
 	mutex         sync.Mutex
 	loopControlFd *os.File
@@ -50,8 +54,10 @@
 
 // struct loop_config from @linux//include/uapi/linux:loop.h
 type loopConfig struct {
-	fd        uint32
-	blockSize uint32 // Power of 2 between 512 and os.Getpagesize(), defaults reasonably
+	fd uint32
+	// blockSize is a power of 2 between 512 and os.Getpagesize(), defaults
+	// reasonably
+	blockSize uint32
 	info      loopInfo64
 	_reserved [64]byte
 }
@@ -74,14 +80,16 @@
 }
 
 type Config struct {
-	// Block size of the loop device in bytes. Power of 2 between 512 and page size.
-	// Zero defaults to an reasonable block size.
+	// Block size of the loop device in bytes. Power of 2 between 512 and page
+	// size. Zero defaults to a reasonable block size.
 	BlockSize uint32
 	// Combination of flags from the Flag constants in this package.
 	Flags uint32
-	// Offset in bytes from the start of the file to the first byte of the device. Usually zero.
+	// Offset in bytes from the start of the file to the first byte of the
+	// device. Usually zero.
 	Offset uint64
-	// Maximum size of the loop device in bytes. Zero defaults to the whole file.
+	// Maximum size of the loop device in bytes. Zero defaults to the whole
+	// file.
 	SizeLimit uint64
 }
 
@@ -118,12 +126,14 @@
 const (
 	// Makes the loop device read-only even if the backing file is read-write.
 	FlagReadOnly = 1
-	// Unbinds the backing file as soon as the last user is gone. Useful for unbinding after unmount.
+	// Unbinds the backing file as soon as the last user is gone. Useful for
+	// unbinding after unmount.
 	FlagAutoclear = 4
-	// Enables kernel-side partition scanning on the loop device. Needed if you want to access specific partitions on
-	// a loop device.
+	// Enables kernel-side partition scanning on the loop device. Needed if you
+	// want to access specific partitions on a loop device.
 	FlagPartscan = 8
-	// Enables direct IO for the loop device, bypassing caches and buffer copying.
+	// Enables direct IO for the loop device, bypassing caches and buffer
+	// copying.
 	FlagDirectIO = 16
 )
 
@@ -169,7 +179,8 @@
 	}
 }
 
-// Open opens a loop device at the given path. It returns an error if the path is not a loop device.
+// Open opens a loop device at the given path. It returns an error if the path
+// is not a loop device.
 func Open(path string) (*Device, error) {
 	potentialDevice, err := os.Open(path)
 	if err != nil {
@@ -219,7 +230,8 @@
 	return string(backingFile), err
 }
 
-// RefreshSize recalculates the size of the loop device based on the config and the size of the backing file.
+// RefreshSize recalculates the size of the loop device based on the config and
+// the size of the backing file.
 func (d *Device) RefreshSize() error {
 	if err := d.ensureOpen(); err != nil {
 		return err
@@ -227,7 +239,8 @@
 	return unix.IoctlSetInt(int(d.dev.Fd()), unix.LOOP_SET_CAPACITY, 0)
 }
 
-// Close closes all file descriptors open to the device. Does not remove the device itself or alter its configuration.
+// Close closes all file descriptors open to the device. Does not remove the
+// device itself or alter its configuration.
 func (d *Device) Close() error {
 	if err := d.ensureOpen(); err != nil {
 		return err
diff --git a/metropolis/pkg/loop/loop_test.go b/metropolis/pkg/loop/loop_test.go
index 1ddb34f..16ead64 100644
--- a/metropolis/pkg/loop/loop_test.go
+++ b/metropolis/pkg/loop/loop_test.go
@@ -32,8 +32,9 @@
 	"golang.org/x/sys/unix"
 )
 
-// Write a test file with a very specific pattern (increasing little-endian 16 bit unsigned integers) to detect offset
-// correctness. File is always 128KiB large (2^16 * 2 bytes).
+// Write a test file with a very specific pattern (increasing little-endian
+// 16-bit unsigned integers) to detect offset correctness. File is always
+// 128KiB large (2^16 * 2 bytes).
 func makeTestFile() *os.File {
 	f, err := ioutil.TempFile("/tmp", "")
 	if err != nil {
@@ -100,8 +101,9 @@
 
 	backingFile, err := dev.BackingFilePath()
 	assert.NoError(t, err)
-	// The filename of the temporary file is not available in this context, but we know that the file
-	// needs to be in /tmp, which should be a good-enough test.
+	// The filename of the temporary file is not available in this context, but
+	// we know that the file needs to be in /tmp, which should be a good-enough
+	// test.
 	assert.Contains(t, backingFile, "/tmp/")
 }
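Spelled out, the invariant this pattern gives the tests (a sketch of the
property, not code from this file): the little-endian 16-bit value at even
byte offset off of the backing file is exactly off/2, so any offset handling
bug shifts the values in a detectable way.

    func checkPattern(r io.ReaderAt, off int64) error {
        var buf [2]byte
        if _, err := r.ReadAt(buf[:], off); err != nil {
            return err
        }
        got := binary.LittleEndian.Uint16(buf[:])
        if want := uint16(off / 2); got != want {
            return fmt.Errorf("at offset %d: got %d, want %d", off, got, want)
        }
        return nil
    }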
 
diff --git a/metropolis/pkg/pki/ca.go b/metropolis/pkg/pki/ca.go
index bbed085..5ab1089 100644
--- a/metropolis/pkg/pki/ca.go
+++ b/metropolis/pkg/pki/ca.go
@@ -33,16 +33,19 @@
 // certificates, and any other Certificate that has been created with CA(),
 // which makes this Certificate act as a CA and issue (sign) ceritficates.
 type Issuer interface {
-	// CACertificate returns the DER-encoded x509 certificate of the CA that will sign certificates when Issue is
-	// called, or nil if this is self-signing issuer.
+	// CACertificate returns the DER-encoded x509 certificate of the CA that
+	// will sign certificates when Issue is called, or nil if this is a
+	// self-signing issuer.
 	CACertificate(ctx context.Context, kv clientv3.KV) ([]byte, error)
-	// Issue will generate a key and certificate signed by the Issuer. The returned certificate is x509 DER-encoded,
-	// while the key is a bare ed25519 key.
+	// Issue will generate a key and certificate signed by the Issuer. The
+	// returned certificate is x509 DER-encoded, while the key is a bare
+	// ed25519 key.
 	Issue(ctx context.Context, req *Certificate, kv clientv3.KV) (cert, key []byte, err error)
 }
 
-// issueCertificate is a generic low level certificate-and-key issuance function. If ca or cakey is null, the
-// certificate will be self-signed. The returned certificate is DER-encoded, while the returned key is internal.
+// issueCertificate is a generic low level certificate-and-key issuance
+// function. If ca or caKey is nil, the certificate will be self-signed. The
+// returned certificate is DER-encoded, while the returned key is internal.
 func issueCertificate(req *Certificate, ca *x509.Certificate, caKey interface{}) (cert, key []byte, err error) {
 	var privKey ed25519.PrivateKey
 	var pubKey ed25519.PublicKey
@@ -75,7 +78,8 @@
 	req.template.BasicConstraintsValid = true
 	req.template.SubjectKeyId = skid
 
-	// Set the AuthorityKeyID to the SKID of the signing certificate (or self, if self-signing).
+	// Set the AuthorityKeyID to the SKID of the signing certificate (or self,
+	// if self-signing).
 	if ca != nil && caKey != nil {
 		req.template.AuthorityKeyId = ca.AuthorityKeyId
 	} else {
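From a caller's perspective, the two Issuer methods combine roughly like this
(a hedged sketch; the logging and the etcd client wiring are illustrative):

    func issueAndLog(ctx context.Context, kv clientv3.KV,
        iss pki.Issuer, req *pki.Certificate) error {
        caDER, err := iss.CACertificate(ctx, kv)
        if err != nil {
            return err
        }
        if caDER == nil {
            // A self-signing issuer has no separate CA certificate.
            log.Println("issuer is self-signing")
        }
        certDER, key, err := iss.Issue(ctx, req, kv)
        if err != nil {
            return err
        }
        log.Printf("issued %d-byte DER certificate, %d-byte ed25519 key",
            len(certDER), len(key))
        return nil
    }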
diff --git a/metropolis/pkg/pki/certificate.go b/metropolis/pkg/pki/certificate.go
index ff60f73..c0a1f53 100644
--- a/metropolis/pkg/pki/certificate.go
+++ b/metropolis/pkg/pki/certificate.go
@@ -37,7 +37,8 @@
 	prefix string
 }
 
-// Namespaced creates a namespace for storing certificate data in etcd at a given 'path' prefix.
+// Namespaced creates a namespace for storing certificate data in etcd at a
+// given 'path' prefix.
 func Namespaced(prefix string) Namespace {
 	return Namespace{
 		prefix: prefix,
@@ -91,7 +92,7 @@
 
 // Client makes a Kubernetes PKI-compatible client certificate template.
 // Directly derived from Kubernetes PKI requirements documented at
-// https://kubernetes.io/docs/setup/best-practices/certificates/#configure-certificates-manually
+//   https://kubernetes.io/docs/setup/best-practices/certificates/#configure-certificates-manually
 func Client(identity string, groups []string) x509.Certificate {
 	return x509.Certificate{
 		Subject: pkix.Name{
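For instance, a cluster-admin client template per those requirements (the
identity string is made up for illustration; wiring the template into an
Issuer is elided):

    func adminClientTemplate() x509.Certificate {
        // Membership in system:masters grants cluster-admin rights.
        return pki.Client("metropolis-admin", []string{"system:masters"})
    }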
diff --git a/metropolis/pkg/pki/x509.go b/metropolis/pkg/pki/x509.go
index d2affe8..e198902 100644
--- a/metropolis/pkg/pki/x509.go
+++ b/metropolis/pkg/pki/x509.go
@@ -30,12 +30,12 @@
 	unknownNotAfter = time.Unix(253402300799, 0)
 )
 
-// Workaround for https://github.com/golang/go/issues/26676 in Go's crypto/x509. Specifically Go
-// violates Section 4.2.1.2 of RFC 5280 without this.
-// Fixed for 1.15 in https://go-review.googlesource.com/c/go/+/227098/.
+// Workaround for https://github.com/golang/go/issues/26676 in Go's
+// crypto/x509. Specifically Go violates Section 4.2.1.2 of RFC 5280 without
+// this. Fixed for 1.15 in https://go-review.googlesource.com/c/go/+/227098/.
 //
-// Taken from https://github.com/FiloSottile/mkcert/blob/master/cert.go#L295 written by one of Go's
-// crypto engineers
+// Taken from https://github.com/FiloSottile/mkcert/blob/master/cert.go#L295,
+// written by one of Go's crypto engineers.
 //
 // TODO(lorenz): remove this once we migrate to Go 1.15.
 func calculateSKID(pubKey crypto.PublicKey) ([]byte, error) {
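The body is not shown in this hunk; for reference, the technique (as in the
linked mkcert code) is RFC 5280 section 4.2.1.2 method 1: SHA-1 over the
subjectPublicKey BIT STRING. A self-contained sketch:

    func skidSketch(pubKey crypto.PublicKey) ([]byte, error) {
        spkiASN1, err := x509.MarshalPKIXPublicKey(pubKey)
        if err != nil {
            return nil, err
        }
        // Unwrap the SubjectPublicKeyInfo to get at the raw BIT STRING.
        var spki struct {
            Algorithm        pkix.AlgorithmIdentifier
            SubjectPublicKey asn1.BitString
        }
        if _, err := asn1.Unmarshal(spkiASN1, &spki); err != nil {
            return nil, err
        }
        skid := sha1.Sum(spki.SubjectPublicKey.Bytes)
        return skid[:], nil
    }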
diff --git a/metropolis/pkg/supervisor/supervisor.go b/metropolis/pkg/supervisor/supervisor.go
index c37b590..f26732d 100644
--- a/metropolis/pkg/supervisor/supervisor.go
+++ b/metropolis/pkg/supervisor/supervisor.go
@@ -16,9 +16,10 @@
 
 package supervisor
 
-// The service supervision library allows for writing of reliable, service-style software within a Metropolis node.
-// It builds upon the Erlang/OTP supervision tree system, adapted to be more Go-ish.
-// For detailed design see go/supervision.
+// The service supervision library allows for writing reliable, service-style
+// software within a Metropolis node. It builds upon the Erlang/OTP
+// supervision tree system, adapted to be more Go-ish. For detailed design,
+// see go/supervision.
 
 import (
 	"context"
@@ -28,17 +29,21 @@
 	"source.monogon.dev/metropolis/pkg/logtree"
 )
 
-// A Runnable is a function that will be run in a goroutine, and supervised throughout its lifetime. It can in turn
-// start more runnables as its children, and those will form part of a supervision tree.
-// The context passed to a runnable is very important and needs to be handled properly. It will be live (non-errored) as
-// long as the runnable should be running, and canceled (ctx.Err() will be non-nil) when the supervisor wants it to
-// exit. This means this context is also perfectly usable for performing any blocking operations.
+// A Runnable is a function that will be run in a goroutine, and supervised
+// throughout its lifetime. It can in turn start more runnables as its
+// children, and those will form part of a supervision tree.
+// The context passed to a runnable is very important and needs to be handled
+// properly. It will be live (non-errored) as long as the runnable should be
+// running, and canceled (ctx.Err() will be non-nil) when the supervisor wants
+// it to exit. This means this context is also perfectly usable for performing
+// any blocking operations.
 type Runnable func(ctx context.Context) error
 
-// RunGroup starts a set of runnables as a group. These runnables will run together, and if any one of them quits
-// unexpectedly, the result will be canceled and restarted.
-// The context here must be an existing Runnable context, and the spawned runnables will run under the node that this
-// context represents.
+// RunGroup starts a set of runnables as a group. These runnables will run
+// together, and if any one of them quits unexpectedly, the rest will be
+// canceled and restarted.
+// The context here must be an existing Runnable context, and the spawned
+// runnables will run under the node that this context represents.
 func RunGroup(ctx context.Context, runnables map[string]Runnable) error {
 	node, unlock := fromContext(ctx)
 	defer unlock()
@@ -52,8 +57,9 @@
 	})
 }
 
-// Signal tells the supervisor that the calling runnable has reached a certain state of its lifecycle. All runnables
-// should SignalHealthy when they are ready with set up, running other child runnables and are now 'serving'.
+// Signal tells the supervisor that the calling runnable has reached a certain
+// state of its lifecycle. All runnables should SignalHealthy when they are
+// done with setup, running other child runnables, and are now 'serving'.
 func Signal(ctx context.Context, signal SignalType) {
 	node, unlock := fromContext(ctx)
 	defer unlock()
@@ -63,28 +69,34 @@
 type SignalType int
 
 const (
-	// The runnable is healthy, done with setup, done with spawning more Runnables, and ready to serve in a loop.
-	// The runnable needs to check the parent context and ensure that if that context is done, the runnable exits.
+	// The runnable is healthy, done with setup, done with spawning more
+	// Runnables, and ready to serve in a loop. The runnable needs to check
+	// the parent context and ensure that if that context is done, the runnable
+	// exits.
 	SignalHealthy SignalType = iota
-	// The runnable is done - it does not need to run any loop. This is useful for Runnables that only set up other
-	// child runnables. This runnable will be restarted if a related failure happens somewhere in the supervision tree.
+	// The runnable is done - it does not need to run any loop. This is useful
+	// for Runnables that only set up other child runnables. This runnable will
+	// be restarted if a related failure happens somewhere in the supervision
+	// tree.
 	SignalDone
 )
 
-// supervisor represents and instance of the supervision system. It keeps track of a supervision tree and a request
-// channel to its internal processor goroutine.
+// supervisor represents an instance of the supervision system. It keeps track
+// of a supervision tree and a request channel to its internal processor
+// goroutine.
 type supervisor struct {
 	// mu guards the entire state of the supervisor.
 	mu sync.RWMutex
-	// root is the root node of the supervision tree, named 'root'. It represents the Runnable started with the
-	// supervisor.New call.
+	// root is the root node of the supervision tree, named 'root'. It
+	// represents the Runnable started with the supervisor.New call.
 	root *node
 	// logtree is the main logtree exposed to runnables and used internally.
 	logtree *logtree.LogTree
 	// ilogger is the internal logger logging to "supervisor" in the logtree.
 	ilogger logtree.LeveledLogger
 
-	// pReq is an interface channel to the lifecycle processor of the supervisor.
+	// pReq is an interface channel to the lifecycle processor of the
+	// supervisor.
 	pReq chan *processorRequest
 
 	// propagate panics, ie. don't catch them.
@@ -95,8 +107,9 @@
 type SupervisorOpt func(s *supervisor)
 
 var (
-	// WithPropagatePanic prevents the Supervisor from catching panics in runnables and treating them as failures.
-	// This is useful to enable for testing and local debugging.
+	// WithPropagatePanic prevents the Supervisor from catching panics in
+	// runnables and treating them as failures. This is useful to enable for
+	// testing and local debugging.
 	WithPropagatePanic = func(s *supervisor) {
 		s.propagatePanic = true
 	}
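Putting the pieces documented above together, a minimal tree looks roughly
like this (a sketch using only the APIs described in this file):

    func root(ctx context.Context) error {
        // Two children in one group: if either dies unexpectedly, both are
        // canceled and restarted together.
        err := supervisor.RunGroup(ctx, map[string]supervisor.Runnable{
            "server": serve,
            "poller": serve, // placeholder: both run the same loop here
        })
        if err != nil {
            return err
        }
        // root only spawns children, it has no serving loop of its own.
        supervisor.Signal(ctx, supervisor.SignalDone)
        return nil
    }

    func serve(ctx context.Context) error {
        supervisor.Signal(ctx, supervisor.SignalHealthy)
        // Live until the supervisor cancels us; returning ctx.Err() is the
        // expected clean-cancel path.
        <-ctx.Done()
        return ctx.Err()
    }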
diff --git a/metropolis/pkg/supervisor/supervisor_node.go b/metropolis/pkg/supervisor/supervisor_node.go
index a7caf82..a3bf5e4 100644
--- a/metropolis/pkg/supervisor/supervisor_node.go
+++ b/metropolis/pkg/supervisor/supervisor_node.go
@@ -25,23 +25,28 @@
 	"github.com/cenkalti/backoff/v4"
 )
 
-// node is a supervision tree node. It represents the state of a Runnable within this tree, its relation to other tree
-// elements, and contains supporting data needed to actually supervise it.
+// node is a supervision tree node. It represents the state of a Runnable
+// within this tree, its relation to other tree elements, and contains
+// supporting data needed to actually supervise it.
 type node struct {
-	// The name of this node. Opaque string. It's used to make up the 'dn' (distinguished name) of a node within
-	// the tree. When starting a runnable inside a tree, this is where that name gets used.
+	// The name of this node. Opaque string. It's used to make up the 'dn'
+	// (distinguished name) of a node within the tree. When starting a runnable
+	// inside a tree, this is where that name gets used.
 	name     string
 	runnable Runnable
 
 	// The supervisor managing this tree.
 	sup *supervisor
-	// The parent, within the tree, of this node. If this is the root node of the tree, this is nil.
+	// The parent, within the tree, of this node. If this is the root node of
+	// the tree, this is nil.
 	parent *node
-	// Children of this tree. This is represented by a map keyed from child node names, for easy access.
+	// Children of this tree. This is represented by a map keyed from child
+	// node names, for easy access.
 	children map[string]*node
-	// Supervision groups. Each group is a set of names of children. Sets, and as such groups, don't overlap between
-	// each other. A supervision group indicates that if any child within that group fails, all others should be
-	// canceled and restarted together.
+	// Supervision groups. Each group is a set of names of children. Sets, and
+	// as such groups, don't overlap with each other. A supervision group
+	// indicates that if any child within that group fails, all others should
+	// be canceled and restarted together.
 	groups []map[string]bool
 
 	// The current state of the runnable in this node.
@@ -55,19 +60,21 @@
 	ctxC context.CancelFunc
 }
 
-// nodeState is the state of a runnable within a node, and in a way the node itself.
-// This follows the state diagram from go/supervision.
+// nodeState is the state of a runnable within a node, and in a way the node
+// itself. This follows the state diagram from go/supervision.
 type nodeState int
 
 const (
-	// A node that has just been created, and whose runnable has been started already but hasn't signaled anything yet.
+	// A node that has just been created, and whose runnable has been started
+	// already but hasn't signaled anything yet.
 	nodeStateNew nodeState = iota
-	// A node whose runnable has signaled being healthy - this means it's ready to serve/act.
+	// A node whose runnable has signaled being healthy - this means it's ready
+	// to serve/act.
 	nodeStateHealthy
 	// A node that has unexpectedly returned or panicked.
 	nodeStateDead
-	// A node that has declared that its done with its work and should not be restarted, unless a supervision tree
-	// failure requires that.
+	// A node that has declared that it's done with its work and should not be
+	// restarted, unless a supervision tree failure requires that.
 	nodeStateDone
 	// A node that has returned after being requested to cancel.
 	nodeStateCanceled
@@ -101,8 +108,9 @@
 	dnKey         = contextKey("dn")
 )
 
-// fromContext retrieves a tree node from a runnable context. It takes a lock on the tree and returns an unlock
-// function. This unlock function needs to be called once mutations on the tree/supervisor/node are done.
+// fromContext retrieves a tree node from a runnable context. It takes a lock
+// on the tree and returns an unlock function. This unlock function needs to be
+// called once mutations on the tree/supervisor/node are done.
 func fromContext(ctx context.Context) (*node, func()) {
 	sup, ok := ctx.Value(supervisorKey).(*supervisor)
 	if !ok {
@@ -120,12 +128,13 @@
 	return sup.nodeByDN(dnParent), sup.mu.Unlock
 }
 
-// All the following 'internal' supervisor functions must only be called with the supervisor lock taken. Getting a lock
-// via fromContext is enough.
+// All the following 'internal' supervisor functions must only be called with
+// the supervisor lock taken. Getting a lock via fromContext is enough.
 
-// dn returns the distinguished name of a node. This distinguished name is a period-separated, inverse-DNS-like name.
-// For instance, the runnable 'foo' within the runnable 'bar' will be called 'root.bar.foo'. The root of the tree is
-// always named, and has the dn, 'root'.
+// dn returns the distinguished name of a node. This distinguished name is a
+// period-separated, inverse-DNS-like name. For instance, the runnable 'foo'
+// within the runnable 'bar' will be called 'root.bar.foo'. The root of the
+// tree is always named 'root'.
 func (n *node) dn() string {
 	if n.parent != nil {
 		return fmt.Sprintf("%s.%s", n.parent.dn(), n.name)
@@ -133,8 +142,9 @@
 	return n.name
 }
 
-// groupSiblings is a helper function to get all runnable group siblings of a given runnable name within this node.
-// All children are always in a group, even if that group is unary.
+// groupSiblings is a helper function to get all runnable group siblings of a
+// given runnable name within this node. All children are always in a group,
+// even if that group is unary.
 func (n *node) groupSiblings(name string) map[string]bool {
 	for _, m := range n.groups {
 		if _, ok := m[name]; ok {
@@ -144,11 +154,12 @@
 	return nil
 }
 
-// newNode creates a new node with a given parent. It does not register it with the parent (as that depends on group
-// placement).
+// newNode creates a new node with a given parent. It does not register it with
+// the parent (as that depends on group placement).
 func newNode(name string, runnable Runnable, sup *supervisor, parent *node) *node {
-	// We use exponential backoff for failed runnables, but at some point we cap at a given backoff time.
-	// To achieve this, we set MaxElapsedTime to 0, which will cap the backoff at MaxInterval.
+	// We use exponential backoff for failed runnables, but at some point we
+	// cap at a given backoff time. To achieve this, we set MaxElapsedTime to
+	// 0, which will cap the backoff at MaxInterval.
 	bo := backoff.NewExponentialBackOff()
 	bo.MaxElapsedTime = 0
 
@@ -165,11 +176,12 @@
 	return n
 }
 
-// resetNode sets up all the dynamic fields of the node, in preparation of starting a runnable. It clears the node's
-// children, groups and resets its context.
+// resetNode sets up all the dynamic fields of the node, in preparation for
+// starting a runnable. It clears the node's children, groups and resets its
+// context.
 func (n *node) reset() {
-	// Make new context. First, acquire parent context. For the root node that's Background, otherwise it's the
-	// parent's context.
+	// Make new context. First, acquire parent context. For the root node
+	// that's Background, otherwise it's the parent's context.
 	var pCtx context.Context
 	if n.parent == nil {
 		pCtx = context.Background()
@@ -263,7 +275,8 @@
 	return nil
 }
 
-// signal sequences state changes by signals received from runnables and updates a node's status accordingly.
+// signal sequences state changes by signals received from runnables and
+// updates a node's status accordingly.
 func (n *node) signal(signal SignalType) {
 	switch signal {
 	case SignalHealthy:
diff --git a/metropolis/pkg/supervisor/supervisor_processor.go b/metropolis/pkg/supervisor/supervisor_processor.go
index 965a667..5fa759e 100644
--- a/metropolis/pkg/supervisor/supervisor_processor.go
+++ b/metropolis/pkg/supervisor/supervisor_processor.go
@@ -24,11 +24,13 @@
 	"time"
 )
 
-// The processor maintains runnable goroutines - ie., when requested will start one, and then once it exists it will
-// record the result and act accordingly. It is also responsible for detecting and acting upon supervision subtrees that
-// need to be restarted after death (via a 'GC' process)
+// The processor maintains runnable goroutines - ie., when requested will start
+// one, and then, once it exits, will record the result and act accordingly.
+// It is also responsible for detecting and acting upon supervision subtrees
+// that need to be restarted after death (via a 'GC' process).
 
-// processorRequest is a request for the processor. Only one of the fields can be set.
+// processorRequest is a request for the processor. Only one of the fields can
+// be set.
 type processorRequest struct {
 	schedule    *processorRequestSchedule
 	died        *processorRequestDied
@@ -40,7 +42,8 @@
 	dn string
 }
 
-// processorRequestDied is a signal from a runnable goroutine that the runnable has died.
+// processorRequestDied is a signal from a runnable goroutine that the runnable
+// has died.
 type processorRequestDied struct {
 	dn  string
 	err error
@@ -57,8 +60,10 @@
 	// Waiters waiting for the GC to be settled.
 	var waiters []chan struct{}
 
-	// The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
-	// (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
+	// The GC will run every millisecond if needed. Any time the processor
+	// requests a change in the supervision tree (ie a death or a new runnable)
+	// it will mark the state as dirty and run the GC on the next millisecond
+	// cycle.
 	gc := time.NewTicker(1 * time.Millisecond)
 	defer gc.Stop()
 	clean := true
@@ -85,7 +90,8 @@
 			clean = true
 			cleanCycles += 1
 
-			// This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
+			// This threshold is somewhat arbitrary. It's a balance between
+			// test speed and test reliability.
 			if cleanCycles > 50 {
 				for _, w := range waiters {
 					close(w)
@@ -109,8 +115,9 @@
 	}
 }
 
-// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
-// they do not get automatically restarted.
+// processKill cancels all nodes in the supervision tree. This is only called
+// right before exiting the processor, so they do not get automatically
+// restarted.
 func (s *supervisor) processKill() {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -138,7 +145,8 @@
 	}
 }
 
-// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
+// processSchedule starts a node's runnable in a goroutine and records its
+// output once it's done.
 func (s *supervisor) processSchedule(r *processorRequestSchedule) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -169,8 +177,9 @@
 	}()
 }
 
-// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
-// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
+// processDied records the result from a runnable goroutine, and updates its
+// node state accordingly. If the result is a death and not an expected exit,
+// related nodes (ie. children and group siblings) are canceled accordingly.
 func (s *supervisor) processDied(r *processorRequestDied) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -195,14 +204,16 @@
 		break
 	}
 
-	// Simple case: the context was canceled and the returned error is the context error.
+	// Simple case: the context was canceled and the returned error is the
+	// context error.
 	if err := ctx.Err(); err != nil && perr == err {
 		// Mark the node as canceled successfully.
 		n.state = nodeStateCanceled
 		return
 	}
 
-	// Otherwise, the Runnable should not have died or quit. Handle accordingly.
+	// Otherwise, the Runnable should not have died or quit. Handle
+	// accordingly.
 	err := r.err
 	// A lack of returned error is also an error.
 	if err == nil {
@@ -225,27 +236,33 @@
 				continue
 			}
 			sibling := n.parent.children[name]
-			// TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
+			// TODO(q3k): does this need to run in a goroutine, ie. can a
+			// context cancel block?
 			sibling.ctxC()
 		}
 	}
 }
 
-// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
-// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
-// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
+// processGC runs the GC process. It's not really Garbage Collection, as in, it
+// doesn't remove unnecessary tree nodes - but it does find nodes that need to
+// be restarted, finds the subset that can be, and then schedules them for
+// running. As such, it's less of a Garbage Collector and more of a
+// Necromancer. However, GC is a friendlier name.
 func (s *supervisor) processGC() {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	// The 'GC' serves is the main business logic of the supervision tree. It traverses a locked tree and tries to
-	// find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
-	// subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
-	// also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
-	// that can be restarted at once, it will do so.
+	// The 'GC' serves as the main business logic of the supervision tree. It
+	// traverses a locked tree and tries to find subtrees that must be
+	// restarted (because of a DEAD/CANCELED runnable). It then finds which of
+	// these subtrees that should be restarted can be restarted, ie. which ones
+	// are fully recursively DEAD/CANCELED. It also finds the smallest set of
+	// largest subtrees that can be restarted, ie. if there's multiple DEAD
+	// runnables that can be restarted at once, it will do so.
 
 	// Phase one: Find all leaves.
-	// This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
+	// This is a simple DFS that finds all the leaves of the tree, ie. all
+	// nodes that do not have child nodes.
 	leaves := make(map[string]bool)
 	queue := []*node{s.root}
 	for {
@@ -264,14 +281,17 @@
 		}
 	}
 
-	// Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
-	// A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
-	// Such a 'ready' subtree can be restarted by the supervisor if needed.
+	// Phase two: traverse tree from node to root and make note of all subtrees
+	// that can be restarted.
+	// A subtree is restartable/ready iff every node in that subtree is either
+	// CANCELED, DEAD or DONE. Such a 'ready' subtree can be restarted by the
+	// supervisor if needed.
 
 	// DNs that we already visited.
 	visited := make(map[string]bool)
 	// DNs whose subtrees are ready to be restarted.
-	// These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
+	// These are all subtrees recursively - ie., root.a.a and root.a will both
+	// be marked here.
 	ready := make(map[string]bool)
 
 	// We build a queue of nodes to visit, starting from the leaves.
@@ -299,17 +319,20 @@
 			}
 		}
 
-		// If no decision about children is available, it means we ended up in this subtree through some shorter path
-		// of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
-		// to be enqueued. Easy solution: just push back the current element and retry later.
+		// If no decision about children is available, it means we ended up in
+		// this subtree through some shorter path of a shorter/lower-order
+		// leaf. There is a path to a leaf that's longer than the one that
+		// caused this node to be enqueued. Easy solution: just push back the
+		// current element and retry later.
 		if !allVisited {
 			// Push back to queue and wait for a decision later.
 			queue = append(queue, cur)
 			continue
 		}
 
-		// All children have been visited and we have an idea about whether they're ready/restartable. All of the node's
-		// children must be restartable in order for this node to be restartable.
+		// All children have been visited and we have an idea about whether
+		// they're ready/restartable. All of the node's children must be
+		// restartable in order for this node to be restartable.
 		childrenReady := true
 		for _, c := range cur.children {
 			if !ready[c.dn()] {
@@ -318,7 +341,8 @@
 			}
 		}
 
-		// In addition to children, the node itself must be restartable (ie. DONE, DEAD or CANCELED).
+		// In addition to children, the node itself must be restartable (ie.
+		// DONE, DEAD or CANCELED).
 		curReady := false
 		switch cur.state {
 		case nodeStateDone:
@@ -329,7 +353,8 @@
 			curReady = true
 		}
 
-		// Note down that we have an opinion on this node, and note that opinion down.
+		// Note down that we have an opinion on this node, and record what
+		// that opinion is.
 		visited[curDn] = true
 		ready[curDn] = childrenReady && curReady
 
@@ -339,15 +364,17 @@
 		}
 	}
 
-	// Phase 3: traverse tree from root to find largest subtrees that need to be restarted and are ready to be
-	// restarted.
+	// Phase 3: traverse tree from root to find largest subtrees that need to
+	// be restarted and are ready to be restarted.
 
 	// All DNs that need to be restarted by the GC process.
 	want := make(map[string]bool)
-	// All DNs that need to be restarted and can be restarted by the GC process - a subset of 'want' DNs.
+	// All DNs that need to be restarted and can be restarted by the GC process
+	// - a subset of 'want' DNs.
 	can := make(map[string]bool)
-	// The set difference between 'want' and 'can' are all nodes that should be restarted but can't yet (ie. because
-	// a child is still in the process of being canceled).
+	// The set difference between 'want' and 'can' are all nodes that should be
+	// restarted but can't yet (ie. because a child is still in the process of
+	// being canceled).
 
 	// DFS from root.
 	queue = []*node{s.root}
@@ -366,14 +393,16 @@
 
 		// If it should be restarted and is ready to be restarted...
 		if want[cur.dn()] && ready[cur.dn()] {
-			// And its parent context is valid (ie hasn't been canceled), mark it as restartable.
+			// And its parent context is valid (ie hasn't been canceled), mark
+			// it as restartable.
 			if cur.parent == nil || cur.parent.ctx.Err() == nil {
 				can[cur.dn()] = true
 				continue
 			}
 		}
 
-		// Otherwise, traverse further down the tree to see if something else needs to be done.
+		// Otherwise, traverse further down the tree to see if something else
+		// needs to be done.
 		for _, c := range cur.children {
 			queue = append(queue, c)
 		}
@@ -383,13 +412,15 @@
 	for dn, _ := range can {
 		n := s.nodeByDN(dn)
 
-		// Only back off when the node unexpectedly died - not when it got canceled.
+		// Only back off when the node unexpectedly died - not when it got
+		// canceled.
 		bo := time.Duration(0)
 		if n.state == nodeStateDead {
 			bo = n.bo.NextBackOff()
 		}
 
-		// Prepare node for rescheduling - remove its children, reset its state to new.
+		// Prepare node for rescheduling - remove its children, reset its state
+		// to new.
 		n.reset()
 		s.ilogger.Infof("rescheduling supervised node %s with backoff %s", dn, bo.String())
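To make phase one concrete, the leaf-finding traversal over a toy tree shape
(illustration only - not the real node type, which carries much more state):

    type toyNode struct {
        dn       string
        children map[string]*toyNode
    }

    // leaves returns the DNs of all nodes without children.
    func leaves(root *toyNode) map[string]bool {
        out := make(map[string]bool)
        queue := []*toyNode{root}
        for len(queue) > 0 {
            cur := queue[0]
            queue = queue[1:]
            if len(cur.children) == 0 {
                out[cur.dn] = true
                continue
            }
            for _, c := range cur.children {
                queue = append(queue, c)
            }
        }
        return out
    }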
 
diff --git a/metropolis/pkg/supervisor/supervisor_support.go b/metropolis/pkg/supervisor/supervisor_support.go
index c2b569c..5f69104 100644
--- a/metropolis/pkg/supervisor/supervisor_support.go
+++ b/metropolis/pkg/supervisor/supervisor_support.go
@@ -16,21 +16,26 @@
 
 package supervisor
 
-// Supporting infrastructure to allow running some non-Go payloads under supervision.
+// Supporting infrastructure to allow running some non-Go payloads under
+// supervision.
 
 import (
 	"context"
 	"net"
 	"os/exec"
+
 	"source.monogon.dev/metropolis/pkg/logtree"
 
 	"google.golang.org/grpc"
 )
 
-// GRPCServer creates a Runnable that serves gRPC requests as longs as it's not canceled.
-// If graceful is set to true, the server will be gracefully stopped instead of plain stopped. This means all pending
-// RPCs will finish, but also requires streaming gRPC handlers to check their context liveliness and exit accordingly.
-// If the server code does not support this, `graceful` should be false and the server will be killed violently instead.
+// GRPCServer creates a Runnable that serves gRPC requests as long as it's not
+// canceled.
+// If graceful is set to true, the server will be gracefully stopped instead of
+// plain stopped. This means all pending RPCs will finish, but also requires
+// streaming gRPC handlers to check their context liveness and exit
+// accordingly. If the server code does not support this, `graceful` should be
+// false and the server will be killed violently instead.
 func GRPCServer(srv *grpc.Server, lis net.Listener, graceful bool) Runnable {
 	return func(ctx context.Context) error {
 		Signal(ctx, SignalHealthy)
@@ -52,7 +57,8 @@
 	}
 }
 
-// RunCommand will create a Runnable that starts a long-running command, whose exit is determined to be a failure.
+// RunCommand will create a Runnable that starts a long-running command, whose
+// exit is treated as a failure.
 func RunCommand(ctx context.Context, cmd *exec.Cmd, opts ...RunCommandOption) error {
 	Signal(ctx, SignalHealthy)
 
@@ -86,8 +92,8 @@
 	parseKlog bool
 }
 
-// ParseKLog signals that the command being run will return klog-compatible logs
-// to stdout and/or stderr, and these will be re-interpreted as structured
+// ParseKLog signals that the command being run will return klog-compatible
+// logs to stdout and/or stderr, and these will be re-interpreted as structured
 // logging and emitted to the supervisor's logger.
 func ParseKLog() RunCommandOption {
 	return RunCommandOption{
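A combined usage sketch (supervisor.Run for registering a named child is
assumed here and is not part of this diff; the daemon path is made up):

    func services(ctx context.Context) error {
        lis, err := net.Listen("tcp", "localhost:0")
        if err != nil {
            return err
        }
        // graceful=true: pending RPCs finish, but streaming handlers must
        // watch their context and exit on cancellation.
        srv := grpc.NewServer()
        err = supervisor.Run(ctx, "grpc",
            supervisor.GRPCServer(srv, lis, true))
        if err != nil {
            return err
        }
        supervisor.Signal(ctx, supervisor.SignalHealthy)

        // Long-running external process: any exit counts as a failure, and
        // its klog-style output is reinterpreted as structured logging.
        cmd := exec.CommandContext(ctx, "/usr/local/bin/some-daemon")
        return supervisor.RunCommand(ctx, cmd, supervisor.ParseKLog())
    }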
diff --git a/metropolis/pkg/supervisor/supervisor_test.go b/metropolis/pkg/supervisor/supervisor_test.go
index 9c7bdb7..db84163 100644
--- a/metropolis/pkg/supervisor/supervisor_test.go
+++ b/metropolis/pkg/supervisor/supervisor_test.go
@@ -76,7 +76,8 @@
 	}
 }
 
-// rc is a Remote Controlled runnable. It is a generic runnable used for testing the supervisor.
+// rc is a Remote Controlled runnable. It is a generic runnable used for
+// testing the supervisor.
 type rc struct {
 	req chan rcRunnableRequest
 }
@@ -129,7 +130,8 @@
 }
 
 func (r *rc) waitState(s rcRunnableState) {
-	// This is poll based. Making it non-poll based would make the RC runnable logic a bit more complex for little gain.
+	// This is poll based. Making it non-poll based would make the RC runnable
+	// logic a bit more complex for little gain.
 	for {
 		got := r.state()
 		if got == s {
@@ -384,12 +386,14 @@
 	}, WithPropagatePanic)
 
 	one.becomeHealthy()
-	// Die a bunch of times in a row, this brings up the next exponential backoff to over a second.
+	// Die a bunch of times in a row; this brings the next exponential
+	// backoff up to over a second.
 	for i := 0; i < 4; i += 1 {
 		one.die()
 		one.waitState(rcRunnableStateNew)
 	}
-	// Measure how long it takes for the runnable to respawn after a number of failures
+	// Measure how long it takes for the runnable to respawn after a number of
+	// failures.
 	start := time.Now()
 	one.die()
 	one.becomeHealthy()
@@ -411,8 +415,9 @@
 	}
 }
 
-// TestResilience throws some curveballs at the supervisor - either programming errors or high load. It then ensures
-// that another runnable is running, and that it restarts on its sibling failure.
+// TestResilience throws some curveballs at the supervisor - either programming
+// errors or high load. It then ensures that another runnable is running, and
+// that it restarts on its sibling failure.
 func TestResilience(t *testing.T) {
 	// request/response channel for testing liveness of the 'one' runnable
 	req := make(chan chan struct{})
@@ -443,7 +448,8 @@
 		timeout.Stop()
 	}
 
-	// A nasty runnable that calls Signal with the wrong context (this is a programming error)
+	// A nasty runnable that calls Signal with the wrong context (this is a
+	// programming error)
 	two := func(ctx context.Context) error {
 		Signal(context.TODO(), SignalHealthy)
 		return nil
diff --git a/metropolis/pkg/tpm/credactivation_compat.go b/metropolis/pkg/tpm/credactivation_compat.go
index 039f8d5..a6710ae 100644
--- a/metropolis/pkg/tpm/credactivation_compat.go
+++ b/metropolis/pkg/tpm/credactivation_compat.go
@@ -16,13 +16,16 @@
 
 package tpm
 
-// This file is adapted from github.com/google/go-tpm/tpm2/credactivation which outputs broken
-// challenges for unknown reasons. They use u16 length-delimited outputs for the challenge blobs
-// which is incorrect. Rather than rewriting the routine, we only applied minimal fixes to it
-// and skip the ECC part of the issue (because we would rather trust the proprietary RSA implementation).
+// This file is adapted from github.com/google/go-tpm/tpm2/credactivation which
+// outputs broken challenges for unknown reasons. They use u16 length-delimited
+// outputs for the challenge blobs which is incorrect. Rather than rewriting
+// the routine, we only applied minimal fixes to it and skip the ECC part of
+// the issue (because we would rather trust the proprietary RSA
+// implementation).
 //
-// TODO(lorenz): I'll eventually deal with this upstream, but for now just fix it here (it's not that)
-// much code after all (https://github.com/google/go-tpm/issues/121)
+// TODO(lorenz): I'll eventually deal with this upstream, but for now just fix
+// it here (it's not that much code after all).
+//   https://github.com/google/go-tpm/issues/121
 
 import (
 	"crypto/aes"
@@ -48,7 +51,8 @@
 		return nil, nil, err
 	}
 
-	// The seed length should match the keysize used by the EKs symmetric cipher.
+	// The seed length should match the keysize used by the EK's symmetric
+	// cipher.
 	// For typical RSA EKs, this will be 128 bits (16 bytes).
 	// Spec: TCG 2.0 EK Credential Profile revision 14, section 2.1.5.1.
 	seed := make([]byte, symBlockSize)
@@ -64,8 +68,8 @@
 		return nil, nil, fmt.Errorf("generating encrypted seed: %v", err)
 	}
 
-	// Generate the encrypted credential by convolving the seed with the digest of
-	// the AIK, and using the result as the key to encrypt the secret.
+	// Generate the encrypted credential by convolving the seed with the digest
+	// of the AIK, and using the result as the key to encrypt the secret.
 	// See section 24.4 of TPM 2.0 specification, part 1.
 	aikNameEncoded, err := aik.Encode()
 	if err != nil {
diff --git a/metropolis/pkg/tpm/eventlog/eventlog.go b/metropolis/pkg/tpm/eventlog/eventlog.go
index 49a8a26..8367935 100644
--- a/metropolis/pkg/tpm/eventlog/eventlog.go
+++ b/metropolis/pkg/tpm/eventlog/eventlog.go
@@ -14,7 +14,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Taken and pruned from go-attestation revision 2453c8f39a4ff46009f6a9db6fb7c6cca789d9a1 under Apache 2.0
+// Taken and pruned from go-attestation revision
+// 2453c8f39a4ff46009f6a9db6fb7c6cca789d9a1 under Apache 2.0
 
 package eventlog
 
@@ -98,7 +99,7 @@
 
 // TPM algorithms. See the TPM 2.0 specification section 6.3.
 //
-// https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-2-Structures-01.38.pdf#page=42
+//   https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-2-Structures-01.38.pdf#page=42
 const (
 	algSHA1   uint16 = 0x0004
 	algSHA256 uint16 = 0x000B
@@ -391,11 +392,11 @@
 	return verifiedEvents, nil
 }
 
-// EV_NO_ACTION is a special event type that indicates information to the parser
-// instead of holding a measurement. For TPM 2.0, this event type is used to signal
-// switching from SHA1 format to a variable length digest.
+// EV_NO_ACTION is a special event type that indicates information to the
+// parser instead of holding a measurement. For TPM 2.0, this event type is
+// used to signal switching from SHA1 format to a variable length digest.
 //
-// https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClientSpecPlat_TPM_2p0_1p04_pub.pdf#page=110
+//   https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClientSpecPlat_TPM_2p0_1p04_pub.pdf#page=110
 const eventTypeNoAction = 0x03
 
 // ParseEventLog parses an unverified measurement log.
@@ -457,7 +458,7 @@
 }
 
 // Expected values for various Spec ID Event fields.
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=19
+//   https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=19
 var wantSignature = [16]byte{0x53, 0x70,
 	0x65, 0x63, 0x20, 0x49,
 	0x44, 0x20, 0x45, 0x76,
@@ -471,8 +472,7 @@
 )
 
 // parseSpecIDEvent parses a TCG_EfiSpecIDEventStruct structure from the reader.
-//
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=18
+//   https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=18
 func parseSpecIDEvent(b []byte) (*specIDEvent, error) {
 	r := bytes.NewReader(b)
 	var header struct {
@@ -535,7 +535,7 @@
 }
 
 // TPM 1.2 event log format. See "5.1 SHA1 Event Log Entry Format"
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
+//   https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
 type rawEventHeader struct {
 	PCRIndex  uint32
 	Type      uint32
@@ -580,7 +580,7 @@
 }
 
 // TPM 2.0 event log format. See "5.2 Crypto Agile Log Entry Format"
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
+//   https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
 type rawEvent2Header struct {
 	PCRIndex uint32
 	Type     uint32
diff --git a/metropolis/pkg/tpm/eventlog/internal/events.go b/metropolis/pkg/tpm/eventlog/internal/events.go
index d9b933b..f41ed1c 100644
--- a/metropolis/pkg/tpm/eventlog/internal/events.go
+++ b/metropolis/pkg/tpm/eventlog/internal/events.go
@@ -190,10 +190,10 @@
 	VariableData []byte // []int8
 }
 
-// ParseUEFIVariableData parses the data section of an event structured as
-// a UEFI variable.
+// ParseUEFIVariableData parses the data section of an event structured as a
+// UEFI variable.
 //
-// https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClient_Specific_Platform_Profile_for_TPM_2p0_1p04_PUBLIC.pdf#page=100
+//   https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClient_Specific_Platform_Profile_for_TPM_2p0_1p04_PUBLIC.pdf#page=100
 func ParseUEFIVariableData(r io.Reader) (ret UEFIVariableData, err error) {
 	err = binary.Read(r, binary.LittleEndian, &ret.Header)
 	if err != nil {
@@ -244,15 +244,16 @@
 	return UEFIVariableAuthority{Certs: certs}, err
 }
 
-// efiSignatureData represents the EFI_SIGNATURE_DATA type.
-// See section "31.4.1 Signature Database" in the specification for more information.
+// efiSignatureData represents the EFI_SIGNATURE_DATA type. See section
+// "31.4.1 Signature Database" in the specification for more information.
 type efiSignatureData struct {
 	SignatureOwner efiGUID
 	SignatureData  []byte // []int8
 }
 
 // efiSignatureList represents the EFI_SIGNATURE_LIST type.
-// See section "31.4.1 Signature Database" in the specification for more information.
+// See section "31.4.1 Signature Database" in the specification for more
+// information.
 type efiSignatureListHeader struct {
 	SignatureType       efiGUID
 	SignatureListSize   uint32
diff --git a/metropolis/pkg/tpm/tpm.go b/metropolis/pkg/tpm/tpm.go
index e650ff9..ab02dd3 100644
--- a/metropolis/pkg/tpm/tpm.go
+++ b/metropolis/pkg/tpm/tpm.go
@@ -45,28 +45,32 @@
 )
 
 var (
-	// SecureBootPCRs are all PCRs that measure the current Secure Boot configuration.
-	// This is what we want if we rely on secure boot to verify boot integrity. The firmware
-	// hashes the secure boot policy and custom keys into the PCR.
+	// SecureBootPCRs are all PCRs that measure the current Secure Boot
+	// configuration. This is what we want if we rely on secure boot to verify
+	// boot integrity. The firmware hashes the secure boot policy and custom
+	// keys into the PCR.
 	//
 	// This requires an extra step that provisions the custom keys.
 	//
 	// Some background: https://mjg59.dreamwidth.org/48897.html?thread=1847297
-	// (the initramfs issue mentioned in the article has been solved by integrating
-	// it into the kernel binary, and we don't have a shim bootloader)
+	// (the initramfs issue mentioned in the article has been solved by
+	// integrating it into the kernel binary, and we don't have a shim
+	// bootloader)
 	//
-	// PCR7 alone is not sufficient - it needs to be combined with firmware measurements.
+	// PCR7 alone is not sufficient - it needs to be combined with firmware
+	// measurements.
 	SecureBootPCRs = []int{7}
 
-	// FirmwarePCRs are alle PCRs that contain the firmware measurements
-	// See https://trustedcomputinggroup.org/wp-content/uploads/TCG_EFI_Platform_1_22_Final_-v15.pdf
+	// FirmwarePCRs are all PCRs that contain the firmware measurements. See:
+	//   https://trustedcomputinggroup.org/wp-content/uploads/TCG_EFI_Platform_1_22_Final_-v15.pdf
 	FirmwarePCRs = []int{
 		0, // platform firmware
 		2, // option ROM code
 		3, // option ROM configuration and data
 	}
 
-	// FullSystemPCRs are all PCRs that contain any measurements up to the currently running EFI payload.
+	// FullSystemPCRs are all PCRs that contain any measurements up to the
+	// currently running EFI payload.
 	FullSystemPCRs = []int{
 		0, // platform firmware
 		1, // host platform configuration
@@ -75,21 +79,25 @@
 		4, // EFI payload
 	}
 
-	// Using FullSystemPCRs is the most secure, but also the most brittle option since updating the EFI
-	// binary, updating the platform firmware, changing platform settings or updating the binary
-	// would invalidate the sealed data. It's annoying (but possible) to predict values for PCR4,
-	// and even more annoying for the firmware PCR (comparison to known values on similar hardware
-	// is the only thing that comes to mind).
+	// Using FullSystemPCRs is the most secure, but also the most brittle
+	// option since updating the EFI binary, updating the platform firmware,
+	// changing platform settings or updating the binary would invalidate the
+	// sealed data. It's annoying (but possible) to predict values for PCR4,
+	// and even more annoying for the firmware PCR (comparison to known values
+	// on similar hardware is the only thing that comes to mind).
 	//
-	// See also: https://github.com/mxre/sealkey (generates PCR4 from EFI image, BSD license)
+	// See also: https://github.com/mxre/sealkey (generates PCR4 from EFI
+	// image, BSD license)
 	//
-	// Using only SecureBootPCRs is the easiest and still reasonably secure, if we assume that the
-	// platform knows how to take care of itself (i.e. Intel Boot Guard), and that secure boot
-	// is implemented properly. It is, however, a much larger amount of code we need to trust.
+	// Using only SecureBootPCRs is the easiest and still reasonably secure, if
+	// we assume that the platform knows how to take care of itself (i.e. Intel
+	// Boot Guard), and that secure boot is implemented properly. It is,
+	// however, a much larger amount of code we need to trust.
 	//
-	// We do not care about PCR 5 (GPT partition table) since modifying it is harmless. All of
-	// the boot options and cmdline are hardcoded in the kernel image, and we use no bootloader,
-	// so there's no PCR for bootloader configuration or kernel cmdline.
+	// We do not care about PCR 5 (GPT partition table) since modifying it is
+	// harmless. All of the boot options and cmdline are hardcoded in the
+	// kernel image, and we use no bootloader, so there's no PCR for bootloader
+	// configuration or kernel cmdline.
 )
 
 var (
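The trade-off above, in code (a sketch; Seal is documented further down in
this file):

    // sealSecret binds data either to the Secure Boot policy alone
    // (update-tolerant, but trusts the platform), or to the full measured
    // chain (strict, invalidated by any firmware or EFI payload change).
    func sealSecret(secret []byte, strict bool) ([]byte, error) {
        pcrs := tpm.SecureBootPCRs
        if strict {
            pcrs = tpm.FullSystemPCRs
        }
        return tpm.Seal(secret, pcrs)
    }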
@@ -102,15 +110,17 @@
 var (
 	// ErrNotExists is returned when no TPMs are available in the system
 	ErrNotExists = errors.New("no TPMs found")
-	// ErrNotInitialized is returned when this package was not initialized successfully
+	// ErrNotInitialized is returned when this package was not initialized
+	// successfully.
 	ErrNotInitialized = errors.New("no TPM was initialized")
 )
 
 // Singleton since the TPM is too
 var tpm *TPM
 
-// We're serializing all TPM operations since it has a limited number of handles and recovering
-// if it runs out is difficult to implement correctly. Might also be marginally more secure.
+// We're serializing all TPM operations since it has a limited number of
+// handles and recovering if it runs out is difficult to implement correctly.
+// Might also be marginally more secure.
 var lock sync.Mutex
 
 // TPM represents a high-level interface to a connected TPM 2.0
@@ -118,13 +128,14 @@
 	logger logtree.LeveledLogger
 	device io.ReadWriteCloser
 
-	// We keep the AK loaded since it's used fairly often and deriving it is expensive
+	// We keep the AK loaded since it's used fairly often and deriving it is
+	// expensive.
 	akHandleCache tpmutil.Handle
 	akPublicKey   crypto.PublicKey
 }
 
-// Initialize finds and opens the TPM (if any). If there is no TPM available it returns
-// ErrNotExists
+// Initialize finds and opens the TPM (if any). If there is no TPM available,
+// it returns ErrNotExists.
 func Initialize(logger logtree.LeveledLogger) error {
 	lock.Lock()
 	defer lock.Unlock()
@@ -170,7 +181,8 @@
 	return nil
 }
 
-// GenerateSafeKey uses two sources of randomness (Kernel & TPM) to generate the key
+// GenerateSafeKey uses two sources of randomness (Kernel & TPM) to generate
+// the key
 func GenerateSafeKey(size uint16) ([]byte, error) {
 	lock.Lock()
 	defer lock.Unlock()
@@ -204,8 +216,8 @@
 	return encryptionKey, nil
 }
 
-// Seal seals sensitive data and only allows access if the current platform configuration in
-// matches the one the data was sealed on.
+// Seal seals sensitive data and only allows access if the current platform
+// configuration matches the one the data was sealed on.
 func Seal(data []byte, pcrs []int) ([]byte, error) {
 	lock.Lock()
 	defer lock.Unlock()
@@ -225,8 +237,8 @@
 	return sealedKeyRaw, nil
 }
 
-// Unseal unseals sensitive data if the current platform configuration allows and sealing constraints
-// allow it.
+// Unseal unseals sensitive data if the current platform configuration and
+// sealing constraints allow it.
 func Unseal(data []byte) ([]byte, error) {
 	lock.Lock()
 	defer lock.Unlock()
@@ -256,7 +268,8 @@
 	return unsealedData, nil
 }
 
-// Standard AK template for RSA2048 non-duplicatable restricted signing for attestation
+// Standard AK template for RSA2048 non-duplicatable restricted signing for
+// attestation
 var akTemplate = tpm2.Public{
 	Type:       tpm2.AlgRSA,
 	NameAlg:    tpm2.AlgSHA256,
@@ -272,11 +285,12 @@
 
 func loadAK() error {
 	var err error
-	// Rationale: The AK is an EK-equivalent key and used only for attestation. Using a non-primary
-	// key here would require us to store the wrapped version somewhere, which is inconvenient.
-	// This being a primary key in the Endorsement hierarchy means that it can always be recreated
-	// and can never be "destroyed". Under our security model this is of no concern since we identify
-	// a node by its IK (Identity Key) which we can destroy.
+	// Rationale: The AK is an EK-equivalent key and used only for attestation.
+	// Using a non-primary key here would require us to store the wrapped
+	// version somewhere, which is inconvenient. This being a primary key in
+	// the Endorsement hierarchy means that it can always be recreated and can
+	// never be "destroyed". Under our security model this is of no concern
+	// since we identify a node by its IK (Identity Key) which we can destroy.
 	tpm.akHandleCache, tpm.akPublicKey, err = tpm2.CreatePrimary(tpm.device, tpm2.HandleEndorsement,
 		tpm2.PCRSelection{}, "", "", akTemplate)
 	return err
@@ -284,12 +298,14 @@
 
 // Process documented in TCG EK Credential Profile 2.2.1
 func loadEK() (tpmutil.Handle, crypto.PublicKey, error) {
-	// The EK is a primary key which is supposed to be certified by the manufacturer of the TPM.
-	// Its public attributes are standardized in TCG EK Credential Profile 2.0 Table 1. These need
-	// to match exactly or we aren't getting the key the manufacturere signed. tpm2tools contains
-	// such a template already, so we're using that instead of redoing it ourselves.
-	// This ignores the more complicated ways EKs can be specified, the additional stuff you can do
-	// is just absolutely crazy (see 2.2.1.2 onward)
+	// The EK is a primary key which is supposed to be certified by the
+	// manufacturer of the TPM. Its public attributes are standardized in TCG
+	// EK Credential Profile 2.0 Table 1. These need to match exactly or we
+	// aren't getting the key the manufacturer signed. tpm2tools contains such
+	// a template already, so we're using that instead of redoing it ourselves.
+	// This ignores the more complicated ways EKs can be specified; the
+	// additional stuff you can do is just absolutely crazy (see 2.2.1.2
+	// onward).
 	return tpm2.CreatePrimary(tpm.device, tpm2.HandleEndorsement,
 		tpm2.PCRSelection{}, "", "", tpm2tools.DefaultEKTemplateRSA())
 }
@@ -313,10 +329,11 @@
 	return public.Encode()
 }
 
-// TCG TPM v2.0 Provisioning Guidance v1.0 7.8 Table 2 and
-// TCG EK Credential Profile v2.1 2.2.1.4 de-facto Standard for Windows
-// These are both non-normative and reference Windows 10 documentation that's no longer available :(
-// But in practice this is what people are using, so if it's normative or not doesn't really matter
+// TCG TPM v2.0 Provisioning Guidance v1.0 7.8 Table 2 and TCG EK Credential
+// Profile v2.1 2.2.1.4 de-facto Standard for Windows. These are both
+// non-normative and reference Windows 10 documentation that's no longer
+// available :( But in practice this is what people are using, so whether it's
+// normative or not doesn't really matter.
 const ekCertHandle = 0x01c00002
 
 // GetEKPublic gets the public key and (if available) Certificate of the EK
@@ -345,7 +362,8 @@
 	return publicKey, ekCertRaw, nil
 }
 
-// MakeAKChallenge generates a challenge for TPM residency and attributes of the AK
+// MakeAKChallenge generates a challenge for TPM residency and attributes of
+// the AK
 func MakeAKChallenge(ekPubKey, akPub []byte, nonce []byte) ([]byte, []byte, error) {
 	ekPubKeyData, err := x509.ParsePKIXPublicKey(ekPubKey)
 	if err != nil {
@@ -385,12 +403,14 @@
 	}
 	defer tpm2.FlushContext(tpm.device, ekHandle)
 
-	// This is necessary since the EK requires an endorsement handle policy in its session
-	// For us this is stupid because we keep all hierarchies open anyways since a) we cannot safely
-	// store secrets on the OS side pre-global unlock and b) it makes no sense in this security model
-	// since an uncompromised host OS will not let an untrusted entity attest as itself and a
-	// compromised OS can either not pass PCR policy checks or the game's already over (you
-	// successfully runtime-exploited a production Metropolis node)
+	// This is necessary since the EK requires an endorsement handle policy in
+	// its session. For us this is stupid because we keep all hierarchies open
+	// anyways since a) we cannot safely store secrets on the OS side
+	// pre-global unlock and b) it makes no sense in this security model since
+	// an uncompromised host OS will not let an untrusted entity attest as
+	// itself and a compromised OS can either not pass PCR policy checks or the
+	// game's already over (you successfully runtime-exploited a production
+	// Metropolis node).
 	endorsementSession, _, err := tpm2.StartAuthSession(
 		tpm.device,
 		tpm2.HandleNull,
@@ -412,8 +432,10 @@
 
 	for {
 		solution, err := tpm2.ActivateCredentialUsingAuth(tpm.device, []tpm2.AuthCommand{
-			{Session: tpm2.HandlePasswordSession, Attributes: tpm2.AttrContinueSession}, // Use standard no-password authentication
-			{Session: endorsementSession, Attributes: tpm2.AttrContinueSession},         // Use a full policy session for the EK
+			// Use standard no-password authentication
+			{Session: tpm2.HandlePasswordSession, Attributes: tpm2.AttrContinueSession},
+			// Use a full policy session for the EK
+			{Session: endorsementSession, Attributes: tpm2.AttrContinueSession},
 		}, tpm.akHandleCache, ekHandle, credBlob, secretChallenge)
 		if warn, ok := err.(tpm2.Warning); ok && warn.Code == tpm2.RCRetry {
 			time.Sleep(100 * time.Millisecond)
@@ -445,7 +467,8 @@
 	return nil
 }
 
-// AttestPlatform performs a PCR quote using the AK and returns the quote and its signature
+// AttestPlatform performs a PCR quote using the AK and returns the quote and
+// its signature
 func AttestPlatform(nonce []byte) ([]byte, []byte, error) {
 	lock.Lock()
 	defer lock.Unlock()
@@ -457,9 +480,9 @@
 			return []byte{}, []byte{}, fmt.Errorf("failed to load AK primary key: %w", err)
 		}
 	}
-	// We only care about SHA256 since SHA1 is weak. This is supported on at least GCE and
-	// Intel / AMD fTPM, which is good enough for now. Alg is null because that would just hash the
-	// nonce, which is dumb.
+	// We only care about SHA256 since SHA1 is weak. This is supported on at
+	// least GCE and Intel / AMD fTPM, which is good enough for now. Alg is
+	// null because that would just hash the nonce, which is dumb.
 	quote, signature, err := tpm2.Quote(tpm.device, tpm.akHandleCache, "", "", nonce, srtmPCRs,
 		tpm2.AlgNull)
 	if err != nil {
@@ -468,8 +491,8 @@
 	return quote, signature.RSA.Signature, err
 }
 
-// VerifyAttestPlatform verifies a given attestation. You can rely on all data coming back as being
-// from the TPM on which the AK is bound to.
+// VerifyAttestPlatform verifies a given attestation. You can rely on all data
+// coming back as being from the TPM to which the AK is bound.
 func VerifyAttestPlatform(nonce, akPub, quote, signature []byte) (*tpm2.AttestationData, error) {
 	hash := crypto.SHA256.New()
 	hash.Write(quote)
@@ -495,12 +518,14 @@
 	if err != nil {
 		return nil, err
 	}
-	// quoteData.Magic works together with the TPM's Restricted key attribute. If this attribute is set
-	// (which it needs to be for the AK to be considered valid) the TPM will not sign external data
-	// having this prefix with such a key. Only data that originates inside the TPM like quotes and
-	// key certifications can have this prefix and sill be signed by a restricted key. This check
-	// is thus vital, otherwise somebody can just feed the TPM an arbitrary attestation to sign with
-	// its AK and this function will happily accept the forged attestation.
+	// quoteData.Magic works together with the TPM's Restricted key attribute.
+	// If this attribute is set (which it needs to be for the AK to be
+	// considered valid) the TPM will not sign external data having this prefix
+	// with such a key. Only data that originates inside the TPM like quotes
+	// and key certifications can have this prefix and still be signed by a
+	// restricted key. This check is thus vital, otherwise somebody can just
+	// feed the TPM an arbitrary attestation to sign with its AK and this
+	// function will happily accept the forged attestation.
 	if quoteData.Magic != tpmGeneratedValue {
 		return nil, errors.New("invalid TPM quote: data marker for internal data not set - forged attestation")
 	}
@@ -523,8 +548,9 @@
 	}
 	pcrs := make([][]byte, numSRTMPCRs)
 
-	// The TPM can (and most do) return partial results. Let's just retry as many times as we have
-	// PCRs since each read should return at least one PCR.
+	// TPMs can (and most do) return partial results. Let's just retry as
+	// many times as we have PCRs since each read should return at least one
+	// PCR.
 readLoop:
 	for i := 0; i < numSRTMPCRs; i++ {
 		sel := tpm2.PCRSelection{Hash: tpm2.AlgSHA256}
@@ -554,8 +580,9 @@
 	return pcrs, nil
 }
 
-// GetMeasurmentLog returns the binary log of all data hashed into PCRs. The result can be parsed by eventlog.
-// As this library currently doesn't support extending PCRs it just returns the log as supplied by the EFI interface.
+// GetMeasurementLog returns the binary log of all data hashed into PCRs. The
+// result can be parsed by eventlog. As this library currently doesn't support
+// extending PCRs it just returns the log as supplied by the EFI interface.
 func GetMeasurementLog() ([]byte, error) {
 	return ioutil.ReadFile("/sys/kernel/security/tpm0/binary_bios_measurements")
 }
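
For illustration, a minimal usage sketch of the sealing API documented above,
assuming this package is imported as "tpm" and that SecureBootPCRs is the
exported PCR preset discussed in the comments; error handling is abbreviated
and not authoritative:

	// Generate a key with mixed kernel/TPM entropy, seal it against the
	// Secure Boot PCR set, and later unseal it on a subsequent boot.
	key, err := tpm.GenerateSafeKey(32)
	if err != nil {
		return err
	}
	sealed, err := tpm.Seal(key, tpm.SecureBootPCRs)
	if err != nil {
		return err
	}
	// Persist sealed to disk. On a later boot:
	key, err = tpm.Unseal(sealed)
	if err != nil {
		// The platform configuration (e.g. Secure Boot state) changed,
		// so the TPM refuses to release the key.
		return err
	}
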
diff --git a/metropolis/test/e2e/k8s_cts/main.go b/metropolis/test/e2e/k8s_cts/main.go
index e419b2f..026ca8a 100644
--- a/metropolis/test/e2e/k8s_cts/main.go
+++ b/metropolis/test/e2e/k8s_cts/main.go
@@ -14,8 +14,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// This package launches a Metropolis cluster with two nodes and spawns in the CTS container. Then it streams its output
-// to the console. When the CTS has finished it exits with the appropriate error code.
+// This package launches a Metropolis cluster with two nodes and spawns the
+// CTS container in it. Then it streams its output to the console. When the
+// CTS has finished it exits with the appropriate error code.
 package main
 
 import (
@@ -37,18 +38,22 @@
 	"source.monogon.dev/metropolis/test/launch"
 )
 
-// makeCTSPodSpec generates a spec for a standalone pod running the Kubernetes CTS. It also sets the test configuration
-// for the Kubernetes E2E test suite to only run CTS tests and excludes known-broken ones.
+// makeCTSPodSpec generates a spec for a standalone pod running the Kubernetes
+// CTS. It also sets the test configuration for the Kubernetes E2E test suite
+// to only run CTS tests and excludes known-broken ones.
 func makeCTSPodSpec(name string, saName string) *corev1.Pod {
 	skipRegexes := []string{
-		// hostNetworking cannot be supported since we run different network stacks for the host and containers
+		// hostNetworking cannot be supported since we run different network
+		// stacks for the host and containers
 		"should function for node-pod communication",
-		// gVisor misreports statfs() syscalls: https://github.com/google/gvisor/issues/3339
+		// gVisor misreports statfs() syscalls:
+		//   https://github.com/google/gvisor/issues/3339
 		`should support \((non-)?root,`,
 		"volume on tmpfs should have the correct mode",
 		"volume on default medium should have the correct mode",
-		// gVisor doesn't support the full Linux privilege machinery including SUID and NewPrivs
-		// https://github.com/google/gvisor/issues/189#issuecomment-481064000
+		// gVisor doesn't support the full Linux privilege machinery including
+		// SUID and NewPrivs:
+		//   https://github.com/google/gvisor/issues/189#issuecomment-481064000
 		"should run the container as unprivileged when false",
 	}
 	return &corev1.Pod{
@@ -73,10 +78,12 @@
 					ImagePullPolicy: corev1.PullNever,
 				},
 			},
-			Tolerations: []corev1.Toleration{{ // Tolerate all taints, otherwise the CTS likes to self-evict
+			// Tolerate all taints, otherwise the CTS likes to self-evict.
+			Tolerations: []corev1.Toleration{{
 				Operator: "Exists",
 			}},
-			PriorityClassName:  "system-cluster-critical", // Don't evict the CTS pod
+			// Don't evict the CTS pod.
+			PriorityClassName:  "system-cluster-critical",
 			RestartPolicy:      corev1.RestartPolicyNever,
 			ServiceAccountName: saName,
 		},
@@ -145,7 +152,8 @@
 	}
 	var logs io.ReadCloser
 	go func() {
-		// This loops the whole .Stream()/io.Copy process because the API sometimes returns streams that immediately return EOF
+		// This loops the whole .Stream()/io.Copy process because the API
+		// sometimes returns streams that immediately return EOF
 		for {
 			logs, err = clientSet.CoreV1().Pods("default").GetLogs(podName, &corev1.PodLogOptions{Follow: true}).Stream(ctx)
 			if err == nil {
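
For illustration, a condensed sketch of the stream-retry pattern described in
the comment above, using the same client-go calls visible in this hunk
(clientSet, podName and ctx as in the surrounding code; imports io, os, time
and corev1 assumed, backoff and termination handling omitted):

	for {
		logs, err := clientSet.CoreV1().Pods("default").
			GetLogs(podName, &corev1.PodLogOptions{Follow: true}).Stream(ctx)
		if err != nil {
			time.Sleep(time.Second)
			continue
		}
		// A stream that EOFs immediately makes io.Copy return right away;
		// looping re-requests the logs until the pod actually finishes.
		_, _ = io.Copy(os.Stdout, logs)
		logs.Close()
	}
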
diff --git a/metropolis/test/e2e/kubernetes_helpers.go b/metropolis/test/e2e/kubernetes_helpers.go
index 71e8c3a..3c1f95a 100644
--- a/metropolis/test/e2e/kubernetes_helpers.go
+++ b/metropolis/test/e2e/kubernetes_helpers.go
@@ -33,8 +33,9 @@
 	apb "source.monogon.dev/metropolis/proto/api"
 )
 
-// GetKubeClientSet gets a Kubeconfig from the debug API and creates a K8s ClientSet using it. The identity used has
-// the system:masters group and thus has RBAC access to everything.
+// GetKubeClientSet gets a Kubeconfig from the debug API and creates a K8s
+// ClientSet using it. The identity used has the system:masters group and thus
+// has RBAC access to everything.
 func GetKubeClientSet(ctx context.Context, client apb.NodeDebugServiceClient, port uint16) (kubernetes.Interface, error) {
 	var lastErr = errors.New("context canceled before any operation completed")
 	for {
@@ -67,9 +68,11 @@
 	}
 }
 
-// makeTestDeploymentSpec generates a Deployment spec for a single pod running NGINX with a readiness probe. This allows
-// verifying that the control plane is capable of scheduling simple pods and that kubelet works, its runtime is set up
-// well enough to run a simple container and the network to the pod can pass readiness probe traffic.
+// makeTestDeploymentSpec generates a Deployment spec for a single pod running
+// NGINX with a readiness probe. This allows verifying that the control plane
+// is capable of scheduling simple pods, that kubelet and its runtime are set
+// up well enough to run a simple container, and that the network to the pod
+// can pass readiness probe traffic.
 func makeTestDeploymentSpec(name string) *appsv1.Deployment {
 	return &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{Name: name},
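
As an illustration of the readiness-probe idea described above, a hedged
sketch of an NGINX container with an HTTP readiness probe. Field names follow
the core/v1 API of this era (where Probe embeds Handler); the image name and
port are illustrative, and intstr is "k8s.io/apimachinery/pkg/util/intstr":

	container := corev1.Container{
		Name:  "nginx",
		Image: "nginx:stable",
		Ports: []corev1.ContainerPort{{ContainerPort: 80}},
		ReadinessProbe: &corev1.Probe{
			Handler: corev1.Handler{
				HTTPGet: &corev1.HTTPGetAction{
					// Kubelet only marks the pod Ready once this
					// request succeeds over the pod network.
					Path: "/",
					Port: intstr.FromInt(80),
				},
			},
		},
	}
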
diff --git a/metropolis/test/e2e/main_test.go b/metropolis/test/e2e/main_test.go
index 0463fc4..6d9b1db 100644
--- a/metropolis/test/e2e/main_test.go
+++ b/metropolis/test/e2e/main_test.go
@@ -44,8 +44,8 @@
 const (
 	// Timeout for the global test context.
 	//
-	// Bazel would eventually time out the test after 900s ("large") if, for some reason,
-	// the context cancellation fails to abort it.
+	// Bazel would eventually time out the test after 900s ("large") if, for
+	// some reason, the context cancellation fails to abort it.
 	globalTestTimeout = 600 * time.Second
 
 	// Timeouts for individual end-to-end tests of different sizes.
@@ -53,8 +53,10 @@
 	largeTestTimeout = 120 * time.Second
 )
 
-// TestE2E is the main E2E test entrypoint for single-node freshly-bootstrapped E2E tests. It starts a full Metropolis node
-// in bootstrap mode and then runs tests against it. The actual tests it performs are located in the RunGroup subtest.
+// TestE2E is the main E2E test entrypoint for single-node freshly-bootstrapped
+// E2E tests. It starts a full Metropolis node in bootstrap mode and then runs
+// tests against it. The actual tests it performs are located in the RunGroup
+// subtest.
 func TestE2E(t *testing.T) {
 	// Run pprof server for debugging
 	go func() {
@@ -102,8 +104,9 @@
 	}
 	debugClient := apb.NewNodeDebugServiceClient(grpcClient)
 
-	// This exists to keep the parent around while all the children race
-	// It currently tests both a set of OS-level conditions and Kubernetes Deployments and StatefulSets
+	// This exists to keep the parent around while all the children race.
+	// It currently tests both a set of OS-level conditions and Kubernetes
+	// Deployments and StatefulSets.
 	t.Run("RunGroup", func(t *testing.T) {
 		t.Run("Get Kubernetes Debug Kubeconfig", func(t *testing.T) {
 			t.Parallel()
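
The parent/children pattern referenced in the comment above relies on Go's
testing semantics: a parent subtest does not return until all of its
t.Parallel() children have finished. A minimal sketch (child names are
illustrative):

	t.Run("RunGroup", func(t *testing.T) {
		t.Run("ChildA", func(t *testing.T) {
			t.Parallel() // runs concurrently with ChildB
			// ...
		})
		t.Run("ChildB", func(t *testing.T) {
			t.Parallel()
			// ...
		})
	})
	// Only reached once both children have completed.
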
diff --git a/metropolis/test/e2e/utils.go b/metropolis/test/e2e/utils.go
index f888189..dcc9eac 100644
--- a/metropolis/test/e2e/utils.go
+++ b/metropolis/test/e2e/utils.go
@@ -23,8 +23,9 @@
 	"time"
 )
 
-// testEventual creates a new subtest looping the given function until it either doesn't return an error anymore or
-// the timeout is exceeded. The last returned non-context-related error is being used as the test error.
+// testEventual creates a new subtest looping the given function until it
+// either doesn't return an error anymore or the timeout is exceeded. The last
+// returned non-context-related error is used as the test error.
 func testEventual(t *testing.T, name string, ctx context.Context, timeout time.Duration, f func(context.Context) error) {
 	ctx, cancel := context.WithTimeout(ctx, timeout)
 	t.Helper()
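
A minimal sketch of how the remainder of this helper might look, given the
behavior the comment describes; this assumes context-related errors are
filtered with errors.Is (imports errors, context and time), and the actual
body may differ:

	t.Run(name, func(t *testing.T) {
		t.Parallel()
		defer cancel()
		lastErr := errors.New("timed out before any attempt completed")
		for {
			err := f(ctx)
			if err == nil {
				return
			}
			// Remember only non-context errors as the test failure.
			if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
				lastErr = err
			}
			select {
			case <-ctx.Done():
				t.Fatal(lastErr)
			case <-time.After(100 * time.Millisecond):
			}
		}
	})
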
diff --git a/metropolis/test/ktest/init/main.go b/metropolis/test/ktest/init/main.go
index 0236531..5523432 100644
--- a/metropolis/test/ktest/init/main.go
+++ b/metropolis/test/ktest/init/main.go
@@ -14,10 +14,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// ktestinit is an init designed to run inside a lightweight VM for running tests in there.
-// It performs basic platform initialization like mounting kernel filesystems and launches the
-// test executable at /tester, passes the exit code back out over the control socket to ktest and
-// then terminates the VM kernel.
+// ktestinit is an init designed to run inside a lightweight VM for running
+// tests in there. It performs basic platform initialization like mounting
+// kernel filesystems and launches the test executable at /tester, passes the
+// exit code back out over the control socket to ktest and then terminates the
+// VM kernel.
 package main
 
 import (
diff --git a/metropolis/test/ktest/main.go b/metropolis/test/ktest/main.go
index 17a9f71..f082671 100644
--- a/metropolis/test/ktest/main.go
+++ b/metropolis/test/ktest/main.go
@@ -14,8 +14,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// ktest is a test launcher for running tests inside a custom kernel and passes the results
-// back out.
+// ktest is a test launcher that runs tests inside a custom kernel and passes
+// the results back out.
 package main
 
 import (
diff --git a/metropolis/test/launch/launch.go b/metropolis/test/launch/launch.go
index df36685..f440adb 100644
--- a/metropolis/test/launch/launch.go
+++ b/metropolis/test/launch/launch.go
@@ -46,8 +46,8 @@
 
 type qemuValue map[string][]string
 
-// toOption encodes structured data into a QEMU option.
-// Example: "test", {"key1": {"val1"}, "key2": {"val2", "val3"}} returns "test,key1=val1,key2=val2,key2=val3"
+// toOption encodes structured data into a QEMU option. Example: "test",
+// {"key1": {"val1"}, "key2": {"val2", "val3"}} returns
+// "test,key1=val1,key2=val2,key2=val3"
 func (value qemuValue) toOption(name string) string {
 	var optionValues []string
 	if name != "" {
@@ -84,11 +84,12 @@
 	return out.Close()
 }
 
-// PortMap represents where VM ports are mapped to on the host. It maps from the VM port number to the host port number.
+// PortMap represents where VM ports are mapped to on the host. It maps from
+// the VM port number to the host port number.
 type PortMap map[uint16]uint16
 
-// toQemuForwards generates QEMU hostfwd values (https://qemu.weilnetz.de/doc/qemu-doc.html#:~:text=hostfwd=) for all
-// mapped ports.
+// toQemuForwards generates QEMU hostfwd values for all mapped ports. See:
+//   https://qemu.weilnetz.de/doc/qemu-doc.html#:~:text=hostfwd=
 func (p PortMap) toQemuForwards() []string {
 	var hostfwdOptions []string
 	for vmPort, hostPort := range p {
@@ -97,8 +98,8 @@
 	return hostfwdOptions
 }
 
-// DialGRPC creates a gRPC client for a VM port that's forwarded/mapped to the host. The given port is automatically
-// resolved to the host-mapped port.
+// DialGRPC creates a gRPC client for a VM port that's forwarded/mapped to the
+// host. The given port is automatically resolved to the host-mapped port.
 func (p PortMap) DialGRPC(port uint16, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
 	mappedPort, ok := p[port]
 	if !ok {
@@ -113,24 +114,29 @@
 
 // Options contains all options that can be passed to Launch()
 type Options struct {
-	// Ports contains the port mapping where to expose the internal ports of the VM to the host. See IdentityPortMap()
-	// and ConflictFreePortMap(). Ignored when ConnectToSocket is set.
+	// Ports contains the port mapping used to expose the internal ports of
+	// the VM to the host. See IdentityPortMap() and ConflictFreePortMap().
+	// Ignored when ConnectToSocket is set.
 	Ports PortMap
 
-	// If set to true, reboots are honored. Otherwise all reboots exit the Launch() command. Metropolis nodes
-	// generally restarts on almost all errors, so unless you want to test reboot behavior this should be false.
+	// If set to true, reboots are honored. Otherwise all reboots exit the
+	// Launch() command. Metropolis nodes generally restart on almost all
+	// errors, so unless you want to test reboot behavior this should be false.
 	AllowReboot bool
 
-	// By default the VM is connected to the Host via SLIRP. If ConnectToSocket is set, it is instead connected
-	// to the given file descriptor/socket. If this is set, all port maps from the Ports option are ignored.
-	// Intended for networking this instance together with others for running  more complex network configurations.
+	// By default the VM is connected to the Host via SLIRP. If ConnectToSocket is set,
+	// it is instead connected to the given file descriptor/socket. If this is set, all
+	// port maps from the Ports option are ignored. Intended for networking this
+	// instance together with others for running more complex network configurations.
 	ConnectToSocket *os.File
 
-	// SerialPort is a io.ReadWriter over which you can communicate with the serial port of the machine
-	// It can be set to an existing file descriptor (like os.Stdout/os.Stderr) or any Go structure implementing this interface.
+	// SerialPort is an io.ReadWriter over which you can communicate with the serial
+	// port of the machine. It can be set to an existing file descriptor (like
+	// os.Stdout/os.Stderr) or any Go structure implementing this interface.
 	SerialPort io.ReadWriter
 
-	// NodeParameters is passed into the VM and subsequently used for bootstrapping or registering into a cluster.
+	// NodeParameters is passed into the VM and subsequently used for bootstrapping or
+	// registering into a cluster.
 	NodeParameters *apb.NodeParameters
 }
 
@@ -138,8 +144,9 @@
 var NodePorts = []uint16{node.ConsensusPort, node.NodeServicePort, node.MasterServicePort,
 	node.ExternalServicePort, node.DebugServicePort, node.KubernetesAPIPort, node.DebuggerPort}
 
-// IdentityPortMap returns a port map where each given port is mapped onto itself on the host. This is mainly useful
-// for development against Metropolis. The dbg command requires this mapping.
+// IdentityPortMap returns a port map where each given port is mapped onto itself
+// on the host. This is mainly useful for development against Metropolis. The dbg
+// command requires this mapping.
 func IdentityPortMap(ports []uint16) PortMap {
 	portMap := make(PortMap)
 	for _, port := range ports {
@@ -148,10 +155,11 @@
 	return portMap
 }
 
-// ConflictFreePortMap returns a port map where each given port is mapped onto a random free port on the host. This is
-// intended for automated testing where multiple instances of Metropolis nodes might be running. Please call this
-// function for each Launch command separately and as close to it as possible since it cannot guarantee that the ports
-// will remain free.
+// ConflictFreePortMap returns a port map where each given port is mapped onto a
+// random free port on the host. This is intended for automated testing where
+// multiple instances of Metropolis nodes might be running. Please call this
+// function for each Launch command separately and as close to it as possible since
+// it cannot guarantee that the ports will remain free.
 func ConflictFreePortMap(ports []uint16) (PortMap, error) {
 	portMap := make(PortMap)
 	for _, port := range ports {
@@ -159,7 +167,8 @@
 		if err != nil {
 			return portMap, fmt.Errorf("failed to get free host port: %w", err)
 		}
-		// Defer closing of the listening port until the function is done and all ports are allocated
+		// Defer closing of the listening port until the function is done and all ports are
+		// allocated
 		defer listenCloser.Close()
 		portMap[port] = mappedPort
 	}
@@ -181,21 +190,26 @@
 	return &mac, nil
 }
 
-// Launch launches a Metropolis node instance with the given options. The instance runs mostly paravirtualized but
-// with some emulated hardware similar to how a cloud provider might set up its VMs. The disk is fully writable but
-// is run in snapshot mode meaning that changes are not kept beyond a single invocation.
+// Launch launches a Metropolis node instance with the given options. The instance
+// runs mostly paravirtualized but with some emulated hardware similar to how a
+// cloud provider might set up its VMs. The disk is fully writable but is run in
+// snapshot mode meaning that changes are not kept beyond a single invocation.
 func Launch(ctx context.Context, options Options) error {
-	// Pin temp directory to /tmp until we can use abstract socket namespace in QEMU (next release after 5.0,
-	// https://github.com/qemu/qemu/commit/776b97d3605ed0fc94443048fdf988c7725e38a9). swtpm accepts already-open FDs
-	// so we can pass in an abstract socket namespace FD that we open and pass the name of it to QEMU. Not pinning this
-	// crashes both swtpm and qemu because we run into UNIX socket length limitations (for legacy reasons 108 chars).
+	// Pin temp directory to /tmp until we can use abstract socket namespace in QEMU
+	// (next release after 5.0,
+	// https://github.com/qemu/qemu/commit/776b97d3605ed0fc94443048fdf988c7725e38a9).
+	// swtpm accepts already-open FDs so we can pass in an abstract socket namespace FD
+	// that we open and pass the name of it to QEMU. Not pinning this crashes both
+	// swtpm and qemu because we run into UNIX socket length limitations (for legacy
+	// reasons 108 chars).
 	tempDir, err := ioutil.TempDir("/tmp", "launch*")
 	if err != nil {
 		return fmt.Errorf("failed to create temporary directory: %w", err)
 	}
 	defer os.RemoveAll(tempDir)
 
-	// Copy TPM state into a temporary directory since it's being modified by the emulator
+	// Copy TPM state into a temporary directory since it's being modified by the
+	// emulator
 	tpmTargetDir := filepath.Join(tempDir, "tpm")
 	tpmSrcDir := "metropolis/node/tpm"
 	if err := os.Mkdir(tpmTargetDir, 0755); err != nil {
@@ -315,8 +329,9 @@
 	return err
 }
 
-// NewSocketPair creates a new socket pair. By connecting both ends to different instances you can connect them
-// with a virtual "network cable". The ends can be passed into the ConnectToSocket option.
+// NewSocketPair creates a new socket pair. By connecting both ends to different
+// instances you can connect them with a virtual "network cable". The ends can be
+// passed into the ConnectToSocket option.
 func NewSocketPair() (*os.File, *os.File, error) {
 	fds, err := unix.Socketpair(unix.AF_UNIX, syscall.SOCK_STREAM, 0)
 	if err != nil {
@@ -328,8 +343,8 @@
 	return fd1, fd2, nil
 }
 
-// HostInterfaceMAC is the MAC address the host SLIRP network interface has if it is not disabled (see
-// DisableHostNetworkInterface in MicroVMOptions)
+// HostInterfaceMAC is the MAC address the host SLIRP network interface has if it
+// is not disabled (see DisableHostNetworkInterface in MicroVMOptions)
 var HostInterfaceMAC = net.HardwareAddr{0x02, 0x72, 0x82, 0xbf, 0xc3, 0x56}
 
 // MicroVMOptions contains all options to start a MicroVM
@@ -343,39 +358,47 @@
 	// Cmdline contains additional kernel commandline options
 	Cmdline string
 
-	// SerialPort is a File(descriptor) over which you can communicate with the serial port of the machine
-	// It can be set to an existing file descriptor (like os.Stdout/os.Stderr) or you can use NewSocketPair() to get one
-	// end to talk to from Go.
+	// SerialPort is a File(descriptor) over which you can communicate with the serial
+	// port of the machine. It can be set to an existing file descriptor (like
+	// os.Stdout/os.Stderr) or you can use NewSocketPair() to get one end to talk to
+	// from Go.
 	SerialPort *os.File
 
-	// ExtraChardevs can be used similar to SerialPort, but can contain an arbitrary number of additional serial ports
+	// ExtraChardevs can be used similar to SerialPort, but can contain an arbitrary
+	// number of additional serial ports
 	ExtraChardevs []*os.File
 
-	// ExtraNetworkInterfaces can contain an arbitrary number of file descriptors which are mapped into the VM as virtio
-	// network interfaces. The first interface is always a SLIRP-backed interface for communicating with the host.
+	// ExtraNetworkInterfaces can contain an arbitrary number of file descriptors which
+	// are mapped into the VM as virtio network interfaces. The first interface is
+	// always a SLIRP-backed interface for communicating with the host.
 	ExtraNetworkInterfaces []*os.File
 
-	// PortMap contains ports that are mapped to the host through the built-in SLIRP network interface.
+	// PortMap contains ports that are mapped to the host through the built-in SLIRP
+	// network interface.
 	PortMap PortMap
 
-	// DisableHostNetworkInterface disables the SLIRP-backed host network interface that is normally the first network
-	// interface. If this is set PortMap is ignored. Mostly useful for speeding up QEMU's startup time for tests.
+	// DisableHostNetworkInterface disables the SLIRP-backed host network interface
+	// that is normally the first network interface. If this is set PortMap is ignored.
+	// Mostly useful for speeding up QEMU's startup time for tests.
 	DisableHostNetworkInterface bool
 }
 
-// RunMicroVM launches a tiny VM mostly intended for testing. Very quick to boot (<40ms).
+// RunMicroVM launches a tiny VM mostly intended for testing. Very quick to boot
+// (<40ms).
 func RunMicroVM(ctx context.Context, opts *MicroVMOptions) error {
-	// Generate options for all the file descriptors we'll be passing as virtio "serial ports"
+	// Generate options for all the file descriptors we'll be passing as virtio "serial
+	// ports"
 	var extraArgs []string
 	for idx, _ := range opts.ExtraChardevs {
 		idxStr := strconv.Itoa(idx)
 		id := "extra" + idxStr
-		// That this works is pretty much a hack, but upstream QEMU doesn't have a bidirectional chardev backend not
-		// based around files/sockets on the disk which are a giant pain to work with.
-		// We're using QEMU's fdset functionality to make FDs available as pseudo-files and then "ab"using the pipe
-		// backend's fallback functionality to get a single bidirectional chardev backend backed by a passed-down
-		// RDWR fd.
-		// Ref https://lists.gnu.org/archive/html/qemu-devel/2015-12/msg01256.html
+		// That this works is pretty much a hack, but upstream QEMU doesn't have a
+		// bidirectional chardev backend not based around files/sockets on the disk which
+		// are a giant pain to work with. We're using QEMU's fdset functionality to make
+		// FDs available as pseudo-files and then "ab"using the pipe backend's fallback
+		// functionality to get a single bidirectional chardev backend backed by a
+		// passed-down RDWR fd. Ref:
+		//   https://lists.gnu.org/archive/html/qemu-devel/2015-12/msg01256.html
 		addFdConf := qemuValue{
 			"set": {idxStr},
 			"fd":  {strconv.Itoa(idx + 3)},
@@ -400,23 +423,29 @@
 		extraArgs = append(extraArgs, "-netdev", netdevConf.toOption("socket"), "-device", "virtio-net-device,netdev="+id)
 	}
 
-	// This sets up a minimum viable environment for our Linux kernel.
-	// It clears all standard QEMU configuration and sets up a MicroVM machine
-	// (https://github.com/qemu/qemu/blob/master/docs/microvm.rst) with all legacy emulation turned off. This means
-	// the only "hardware" the Linux kernel inside can communicate with is a single virtio-mmio region. Over that MMIO
-	// interface we run a paravirtualized RNG (since the kernel in there has nothing to gather that from and it
-	// delays booting), a single paravirtualized console and an arbitrary number of extra serial ports for talking to
-	// various things that might run inside. The kernel, initramfs and command line are mapped into VM memory at boot
-	// time and not loaded from any sort of disk. Booting and shutting off one of these VMs takes <100ms.
+	// This sets up a minimum viable environment for our Linux kernel. It clears all
+	// standard QEMU configuration and sets up a MicroVM machine
+	// (https://github.com/qemu/qemu/blob/master/docs/microvm.rst) with all legacy
+	// emulation turned off. This means the only "hardware" the Linux kernel inside can
+	// communicate with is a single virtio-mmio region. Over that MMIO interface we run
+	// a paravirtualized RNG (since the kernel in there has nothing to gather that from
+	// and it delays booting), a single paravirtualized console and an arbitrary number
+	// of extra serial ports for talking to various things that might run inside. The
+	// kernel, initramfs and command line are mapped into VM memory at boot time and
+	// not loaded from any sort of disk. Booting and shutting off one of these VMs
+	// takes <100ms.
 	baseArgs := []string{"-nodefaults", "-no-user-config", "-nographic", "-no-reboot",
 		"-accel", "kvm", "-cpu", "host",
-		// Needed until QEMU updates their bundled qboot version (needs https://github.com/bonzini/qboot/pull/28)
+		// Needed until QEMU updates their bundled qboot version (needs
+		// https://github.com/bonzini/qboot/pull/28)
 		"-bios", "external/com_github_bonzini_qboot/bios.bin",
 		"-M", "microvm,x-option-roms=off,pic=off,pit=off,rtc=off,isa-serial=off",
 		"-kernel", opts.KernelPath,
-		// We force using a triple-fault reboot strategy since otherwise the kernel first tries others (like ACPI) which
-		// are not available in this very restricted environment. Similarly we need to override the boot console since
-		// there's nothing on the ISA bus that the kernel could talk to. We also force quiet for performance reasons.
+		// We force using a triple-fault reboot strategy since otherwise the kernel first
+		// tries others (like ACPI) which are not available in this very restricted
+		// environment. Similarly we need to override the boot console since there's
+		// nothing on the ISA bus that the kernel could talk to. We also force quiet for
+		// performance reasons.
 		"-append", "reboot=t console=hvc0 quiet " + opts.Cmdline,
 		"-initrd", opts.InitramfsPath,
 		"-device", "virtio-rng-device,max-bytes=1024,period=1000",
@@ -457,8 +486,8 @@
 	return err
 }
 
-// QEMUError is a special type of ExitError used when QEMU fails. In addition to normal ExitError features it
-// prints stderr for debugging.
+// QEMUError is a special type of ExitError used when QEMU fails. In addition to
+// normal ExitError features it prints stderr for debugging.
 type QEMUError exec.ExitError
 
 func (e *QEMUError) Error() string {
@@ -478,7 +507,8 @@
 	NumNodes int
 }
 
-// LaunchCluster launches a cluster of Metropolis node VMs together with a Nanoswitch instance to network them all together.
+// LaunchCluster launches a cluster of Metropolis node VMs together with a
+// Nanoswitch instance to network them all together.
 func LaunchCluster(ctx context.Context, opts ClusterOptions) (apb.NodeDebugServiceClient, PortMap, error) {
 	var switchPorts []*os.File
 	var vmPorts []*os.File
@@ -509,9 +539,10 @@
 			},
 		}); err != nil {
 
-			// Launch() only terminates when QEMU has terminated. At that point our function probably doesn't run anymore
-			// so we have no way of communicating the error back up, so let's just log it. Also a failure in launching
-			// VMs should be very visible by the unavailability of the clients we return.
+			// Launch() only terminates when QEMU has terminated. At that point our function
+			// probably doesn't run anymore, so we have no way of communicating the error
+			// back up; let's just log it. Also, a failure in launching VMs should be very
+			// visible through the unavailability of the clients we return.
 			log.Printf("Failed to launch vm0: %v", err)
 		}
 	}()
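
A hypothetical end-to-end use of the launch helpers in this file, tying
together ConflictFreePortMap, Launch and PortMap.DialGRPC (the launch and
node package imports, ctx and error handling are assumed; this is a sketch,
not the test harness's actual wiring):

	portMap, err := launch.ConflictFreePortMap(launch.NodePorts)
	if err != nil {
		return err
	}
	// Launch blocks until QEMU exits, so run it in the background.
	go func() {
		if err := launch.Launch(ctx, launch.Options{Ports: portMap}); err != nil {
			log.Printf("Launch failed: %v", err)
		}
	}()
	// Dial the VM's debug service via its host-mapped port.
	conn, err := portMap.DialGRPC(node.DebugServicePort, grpc.WithInsecure())
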
diff --git a/metropolis/test/nanoswitch/nanoswitch.go b/metropolis/test/nanoswitch/nanoswitch.go
index 3ab662b..21a526e 100644
--- a/metropolis/test/nanoswitch/nanoswitch.go
+++ b/metropolis/test/nanoswitch/nanoswitch.go
@@ -15,9 +15,11 @@
 // limitations under the License.
 
 // nanoswitch is a virtualized switch/router combo intended for testing.
-// It uses the first interface as an external interface to connect to the host and pass traffic in and out. All other
-// interfaces are switched together and served by a built-in DHCP server. Traffic from that network to the
-// SLIRP/external network is SNATed as the host-side SLIRP ignores routed packets.
+// It uses the first interface as an external interface to connect to the host
+// and pass traffic in and out. All other interfaces are switched together and
+// served by a built-in DHCP server. Traffic from that network to the
+// SLIRP/external network is SNATed as the host-side SLIRP ignores routed
+// packets.
 // It also has built-in userspace proxying support for debugging.
 package main
 
@@ -49,17 +51,21 @@
 var switchIP = net.IP{10, 1, 0, 1}
 var switchSubnetMask = net.CIDRMask(24, 32)
 
-// defaultLeaseOptions sets the lease options needed to properly configure connectivity to nanoswitch
+// defaultLeaseOptions sets the lease options needed to properly configure
+// connectivity to nanoswitch.
 func defaultLeaseOptions(reply *dhcpv4.DHCPv4) {
 	reply.GatewayIPAddr = switchIP
-	reply.UpdateOption(dhcpv4.OptDNS(net.IPv4(10, 42, 0, 3))) // SLIRP fake DNS server
+	// SLIRP fake DNS server.
+	reply.UpdateOption(dhcpv4.OptDNS(net.IPv4(10, 42, 0, 3)))
 	reply.UpdateOption(dhcpv4.OptRouter(switchIP))
-	reply.UpdateOption(dhcpv4.OptIPAddressLeaseTime(30 * time.Second)) // Make sure we exercise our DHCP client in E2E tests
+	// Make sure we exercise our DHCP client in E2E tests.
+	reply.UpdateOption(dhcpv4.OptIPAddressLeaseTime(30 * time.Second))
 	reply.UpdateOption(dhcpv4.OptSubnetMask(switchSubnetMask))
 }
 
-// runDHCPServer runs an extremely minimal DHCP server with most options hardcoded, a wrapping bump allocator for the
-// IPs, 30 second lease timeout and no support for DHCP collision detection.
+// runDHCPServer runs an extremely minimal DHCP server with most options
+// hardcoded, a wrapping bump allocator for the IPs, 30 second lease timeout
+// and no support for DHCP collision detection.
 func runDHCPServer(link netlink.Link) supervisor.Runnable {
 	currentIP := net.IP{10, 1, 0, 1}
 
@@ -114,7 +120,8 @@
 	}
 }
 
-// userspaceProxy listens on port and proxies all TCP connections to the same port on targetIP
+// userspaceProxy listens on port and proxies all TCP connections to the same
+// port on targetIP
 func userspaceProxy(targetIP net.IP, port uint16) supervisor.Runnable {
 	return func(ctx context.Context) error {
 		logger := supervisor.Logger(ctx)
@@ -172,7 +179,8 @@
 	return nil
 }
 
-// nfifname converts an interface name into 16 bytes padded with zeroes (for nftables)
+// nfifname converts an interface name into 16 bytes padded with zeroes (for
+// nftables)
 func nfifname(n string) []byte {
 	b := make([]byte, 16)
 	copy(b, []byte(n+"\x00"))
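
For illustration, a rough standalone sketch of the userspace TCP proxying
that userspaceProxy performs, minus the supervisor integration (net/io/fmt
only; production code would also honor context cancellation, and the function
name here is hypothetical):

	func proxy(targetIP net.IP, port uint16) error {
		l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
		if err != nil {
			return err
		}
		defer l.Close()
		for {
			conn, err := l.Accept()
			if err != nil {
				return err
			}
			go func(c net.Conn) {
				defer c.Close()
				// Dial the same port on the proxy target.
				upstream, err := net.Dial("tcp", fmt.Sprintf("%s:%d", targetIP, port))
				if err != nil {
					return
				}
				defer upstream.Close()
				go io.Copy(upstream, c) // client -> target
				io.Copy(c, upstream)    // target -> client
			}(conn)
		}
	}
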
diff --git a/metropolis/vm/kube/apis/vm/v1alpha1/register.go b/metropolis/vm/kube/apis/vm/v1alpha1/register.go
index 3111d54..4eebae5 100644
--- a/metropolis/vm/kube/apis/vm/v1alpha1/register.go
+++ b/metropolis/vm/kube/apis/vm/v1alpha1/register.go
@@ -32,7 +32,8 @@
 	return SchemeGroupVersion.WithKind(kind).GroupKind()
 }
 
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
+// Resource takes an unqualified resource and returns a Group qualified
+// GroupResource
 func Resource(resource string) schema.GroupResource {
 	return SchemeGroupVersion.WithResource(resource).GroupResource()
 }
@@ -40,7 +41,8 @@
 var (
 	// SchemeBuilder initializes a scheme builder
 	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
-	// AddToScheme is a global function that registers this API group & version to a scheme
+	// AddToScheme is a global function that registers this API group & version
+	// to a scheme
 	AddToScheme = SchemeBuilder.AddToScheme
 )
 
diff --git a/metropolis/vm/smoketest/main.go b/metropolis/vm/smoketest/main.go
index d9ff7e3..a1bd3a2 100644
--- a/metropolis/vm/smoketest/main.go
+++ b/metropolis/vm/smoketest/main.go
@@ -14,8 +14,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// This is a small smoke test which will run in a container on top of Metropolis Kubernetes. It exercises Metropolis'
-// KVM device plugin,
+// This is a small smoke test which will run in a container on top of Metropolis
+// Kubernetes. It exercises Metropolis' KVM device plugin.
 package main
 
 import (