*: reflow comments to 80 characters

This reformats the entire Metropolis codebase to have comments no longer
than 80 characters, implementing CR/66.

This has been done half manually, as we don't have a good integration
between commentwrap/Bazel, but that can be implemented if we decide to
go for this tool/limit.

Change-Id: If1fff0b093ef806f5dc00551c11506e8290379d0
diff --git a/build/analysis/nogo_config.json b/build/analysis/nogo_config.json
index ae01f0a..e9054ca 100644
--- a/build/analysis/nogo_config.json
+++ b/build/analysis/nogo_config.json
@@ -91,7 +91,6 @@
   },
   "commentwrap": {
     "exclude_files": {
-      "dev_source_monogon/": "temporary until fixup CR",
       "external/": "third_party"
     }
   }
diff --git a/build/bazel_cc_fix/main.go b/build/bazel_cc_fix/main.go
index 244e849..1be47f7 100644
--- a/build/bazel_cc_fix/main.go
+++ b/build/bazel_cc_fix/main.go
@@ -14,17 +14,20 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// bazel_cc_fix rewrites include directives in C and C++ code. It rewrites all includes in the target workspace to be
-// workspace-relative and additionally supports rewriting includes via a prototxt-based spec file to for example
+// bazel_cc_fix rewrites include directives in C and C++ code. It rewrites all
+// includes in the target workspace to be workspace-relative and additionally
+// supports rewriting includes via a prototxt-based spec file to for example
 // fix up includes for external libraries.
-// The rewritten code can then be used in Bazel intra- and inter-workspace without dealing with any copts or include-
-// related attributes.
-// To know where an include would resolve to it expects a compilation database (see
-// https://clang.llvm.org/docs/JSONCompilationDatabase.html) as an input. It looks at all files in that database and
-// their transitive dependencies and rewrites all of them according to the include paths specified in the compilation
-// command from the database.
-// The compilation database itself is either generated by the original build system or by using intercept-build, which
-// intercepts calls to the compiler and records them into a compilation database.
+// The rewritten code can then be used in Bazel intra- and inter-workspace
+// without dealing with any copts or include-related attributes.
+// To know where an include would resolve to it expects a compilation database
+// (see https://clang.llvm.org/docs/JSONCompilationDatabase.html) as an input.
+// It looks at all files in that database and their transitive dependencies and
+// rewrites all of them according to the include paths specified in the
+// compilation command from the database.
+// The compilation database itself is either generated by the original build
+// system or by using intercept-build, which intercepts calls to the compiler
+// and records them into a compilation database.
 package main
 
 import (
@@ -44,8 +47,9 @@
 	"source.monogon.dev/build/bazel_cc_fix/ccfixspec"
 )
 
-// compilationDBEntry is a single entry from the compilation database which represents a single compiler invocation on
-// a C/C++ source file. It contains the compiler working directory, arguments and input file path.
+// compilationDBEntry is a single entry from the compilation database which
+// represents a single compiler invocation on a C/C++ source file. It contains
+// the compiler working directory, arguments and input file path.
 type compilationDBEntry struct {
 	Directory string   `json:"directory"`
 	Command   string   `json:"command"`
@@ -54,15 +58,18 @@
 	Output    string   `json:"output"`
 }
 
-// compilationDB is a collection of compilationDBEntries usually stored in a big JSON-serialized document.
+// compilationDB is a collection of compilationDBEntries usually stored in a
+// big JSON-serialized document.
 // https://clang.llvm.org/docs/JSONCompilationDatabase.html
 type compilationDB []compilationDBEntry
 
-// rewrites represents a list of include rewrites with the key being the original include statement
-// (like "#include <xyz.h>", with whitespace trimmed on both sides) and the value being another
+// rewrites represents a list of include rewrites with the key being the
+// original include statement (like "#include <xyz.h>", with whitespace trimmed
+// on both sides) and the value being the replacement include statement.
 type rewrites map[string]string
 
-// replacer returns a strings.Replacer which efficiently performs all replacements in a single pass
+// replacer returns a strings.Replacer which efficiently performs all
+// replacements in a single pass
 func (r rewrites) replacer() *strings.Replacer {
 	var replacerArgs []string
 	for from, to := range r {
@@ -107,9 +114,10 @@
 	reIncludeDirective = regexp.MustCompile(`(?m:^\s*#\s*include\s*([<"])(.*)([>"]))`)
 )
 
-// applyReplaceDirectives applies all directives of the given replaceType in directives to originalPath and returns the
-// resulting string. If returnUnmodified is unset, it returns an empty string when no replacements were performed,
-// otherwise it returns the unmodified originalPath.
+// applyReplaceDirectives applies all directives of the given replaceType in
+// directives to originalPath and returns the resulting string. If
+// returnUnmodified is unset, it returns an empty string when no replacements
+// were performed, otherwise it returns the unmodified originalPath.
 // The first rewrite wins, it does not do any recursive processing.
 func applyReplaceDirectives(directives []*ccfixspec.Replace, replaceType ccfixspec.Replace_Type, originalPath string, returnUnmodified bool) string {
 	for _, d := range directives {
@@ -128,9 +136,11 @@
 	return ""
 }
 
-// findFileInWorkspace takes a path from a C include directive and uses the given search path to find its absolute
-// path. If that absolute path is outside the workspace, it returns an empty string, otherwise it returns the path
-// of the file relative to the workspace. It pretends that all files in isGeneratedFile exist on the filesystem.
+// findFileInWorkspace takes a path from a C include directive and uses the
+// given search path to find its absolute path. If that absolute path is
+// outside the workspace, it returns an empty string, otherwise it returns the
+// path of the file relative to the workspace. It pretends that all files in
+// isGeneratedFile exist on the filesystem.
 func findFileInWorkspace(searchPath []string, inclFile string, isGeneratedFile map[string]bool) string {
 	var inclPath string
 	for _, path := range searchPath {
@@ -145,12 +155,13 @@
 		}
 	}
 	if inclPath == "" {
-		// We haven't found the included file. This can happen for system includes (<stdio.h>) or includes from
-		// other operating systems.
+		// We haven't found the included file. This can happen for system
+		// includes (<stdio.h>) or includes from other operating systems.
 		return ""
 	}
 
-	// Ignore all include directives that don't resolve into our workspace after processing
+	// Ignore all include directives that don't resolve into our workspace
+	// after processing
 	if !filepath.HasPrefix(inclPath, *workspacePath) {
 		return ""
 	}
@@ -162,8 +173,9 @@
 	return workspaceRelativeFilePath
 }
 
-// fixIncludesAndGetRefs opens a file, looks at all its includes, records rewriting data into rewriteMetadata and
-// returns all files included by the file for further analysis.
+// fixIncludesAndGetRefs opens a file, looks at all its includes, records
+// rewriting data into rewriteMetadata and returns all files included by the
+// file for further analysis.
 func (m rewriteMetadata) fixIncludesAndGetRefs(filePath string, quoteIncludes, systemIncludes []string, spec *ccfixspec.CCFixSpec, isGeneratedFile map[string]bool) []string {
 	meta, ok := m[filePath]
 	if !ok {
@@ -206,21 +218,24 @@
 			includeFiles = append(includeFiles, filepath.Join(*workspacePath, workspaceRelativeFilePath))
 		}
 
-		// Pretend that a generated file exists at the given path when stripping the BuildDir prefix. This is
-		// generally true for all out-of-tree build systems and saves the user from needing to manually specify
-		// lots of GeneratedFiles.
+		// Pretend that a generated file exists at the given path when
+		// stripping the BuildDir prefix. This is generally true for all
+		// out-of-tree build systems and saves the user from needing to
+		// manually specify lots of GeneratedFiles.
 		if spec.BuildDir != "" && filepath.HasPrefix(workspaceRelativeFilePath, spec.BuildDir+"/") {
 			workspaceRelativeFilePath = filepath.Clean(strings.TrimPrefix(workspaceRelativeFilePath, spec.BuildDir+"/"))
 			foundGenerated = true
 		}
 
-		// Shorten include paths when both files are in the same directory except when a generated file is involved
-		// as these end up in physically different locations and need to be referenced using a full workspace-
-		// relative path
+		// Shorten include paths when both files are in the same directory
+		// except when a generated file is involved as these end up in
+		// physically different locations and need to be referenced using a
+// full workspace-relative path
 		if !foundGenerated && filepath.Dir(filePath) == filepath.Dir(filepath.Join(*workspacePath, workspaceRelativeFilePath)) {
 			workspaceRelativeFilePath = filepath.Base(workspaceRelativeFilePath)
 		}
-		// Don't perform rewrites when both include directives are semantically equivalent
+		// Don't perform rewrites when both include directives are semantically
+		// equivalent
 		if workspaceRelativeFilePath == inclFile && inclType == "\"" {
 			continue
 		}
@@ -229,7 +244,8 @@
 	return includeFiles
 }
 
-// getIncludeDirs takes a compilation database entry and returns the search paths for both system and quote includes
+// getIncludeDirs takes a compilation database entry and returns the search
+// paths for both system and quote includes
 func getIncludeDirs(entry compilationDBEntry) (quoteIncludes []string, systemIncludes []string, err error) {
 	// Normalize arguments
 	if len(entry.Arguments) == 0 {
@@ -325,7 +341,8 @@
 
 	rewriteMetadata := make(rewriteMetadata)
 
-	// Iterate over all source files in the compilation database and analyze them one-by-one
+	// Iterate over all source files in the compilation database and analyze
+	// them one-by-one
 	for _, entry := range compilationDB {
 		quoteIncludes, systemIncludes, err := getIncludeDirs(entry)
 		if err != nil {
diff --git a/build/fietsje/dependency.go b/build/fietsje/dependency.go
index 6b75000..709b457 100644
--- a/build/fietsje/dependency.go
+++ b/build/fietsje/dependency.go
@@ -27,18 +27,21 @@
 	"github.com/bazelbuild/bazel-gazelle/label"
 )
 
-// dependency is an external Go package/module, requested by the user of Fietsje directly or indirectly.
+// dependency is an external Go package/module, requested by the user of Fietsje
+// directly or indirectly.
 type dependency struct {
 	// importpath is the Go import path that was used to import this dependency.
 	importpath string
-	// version at which this dependency has been requested. This can be in any form that `go get` or the go module
-	// system understands.
+	// version at which this dependency has been requested. This can be in any form
+	// that `go get` or the go module system understands.
 	version string
 
-	// locked is the 'resolved' version of a dependency, containing information about the dependency's hash, etc.
+	// locked is the 'resolved' version of a dependency, containing information about
+	// the dependency's hash, etc.
 	locked *locked
 
-	// parent is the dependency that pulled in this one, or nil if pulled in by the user.
+	// parent is the dependency that pulled in this one, or nil if pulled in by the
+	// user.
 	parent *dependency
 
 	shelf *shelf
@@ -50,9 +53,10 @@
 	patches              []string
 	prePatches           []string
 	buildExtraArgs       []string
-	// replace is an importpath that this dependency will replace. If this is set, this dependency will be visible
-	// in the build as 'importpath', but downloaded at 'replace'/'version'. This might be slighly confusing, but
-	// follows the semantics of what Gazelle exposes via 'replace' in 'go_repository'.
+	// replace is an importpath that this dependency will replace. If this is set, this
+	// dependency will be visible in the build as 'importpath', but downloaded at
+// 'replace'/'version'. This might be slightly confusing, but follows the semantics
+	// of what Gazelle exposes via 'replace' in 'go_repository'.
 	replace string
 }
 
@@ -63,20 +67,24 @@
 	return d.importpath
 }
 
-// locked is information about a dependency resolved from the go module system. It is expensive to get, and as such
-// it is cached both in memory (as .locked in a dependency) and in the shelf.
+// locked is information about a dependency resolved from the go module system. It
+// is expensive to get, and as such it is cached both in memory (as .locked in a
+// dependency) and in the shelf.
 type locked struct {
-	// bazelName is the external workspace name that Bazel should use for this dependency, eg. com_github_google_glog.
+	// bazelName is the external workspace name that Bazel should use for this
+	// dependency, eg. com_github_google_glog.
 	bazelName string
-	// sum is the gomod compatible checksum of the depdendency, egh1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=.
+// sum is the gomod compatible checksum of the dependency,
+// eg. h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=.
 	sum string
-	// semver is the gomod-compatible version of this dependency. If the dependency was requested by git hash that does
-	// not resolve to a particular release, this will be in the form of v0.0.0-20200520133742-deadbeefcafe.
+	// semver is the gomod-compatible version of this dependency. If the dependency was
+	// requested by git hash that does not resolve to a particular release, this will
+	// be in the form of v0.0.0-20200520133742-deadbeefcafe.
 	semver string
 }
 
-// child creates a new child dependence for this dependency, ie. one where the 'parent' pointer points to the dependency
-// on which this method is called.
+// child creates a new child dependence for this dependency, ie. one where the
+// 'parent' pointer points to the dependency on which this method is called.
 func (d *dependency) child(importpath, version string) *dependency {
 	return &dependency{
 		importpath: importpath,
@@ -93,8 +101,9 @@
 	return fmt.Sprintf("%s@%s", d.importpath, d.version)
 }
 
-// lock ensures that this dependency is locked, which means that it has been resolved to a particular, stable version
-// and VCS details. We lock a dependency by either asking the go module subsystem (via a go module proxy or a download),
+// lock ensures that this dependency is locked, which means that it has been
+// resolved to a particular, stable version and VCS details. We lock a dependency
+// by either asking the go module subsystem (via a go module proxy or a download),
 // or by consulting the shelf as a cache.
 func (d *dependency) lock() error {
 	// If already locked in-memory, use that.
@@ -133,8 +142,8 @@
 	return fmt.Sprintf("%s@%s", l.bazelName, l.sum)
 }
 
-// download ensures that this dependency is download locally, and returns the download location and the dependency's
-// gomod-compatible sum.
+// download ensures that this dependency is download locally, and returns the
+// download location and the dependency's gomod-compatible sum.
 func (d *dependency) download() (version, dir, sum string, err error) {
 	goroot := os.Getenv("GOROOT")
 	if goroot == "" {
diff --git a/build/fietsje/deps_containerd.go b/build/fietsje/deps_containerd.go
index 6154960..99d36f6 100644
--- a/build/fietsje/deps_containerd.go
+++ b/build/fietsje/deps_containerd.go
@@ -106,11 +106,11 @@
 		// not yet merged by upstream.
 		// See: https://github.com/containerd/ttrpc/pull/67
 		//
-		// It also contains our own fix that builds up on the above and allows
-		// services to return the original status error library values. This is
-		// required for ttrpc to actually work from runsc and for results to be
-		// correctly interpreted by containerd.
-		// See: https://github.com/monogon-dev/ttrpc/commit/222b428f008e3ecb11cfff12e3fd92e3143a2f01
+		// It also contains our own fix that builds up on the above and allows services to
+		// return the original status error library values. This is required for ttrpc to
+		// actually work from runsc and for results to be correctly interpreted by
+		// containerd. See:
+		//   https://github.com/monogon-dev/ttrpc/commit/222b428f008e3ecb11cfff12e3fd92e3143a2f01
 		//
 		// Note: this is not a good fix, and has known issues, like not being
 		// able to return Details in gRPC status errors. However, with the
@@ -121,10 +121,10 @@
 		"github.com/monogon-dev/ttrpc", "222b428f008e3ecb11cfff12e3fd92e3143a2f01",
 	)
 
-	// This is depended on by github.com/containerd/containerd, but not mentioned in their
-	// vendor.conf. They seem to be moving off of vendoring to gomod, so this should be
-	// reverted on the next containerd bump (when fietsje will panic about vendor.conf
-	// missing).
+	// This is depended on by github.com/containerd/containerd, but not mentioned in
+	// their vendor.conf. They seem to be moving off of vendoring to gomod, so this
+	// should be reverted on the next containerd bump (when fietsje will panic about
+	// vendor.conf missing).
 	p.collectOverride(
 		"github.com/checkpoint-restore/go-criu/v4", "v4.1.0",
 	)
diff --git a/build/fietsje/deps_gvisor.go b/build/fietsje/deps_gvisor.go
index 3209aa4..3414e4c 100644
--- a/build/fietsje/deps_gvisor.go
+++ b/build/fietsje/deps_gvisor.go
@@ -32,6 +32,7 @@
 		"github.com/mohae/deepcopy",
 		"golang.org/x/time",
 	)
-	// gRPC is used by gvisor's bazel machinery, but not present in go.sum. Include it manually.
+	// gRPC is used by gvisor's bazel machinery, but not present in go.sum. Include it
+	// manually.
 	p.collect("github.com/grpc/grpc", "v1.29.1")
 }
diff --git a/build/fietsje/main.go b/build/fietsje/main.go
index 50055ab..4473483 100644
--- a/build/fietsje/main.go
+++ b/build/fietsje/main.go
@@ -53,8 +53,8 @@
 		shelf: shelf,
 	}
 
-	// Currently can't bump past v1.30.0, as that removes the old balancer.Picker API that
-	// go-etcd depends upon. See https://github.com/etcd-io/etcd/pull/12398 .
+	// Currently can't bump past v1.30.0, as that removes the old balancer.Picker API
+	// that go-etcd depends upon. See https://github.com/etcd-io/etcd/pull/12398 .
 	p.collect(
 		"google.golang.org/grpc", "v1.29.1",
 	).use(
@@ -147,8 +147,9 @@
 		"github.com/muesli/reflow",
 	)
 
-	// First generate the repositories starlark rule into memory. This is because rendering will lock all unlocked
-	// dependencies, which might take a while. If a use were to interrupt it now, they would end up with an incomplete
+	// First generate the repositories starlark rule into memory. This is because
+	// rendering will lock all unlocked dependencies, which might take a while. If a
+// user were to interrupt it now, they would end up with an incomplete
 	// repositories.bzl and would have to restore from git.
 	buf := bytes.NewBuffer(nil)
 	err = p.render(buf)
diff --git a/build/fietsje/planner.go b/build/fietsje/planner.go
index d006039..be955cf 100644
--- a/build/fietsje/planner.go
+++ b/build/fietsje/planner.go
@@ -20,28 +20,31 @@
 	"fmt"
 )
 
-// The Planner provides the main DSL and high-level control logic for resolving dependencies. It is the main API that
-// fietsje users should consume.
+// The Planner provides the main DSL and high-level control logic for resolving
+// dependencies. It is the main API that fietsje users should consume.
 
-// planner is a builder for a single world of Go package dependencies, and what is then emitted into a Starlark file
-// containing gazelle go_repository rules.
-// The planner's builder system covers three increasingly specific contextx:
+// planner is a builder for a single world of Go package dependencies, and what is
+// then emitted into a Starlark file containing gazelle go_repository rules. The
+// planner's builder system covers three increasingly specific contexts:
 //  - planner (this structure, allows for 'collecting' in high-level dependencies. ie. collections)
 //  - collection (represents what has been pulled in by a high-level dependency, and allows for 'using' transitive
 //    dependencies from a collection)
 //  - optionized (represents a collection with extra build flags, eg. disabled proto builds)
 type planner struct {
-	// available is a map of importpaths to dependencies that the planner knows. This is a flat structure that is the
-	// main source of truth of actual dependency data, like a registry of everything that the planner knows about.
-	// The available dependency for a given importpath, as the planner progresses, might change, ie. when there is a
-	// version conflict. As such, code should use importpaths as atoms describing dependencies, instead of holding
-	// dependency pointers.
+	// available is a map of importpaths to dependencies that the planner knows. This
+	// is a flat structure that is the main source of truth of actual dependency data,
+	// like a registry of everything that the planner knows about. The available
+	// dependency for a given importpath, as the planner progresses, might change, ie.
+	// when there is a version conflict. As such, code should use importpaths as atoms
+	// describing dependencies, instead of holding dependency pointers.
 	available map[string]*dependency
-	// enabled is a map of dependencies that will be emitted by the planner into the build via Gazelle.
+	// enabled is a map of dependencies that will be emitted by the planner into the
+	// build via Gazelle.
 	enabled map[string]bool
-	// seen is a map of 'dependency' -> 'parent' importpaths, ie. returns what higher-level dependency (ie. one enabled
-	// with .collect()) pulled in a given dependency. This is only used for error messages to help the user find what
-	// a transitive  dependency has been pulled in by.
+	// seen is a map of 'dependency' -> 'parent' importpaths, ie. returns what higher-
+	// level dependency (ie. one enabled with .collect()) pulled in a given dependency.
+	// This is only used for error messages to help the user find what a transitive
+	// dependency has been pulled in by.
 	seen map[string]string
 
 	shelf *shelf
@@ -89,7 +92,8 @@
 	}
 	// add transitive deps to 'available' map
 	for k, v := range td {
-		// skip dependencies that have already been enabled, dependencies are 'first enabled version wins'.
+		// skip dependencies that have already been enabled, dependencies are 'first
+		// enabled version wins'.
 		if _, ok := p.available[k]; ok && p.enabled[k] {
 			continue
 		}
@@ -107,8 +111,9 @@
 	}
 }
 
-// collection represents the context of the planner after pulling/collecting in a high-level dependency. In this state,
-// the planner can be used to enable transitive dependencies of the high-level dependency.
+// collection represents the context of the planner after pulling/collecting in a
+// high-level dependency. In this state, the planner can be used to enable
+// transitive dependencies of the high-level dependency.
 type collection struct {
 	p *planner
 
@@ -116,18 +121,21 @@
 	transitive map[string]*dependency
 }
 
-// use enables given dependencies defined in the collection by a high-level dependency.
+// use enables given dependencies defined in the collection by a high-level
+// dependency.
 func (c *collection) use(paths ...string) *collection {
 	return c.with().use(paths...)
 }
 
-// replace injects a new dependency with a replacement importpath. This is used to reflect 'replace' stanzas in go.mod
-// files of third-party dependencies. This is not done automatically by Fietsje, as a replacement is global to the
-// entire build tree, and should be done knowingly and explicitly by configuration. The 'oldpath' importpath will be
-// visible to the build system, but will be backed at 'newpath' locked at 'version'.
+// replace injects a new dependency with a replacement importpath. This is used to
+// reflect 'replace' stanzas in go.mod files of third-party dependencies. This is
+// not done automatically by Fietsje, as a replacement is global to the entire
+// build tree, and should be done knowingly and explicitly by configuration. The
+// 'oldpath' importpath will be visible to the build system, but will be backed at
+// 'newpath' locked at 'version'.
 func (c *collection) replace(oldpath, newpath, version string) *collection {
-	// Ensure oldpath is in use. We want as little replacements as possible, and if it's not being used by anything,
-	// it means that we likely don't need it.
+	// Ensure oldpath is in use. We want as little replacements as possible, and if
+	// it's not being used by anything, it means that we likely don't need it.
 	c.use(oldpath)
 
 	d := c.highlevel.child(oldpath, version)
@@ -139,9 +147,11 @@
 	return c
 }
 
-// inject adds a dependency to a collection as if requested by the high-level dependency of the collection. This should
-// be used sparingly, for instance when high-level dependencies contain bazel code that uses some external workspaces
-// from Go modules, and those workspaces are not defined in parsed transitive dependency definitions like go.mod/sum.
+// inject adds a dependency to a collection as if requested by the high-level
+// dependency of the collection. This should be used sparingly, for instance when
+// high-level dependencies contain bazel code that uses some external workspaces
+// from Go modules, and those workspaces are not defined in parsed transitive
+// dependency definitions like go.mod/sum.
 func (c *collection) inject(importpath, version string, opts ...buildOpt) *collection {
 	d := c.highlevel.child(importpath, version)
 	c.transitive[importpath] = d
@@ -162,8 +172,8 @@
 	}
 }
 
-// optionized is a collection that has some build options set, that will be applied to all dependencies 'used' in this
-// context
+// optionized is a collection that has some build options set, that will be applied
+// to all dependencies 'used' in this context
 type optionized struct {
 	c    *collection
 	opts []buildOpt
@@ -191,7 +201,8 @@
 	}
 }
 
-// prePatches applies patches in affected dependencies before BUILD file generation.
+// prePatches applies patches in affected dependencies before BUILD file
+// generation.
 func prePatches(patches ...string) buildOpt {
 	return func(d *dependency) {
 		d.prePatches = patches
@@ -208,8 +219,9 @@
 	}
 }
 
-// use enables given dependencies defined in the collection by a high-level dependency, with any set build options.
-// After returning, the builder degrades to a collection - ie, all build options are reset.
+// use enables given dependencies defined in the collection by a high-level
+// dependency, with any set build options. After returning, the builder degrades to
+// a collection - ie, all build options are reset.
 func (o *optionized) use(paths ...string) *collection {
 	for _, path := range paths {
 		el, ok := o.c.transitive[path]
diff --git a/build/fietsje/render.go b/build/fietsje/render.go
index 3374e8f..03e2857 100644
--- a/build/fietsje/render.go
+++ b/build/fietsje/render.go
@@ -22,7 +22,8 @@
 	"sort"
 )
 
-// render writes a gazelle-compatible starlark file based on the enabled dependencies in this planner.
+// render writes a gazelle-compatible starlark file based on the enabled
+// dependencies in this planner.
 func (p *planner) render(w io.Writer) error {
 	fmt.Fprintln(w, `load("@bazel_gazelle//:deps.bzl", "go_repository")`)
 	fmt.Fprintln(w, ``)
diff --git a/build/fietsje/shelf.go b/build/fietsje/shelf.go
index 99a06c5..963c139 100644
--- a/build/fietsje/shelf.go
+++ b/build/fietsje/shelf.go
@@ -29,17 +29,21 @@
 	pb "source.monogon.dev/build/fietsje/proto"
 )
 
-// The Shelf is a combined cache and dependency lockfile, not unlike go.sum. It's implemented as a text proto file on
-// disk, and currently stores a single mapping of shelfKeys to shelfValues, which are in order a (importpath, version)
-// tuple and the `locked` structure of a dependency.
-// The resulting shelf file should be commited to the monogon repository. It can be freely deleted to force recreation from
-// scratch, which can be useful as there is no garbage collection implemented for it.
-// The 'lockfile' aspect of the Shelf is counter-intuitive to what readers might be used to from other dependency
-// management systems. It does not lock a third-party dependency to a particular version, but only locks a well defined
-// version to its checksum. As such, recreating the shelf from scratch should not bump any dependencies, unless some
-// upstream-project retagged a release to a different VCS commit, or a fietsje user pinned to 'master' instead of a
-// particular commit. The effective changes will always be reflected in the resulting starlark repository ruleset,
-// which (also being commited to source control) can be used as a canary of a version being effectively bumped.
+// The Shelf is a combined cache and dependency lockfile, not unlike go.sum. It's
+// implemented as a text proto file on disk, and currently stores a single mapping
+// of shelfKeys to shelfValues, which are in order a (importpath, version) tuple
+// and the `locked` structure of a dependency. The resulting shelf file should be
+// committed to the monogon repository. It can be freely deleted to force recreation
+// from scratch, which can be useful as there is no garbage collection implemented
+// for it. The 'lockfile' aspect of the Shelf is counter-intuitive to what readers
+// might be used to from other dependency management systems. It does not lock a
+// third-party dependency to a particular version, but only locks a well defined
+// version to its checksum. As such, recreating the shelf from scratch should not
+// bump any dependencies, unless some upstream-project retagged a release to a
+// different VCS commit, or a fietsje user pinned to 'master' instead of a
+// particular commit. The effective changes will always be reflected in the
+// resulting starlark repository ruleset, which (also being committed to source
+// control) can be used as a canary of a version being effectively bumped.
 
 // shelfKey is the key into the shelf map structure.
 type shelfKey struct {
@@ -107,12 +111,14 @@
 	return res.l
 }
 
-// put stores a given locked entry in memory. This will not be commited to disk until .save() is called.
+// put stores a given locked entry in memory. This will not be commited to disk
+// until .save() is called.
 func (s *shelf) put(importpath, version string, l *locked) {
 	s.data[shelfKey{importpath: importpath, version: version}] = shelfValue{l: l}
 }
 
-// save commits the shelf to disk (to the same location it was loaded from), fully overwriting from in-memory data.
+// save commits the shelf to disk (to the same location it was loaded from), fully
+// overwriting from in-memory data.
 func (s *shelf) save() error {
 	// Build proto representation of shelf data.
 	var shelfProto pb.Shelf
diff --git a/build/fietsje/transitive.go b/build/fietsje/transitive.go
index f4a6e1d..e42199b 100644
--- a/build/fietsje/transitive.go
+++ b/build/fietsje/transitive.go
@@ -26,14 +26,14 @@
 	"golang.org/x/mod/modfile"
 )
 
-// getTransitiveDeps is a hairy ball of heuristic used to find all recursively transitive dependencies of a given
-// dependency.
-// It downloads a given dependency using `go get`, and performs analysis of standard (go.mod/go.sum) and project-
-// specific dependency management configuration/lock files in order to build a full view of all known, versioned
-// transitive dependencies.
+// getTransitiveDeps is a hairy ball of heuristic used to find all recursively
+// transitive dependencies of a given dependency. It downloads a given dependency
+// using `go get`, and performs analysis of standard (go.mod/go.sum) and project-
+// specific dependency management configuration/lock files in order to build a full
+// view of all known, versioned transitive dependencies.
 func (d *dependency) getTransitiveDeps() (map[string]*dependency, error) {
-	// First, lock the dependency. Downloading it later will also return a sum, and we want to ensure both are the
-	// same.
+	// First, lock the dependency. Downloading it later will also return a sum, and we
+	// want to ensure both are the same.
 	err := d.lock()
 	if err != nil {
 		return nil, fmt.Errorf("could not lock: %v", err)
@@ -127,9 +127,10 @@
 		}
 	}
 
-	// Special case: root Kubernetes repo - rewrite staging/ deps to k8s.io/ at correct versions, quit early.
-	// Kubernetes vendors all dependencies into vendor/, and also contains sub-projects (components) in staging/.
-	// This converts all staging dependencies into appropriately versioned k8s.io/<dep> paths.
+	// Special case: root Kubernetes repo - rewrite staging/ deps to k8s.io/ at correct
+	// versions, quit early. Kubernetes vendors all dependencies into vendor/, and also
+	// contains sub-projects (components) in staging/. This converts all staging
+	// dependencies into appropriately versioned k8s.io/<dep> paths.
 	if d.importpath == "k8s.io/kubernetes" {
 		log.Printf("%q: special case for Kubernetes main repository", d.importpath)
 		if mf == nil {