treewide: introduce osbase package and move things around

All packages except localregistry were moved from metropolis/pkg to osbase;
localregistry was moved to metropolis/test, as it's only used there anyway.

Change-Id: If1a4bf377364bef0ac23169e1b90379c71b06d72
Reviewed-on: https://review.monogon.dev/c/monogon/+/3079
Tested-by: Jenkins CI
Reviewed-by: Serge Bazanski <serge@monogon.tech>
diff --git a/metropolis/cli/dbg/BUILD.bazel b/metropolis/cli/dbg/BUILD.bazel
index 46ac991..39f19a3 100644
--- a/metropolis/cli/dbg/BUILD.bazel
+++ b/metropolis/cli/dbg/BUILD.bazel
@@ -6,9 +6,9 @@
     importpath = "source.monogon.dev/metropolis/cli/dbg",
     visibility = ["//visibility:private"],
     deps = [
-        "//metropolis/pkg/logtree",
         "//metropolis/proto/api",
         "//metropolis/proto/common",
+        "//osbase/logtree",
         "@io_k8s_cli_runtime//pkg/genericclioptions",
         "@io_k8s_component_base//cli",
         "@io_k8s_kubectl//pkg/cmd",
diff --git a/metropolis/cli/dbg/main.go b/metropolis/cli/dbg/main.go
index f087313..3bb762e 100644
--- a/metropolis/cli/dbg/main.go
+++ b/metropolis/cli/dbg/main.go
@@ -32,7 +32,7 @@
 	"k8s.io/kubectl/pkg/cmd/plugin"
 	"k8s.io/kubectl/pkg/cmd/util"
 
-	"source.monogon.dev/metropolis/pkg/logtree"
+	"source.monogon.dev/osbase/logtree"
 
 	apb "source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
diff --git a/metropolis/cli/metroctl/BUILD.bazel b/metropolis/cli/metroctl/BUILD.bazel
index 37f4b56..e390ff8 100644
--- a/metropolis/cli/metroctl/BUILD.bazel
+++ b/metropolis/cli/metroctl/BUILD.bazel
@@ -46,12 +46,12 @@
         "//metropolis/node/core/identity",
         "//metropolis/node/core/rpc",
         "//metropolis/node/core/rpc/resolver",
-        "//metropolis/pkg/blkio",
-        "//metropolis/pkg/fat32",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/logtree/proto",
         "//metropolis/proto/api",
         "//metropolis/proto/common",
+        "//osbase/blkio",
+        "//osbase/fat32",
+        "//osbase/logtree",
+        "//osbase/logtree/proto",
         "//version",
         "@com_github_adrg_xdg//:xdg",
         "@com_github_spf13_cobra//:cobra",
diff --git a/metropolis/cli/metroctl/cmd_install.go b/metropolis/cli/metroctl/cmd_install.go
index fc463b4..992da3e 100644
--- a/metropolis/cli/metroctl/cmd_install.go
+++ b/metropolis/cli/metroctl/cmd_install.go
@@ -17,8 +17,8 @@
 	cpb "source.monogon.dev/metropolis/proto/common"
 
 	"source.monogon.dev/metropolis/cli/metroctl/core"
-	"source.monogon.dev/metropolis/pkg/blkio"
-	"source.monogon.dev/metropolis/pkg/fat32"
+	"source.monogon.dev/osbase/blkio"
+	"source.monogon.dev/osbase/fat32"
 )
 
 var installCmd = &cobra.Command{
diff --git a/metropolis/cli/metroctl/cmd_node_logs.go b/metropolis/cli/metroctl/cmd_node_logs.go
index 77dd8c2..5b8cef9 100644
--- a/metropolis/cli/metroctl/cmd_node_logs.go
+++ b/metropolis/cli/metroctl/cmd_node_logs.go
@@ -8,10 +8,10 @@
 	"github.com/spf13/cobra"
 
 	"source.monogon.dev/metropolis/cli/metroctl/core"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	lpb "source.monogon.dev/metropolis/pkg/logtree/proto"
 	"source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
+	"source.monogon.dev/osbase/logtree"
+	lpb "source.monogon.dev/osbase/logtree/proto"
 )
 
 type metroctlLogFlags struct {
diff --git a/metropolis/cli/metroctl/core/BUILD.bazel b/metropolis/cli/metroctl/core/BUILD.bazel
index 1795765..36f02c3 100644
--- a/metropolis/cli/metroctl/core/BUILD.bazel
+++ b/metropolis/cli/metroctl/core/BUILD.bazel
@@ -16,10 +16,10 @@
         "//metropolis/node/core/curator/proto/api",
         "//metropolis/node/core/rpc",
         "//metropolis/node/core/rpc/resolver",
-        "//metropolis/pkg/blockdev",
-        "//metropolis/pkg/fat32",
-        "//metropolis/pkg/gpt",
         "//metropolis/proto/api",
+        "//osbase/blockdev",
+        "//osbase/fat32",
+        "//osbase/gpt",
         "@io_k8s_client_go//pkg/apis/clientauthentication/v1:clientauthentication",
         "@io_k8s_client_go//tools/clientcmd",
         "@io_k8s_client_go//tools/clientcmd/api",
diff --git a/metropolis/cli/metroctl/core/install.go b/metropolis/cli/metroctl/core/install.go
index 223e773..4175e22 100644
--- a/metropolis/cli/metroctl/core/install.go
+++ b/metropolis/cli/metroctl/core/install.go
@@ -9,10 +9,10 @@
 
 	"google.golang.org/protobuf/proto"
 
-	"source.monogon.dev/metropolis/pkg/blockdev"
-	"source.monogon.dev/metropolis/pkg/fat32"
-	"source.monogon.dev/metropolis/pkg/gpt"
 	"source.monogon.dev/metropolis/proto/api"
+	"source.monogon.dev/osbase/blockdev"
+	"source.monogon.dev/osbase/fat32"
+	"source.monogon.dev/osbase/gpt"
 )
 
 type MakeInstallerImageArgs struct {
diff --git a/metropolis/cli/metroctl/test/BUILD.bazel b/metropolis/cli/metroctl/test/BUILD.bazel
index 23bbc4a..9b9c4cc 100644
--- a/metropolis/cli/metroctl/test/BUILD.bazel
+++ b/metropolis/cli/metroctl/test/BUILD.bazel
@@ -13,10 +13,10 @@
     rundir = ".",
     deps = [
         "//metropolis/node",
-        "//metropolis/pkg/cmd",
-        "//metropolis/test/launch/cluster",
+        "//metropolis/test/launch",
         "//metropolis/test/util",
         "//metropolis/version",
+        "//osbase/cmd",
         "//version",
         "@io_bazel_rules_go//go/runfiles:go_default_library",
     ],
@@ -28,10 +28,10 @@
     importpath = "source.monogon.dev/metropolis/cli/metroctl/test",
     visibility = ["//visibility:private"],
     deps = [
-        "//metropolis/pkg/cmd",
-        "//metropolis/test/launch/cluster",
+        "//metropolis/test/launch",
         "//metropolis/test/util",
         "//metropolis/version",
+        "//osbase/cmd",
         "//version",
         "@io_bazel_rules_go//go/runfiles:go_default_library",
     ],
diff --git a/metropolis/cli/metroctl/test/test.go b/metropolis/cli/metroctl/test/test.go
index 7b31918..f601811 100644
--- a/metropolis/cli/metroctl/test/test.go
+++ b/metropolis/cli/metroctl/test/test.go
@@ -15,9 +15,9 @@
 
 	mversion "source.monogon.dev/metropolis/version"
 
-	"source.monogon.dev/metropolis/pkg/cmd"
-	"source.monogon.dev/metropolis/test/launch/cluster"
+	mlaunch "source.monogon.dev/metropolis/test/launch"
 	"source.monogon.dev/metropolis/test/util"
+	"source.monogon.dev/osbase/cmd"
 	"source.monogon.dev/version"
 )
 
@@ -95,10 +95,10 @@
 	ctx, ctxC := context.WithCancel(context.Background())
 	defer ctxC()
 
-	co := cluster.ClusterOptions{
+	co := mlaunch.ClusterOptions{
 		NumNodes: 2,
 	}
-	cl, err := cluster.LaunchCluster(context.Background(), co)
+	cl, err := mlaunch.LaunchCluster(context.Background(), co)
 	if err != nil {
 		t.Fatalf("LaunchCluster failed: %v", err)
 	}
@@ -109,7 +109,7 @@
 		}
 	}()
 
-	socksRemote := fmt.Sprintf("localhost:%d", cl.Ports[cluster.SOCKSPort])
+	socksRemote := fmt.Sprintf("localhost:%d", cl.Ports[mlaunch.SOCKSPort])
 	var clusterEndpoints []string
 	// Use node starting order for endpoints
 	for _, ep := range cl.NodeIDs {
@@ -118,7 +118,7 @@
 
 	ownerPem := pem.EncodeToMemory(&pem.Block{
 		Type:  "METROPOLIS INITIAL OWNER PRIVATE KEY",
-		Bytes: cluster.InsecurePrivateKey,
+		Bytes: mlaunch.InsecurePrivateKey,
 	})
 	if err := os.WriteFile("owner-key.pem", ownerPem, 0644); err != nil {
 		log.Fatal("Couldn't write owner-key.pem")
diff --git a/metropolis/installer/BUILD.bazel b/metropolis/installer/BUILD.bazel
index f94d134..a0d1993 100644
--- a/metropolis/installer/BUILD.bazel
+++ b/metropolis/installer/BUILD.bazel
@@ -16,9 +16,9 @@
     visibility = ["//visibility:private"],
     deps = [
         "//metropolis/node/build/mkimage/osimage",
-        "//metropolis/pkg/blockdev",
-        "//metropolis/pkg/efivarfs",
-        "//metropolis/pkg/sysfs",
+        "//osbase/blockdev",
+        "//osbase/efivarfs",
+        "//osbase/sysfs",
         "@org_golang_x_sys//unix",
     ],
 )
diff --git a/metropolis/installer/main.go b/metropolis/installer/main.go
index a35a734..0ae7dd6 100644
--- a/metropolis/installer/main.go
+++ b/metropolis/installer/main.go
@@ -35,9 +35,9 @@
 	"golang.org/x/sys/unix"
 
 	"source.monogon.dev/metropolis/node/build/mkimage/osimage"
-	"source.monogon.dev/metropolis/pkg/blockdev"
-	"source.monogon.dev/metropolis/pkg/efivarfs"
-	"source.monogon.dev/metropolis/pkg/sysfs"
+	"source.monogon.dev/osbase/blockdev"
+	"source.monogon.dev/osbase/efivarfs"
+	"source.monogon.dev/osbase/sysfs"
 )
 
 //go:embed metropolis/node/core/abloader/abloader_bin.efi
diff --git a/metropolis/installer/test/BUILD.bazel b/metropolis/installer/test/BUILD.bazel
index 0f3583a..7f71218 100644
--- a/metropolis/installer/test/BUILD.bazel
+++ b/metropolis/installer/test/BUILD.bazel
@@ -22,8 +22,8 @@
     deps = [
         "//metropolis/cli/metroctl/core",
         "//metropolis/node/build/mkimage/osimage",
-        "//metropolis/pkg/cmd",
         "//metropolis/proto/api",
+        "//osbase/cmd",
         "@com_github_diskfs_go_diskfs//:go-diskfs",
         "@com_github_diskfs_go_diskfs//disk",
         "@com_github_diskfs_go_diskfs//partition/gpt",
diff --git a/metropolis/installer/test/main.go b/metropolis/installer/test/main.go
index b2fb8dc..f2c1f46 100644
--- a/metropolis/installer/test/main.go
+++ b/metropolis/installer/test/main.go
@@ -39,7 +39,7 @@
 
 	mctl "source.monogon.dev/metropolis/cli/metroctl/core"
 	"source.monogon.dev/metropolis/node/build/mkimage/osimage"
-	"source.monogon.dev/metropolis/pkg/cmd"
+	"source.monogon.dev/osbase/cmd"
 )
 
 // Each variable in this block points to either a test dependency or a side
diff --git a/metropolis/node/build/fwprune/BUILD.bazel b/metropolis/node/build/fwprune/BUILD.bazel
index bc22665..e29ac7e 100644
--- a/metropolis/node/build/fwprune/BUILD.bazel
+++ b/metropolis/node/build/fwprune/BUILD.bazel
@@ -7,7 +7,7 @@
     visibility = ["//visibility:private"],
     deps = [
         "//metropolis/node/build/fsspec",
-        "//metropolis/pkg/kmod",
+        "//osbase/kmod",
         "@org_golang_google_protobuf//encoding/prototext",
         "@org_golang_google_protobuf//proto",
     ],
diff --git a/metropolis/node/build/fwprune/main.go b/metropolis/node/build/fwprune/main.go
index 4f26fa0..e76e5d0 100644
--- a/metropolis/node/build/fwprune/main.go
+++ b/metropolis/node/build/fwprune/main.go
@@ -20,7 +20,7 @@
 	"google.golang.org/protobuf/proto"
 
 	"source.monogon.dev/metropolis/node/build/fsspec"
-	"source.monogon.dev/metropolis/pkg/kmod"
+	"source.monogon.dev/osbase/kmod"
 )
 
 // linkRegexp parses the Link: lines in the WHENCE file. This does not have
diff --git a/metropolis/node/build/kconfig-patcher/BUILD.bazel b/metropolis/node/build/kconfig-patcher/BUILD.bazel
index e838f9e..278db21 100644
--- a/metropolis/node/build/kconfig-patcher/BUILD.bazel
+++ b/metropolis/node/build/kconfig-patcher/BUILD.bazel
@@ -12,7 +12,7 @@
     embed = [":kconfig-patcher_lib"],
     visibility = [
         "//metropolis/node:__pkg__",
-        "//metropolis/test/ktest:__pkg__",
+        "//osbase/test/ktest:__pkg__",
     ],
 )
 
diff --git a/metropolis/node/build/mkerofs/BUILD.bazel b/metropolis/node/build/mkerofs/BUILD.bazel
index 73fda98..0befb4c 100644
--- a/metropolis/node/build/mkerofs/BUILD.bazel
+++ b/metropolis/node/build/mkerofs/BUILD.bazel
@@ -7,7 +7,7 @@
     visibility = ["//visibility:public"],
     deps = [
         "//metropolis/node/build/fsspec",
-        "//metropolis/pkg/erofs",
+        "//osbase/erofs",
     ],
 )
 
diff --git a/metropolis/node/build/mkerofs/main.go b/metropolis/node/build/mkerofs/main.go
index ac0042a..b5d5568 100644
--- a/metropolis/node/build/mkerofs/main.go
+++ b/metropolis/node/build/mkerofs/main.go
@@ -30,7 +30,7 @@
 	"strings"
 
 	"source.monogon.dev/metropolis/node/build/fsspec"
-	"source.monogon.dev/metropolis/pkg/erofs"
+	"source.monogon.dev/osbase/erofs"
 )
 
 func (spec *entrySpec) writeRecursive(w *erofs.Writer, pathname string) {
diff --git a/metropolis/node/build/mkimage/BUILD.bazel b/metropolis/node/build/mkimage/BUILD.bazel
index ad88acb..41ce603 100644
--- a/metropolis/node/build/mkimage/BUILD.bazel
+++ b/metropolis/node/build/mkimage/BUILD.bazel
@@ -10,8 +10,8 @@
     visibility = ["//visibility:private"],
     deps = [
         "//metropolis/node/build/mkimage/osimage",
-        "//metropolis/pkg/blkio",
-        "//metropolis/pkg/blockdev",
+        "//osbase/blkio",
+        "//osbase/blockdev",
     ],
 )
 
diff --git a/metropolis/node/build/mkimage/main.go b/metropolis/node/build/mkimage/main.go
index 7de951e..3695054 100644
--- a/metropolis/node/build/mkimage/main.go
+++ b/metropolis/node/build/mkimage/main.go
@@ -34,8 +34,8 @@
 	"os"
 
 	"source.monogon.dev/metropolis/node/build/mkimage/osimage"
-	"source.monogon.dev/metropolis/pkg/blkio"
-	"source.monogon.dev/metropolis/pkg/blockdev"
+	"source.monogon.dev/osbase/blkio"
+	"source.monogon.dev/osbase/blockdev"
 )
 
 //go:embed metropolis/node/core/abloader/abloader_bin.efi
diff --git a/metropolis/node/build/mkimage/osimage/BUILD.bazel b/metropolis/node/build/mkimage/osimage/BUILD.bazel
index cfbf736..9799b81 100644
--- a/metropolis/node/build/mkimage/osimage/BUILD.bazel
+++ b/metropolis/node/build/mkimage/osimage/BUILD.bazel
@@ -6,10 +6,10 @@
     importpath = "source.monogon.dev/metropolis/node/build/mkimage/osimage",
     visibility = ["//visibility:public"],
     deps = [
-        "//metropolis/pkg/blockdev",
-        "//metropolis/pkg/efivarfs",
-        "//metropolis/pkg/fat32",
-        "//metropolis/pkg/gpt",
+        "//osbase/blockdev",
+        "//osbase/efivarfs",
+        "//osbase/fat32",
+        "//osbase/gpt",
         "@com_github_google_uuid//:uuid",
     ],
 )
diff --git a/metropolis/node/build/mkimage/osimage/osimage.go b/metropolis/node/build/mkimage/osimage/osimage.go
index d0ccaef..f877ded 100644
--- a/metropolis/node/build/mkimage/osimage/osimage.go
+++ b/metropolis/node/build/mkimage/osimage/osimage.go
@@ -25,10 +25,10 @@
 
 	"github.com/google/uuid"
 
-	"source.monogon.dev/metropolis/pkg/blockdev"
-	"source.monogon.dev/metropolis/pkg/efivarfs"
-	"source.monogon.dev/metropolis/pkg/fat32"
-	"source.monogon.dev/metropolis/pkg/gpt"
+	"source.monogon.dev/osbase/blockdev"
+	"source.monogon.dev/osbase/efivarfs"
+	"source.monogon.dev/osbase/fat32"
+	"source.monogon.dev/osbase/gpt"
 )
 
 var (
diff --git a/metropolis/node/build/mkverity/BUILD.bazel b/metropolis/node/build/mkverity/BUILD.bazel
index caabc26..a748b86 100644
--- a/metropolis/node/build/mkverity/BUILD.bazel
+++ b/metropolis/node/build/mkverity/BUILD.bazel
@@ -15,5 +15,5 @@
     srcs = ["mkverity.go"],
     importpath = "source.monogon.dev/metropolis/node/build/mkverity",
     visibility = ["//visibility:private"],
-    deps = ["//metropolis/pkg/verity"],
+    deps = ["//osbase/verity"],
 )
diff --git a/metropolis/node/build/mkverity/mkverity.go b/metropolis/node/build/mkverity/mkverity.go
index f44b601..ff2807b 100644
--- a/metropolis/node/build/mkverity/mkverity.go
+++ b/metropolis/node/build/mkverity/mkverity.go
@@ -19,7 +19,7 @@
 // outputs a Verity mapping table on success.
 //
 // For more information, see:
-// - source.monogon.dev/metropolis/pkg/verity
+// - source.monogon.dev/osbase/verity
 // - https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity
 package main
 
@@ -30,7 +30,7 @@
 	"log"
 	"os"
 
-	"source.monogon.dev/metropolis/pkg/verity"
+	"source.monogon.dev/osbase/verity"
 )
 
 // createImage creates a dm-verity target image by combining the input image
diff --git a/metropolis/node/core/BUILD.bazel b/metropolis/node/core/BUILD.bazel
index 8605534..938a7d7 100644
--- a/metropolis/node/core/BUILD.bazel
+++ b/metropolis/node/core/BUILD.bazel
@@ -34,13 +34,13 @@
         "//metropolis/node/core/rpc/resolver",
         "//metropolis/node/core/time",
         "//metropolis/node/core/update",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/pstore",
-        "//metropolis/pkg/supervisor",
-        "//metropolis/pkg/sysctl",
-        "//metropolis/pkg/tpm",
         "//metropolis/proto/api",
         "//metropolis/version",
+        "//osbase/logtree",
+        "//osbase/pstore",
+        "//osbase/supervisor",
+        "//osbase/sysctl",
+        "//osbase/tpm",
         "//version",
         "@com_github_cenkalti_backoff_v4//:backoff",
         "@com_github_containerd_containerd//:containerd",
diff --git a/metropolis/node/core/cluster/BUILD.bazel b/metropolis/node/core/cluster/BUILD.bazel
index 4da6c76..665f94d 100644
--- a/metropolis/node/core/cluster/BUILD.bazel
+++ b/metropolis/node/core/cluster/BUILD.bazel
@@ -21,10 +21,10 @@
         "//metropolis/node/core/rpc",
         "//metropolis/node/core/rpc/resolver",
         "//metropolis/node/core/update",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/api",
         "//metropolis/proto/common",
         "//metropolis/proto/private",
+        "//osbase/supervisor",
         "@com_github_cenkalti_backoff_v4//:backoff",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_protobuf//proto",
diff --git a/metropolis/node/core/cluster/cluster.go b/metropolis/node/core/cluster/cluster.go
index 323c8ca..1d3135c 100644
--- a/metropolis/node/core/cluster/cluster.go
+++ b/metropolis/node/core/cluster/cluster.go
@@ -34,9 +34,9 @@
 	"source.monogon.dev/metropolis/node/core/network"
 	"source.monogon.dev/metropolis/node/core/roleserve"
 	"source.monogon.dev/metropolis/node/core/update"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	apb "source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type Manager struct {
diff --git a/metropolis/node/core/cluster/cluster_bootstrap.go b/metropolis/node/core/cluster/cluster_bootstrap.go
index 3e5f745..0c9d7fe 100644
--- a/metropolis/node/core/cluster/cluster_bootstrap.go
+++ b/metropolis/node/core/cluster/cluster_bootstrap.go
@@ -30,7 +30,7 @@
 	"source.monogon.dev/metropolis/node/core/curator"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/roleserve"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 
 	apb "source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
diff --git a/metropolis/node/core/cluster/cluster_join.go b/metropolis/node/core/cluster/cluster_join.go
index cdbf0b3..18cfa4e 100644
--- a/metropolis/node/core/cluster/cluster_join.go
+++ b/metropolis/node/core/cluster/cluster_join.go
@@ -14,9 +14,9 @@
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
 	"source.monogon.dev/metropolis/node/core/rpc/resolver"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	cpb "source.monogon.dev/metropolis/proto/common"
 	ppb "source.monogon.dev/metropolis/proto/private"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // join implements Join Flow of an already registered node.
diff --git a/metropolis/node/core/cluster/cluster_register.go b/metropolis/node/core/cluster/cluster_register.go
index 2389eaf..36f7394 100644
--- a/metropolis/node/core/cluster/cluster_register.go
+++ b/metropolis/node/core/cluster/cluster_register.go
@@ -17,7 +17,7 @@
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
 	"source.monogon.dev/metropolis/node/core/rpc/resolver"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	apb "source.monogon.dev/metropolis/proto/api"
diff --git a/metropolis/node/core/clusternet/BUILD.bazel b/metropolis/node/core/clusternet/BUILD.bazel
index 1ccce66..5ab1d8a 100644
--- a/metropolis/node/core/clusternet/BUILD.bazel
+++ b/metropolis/node/core/clusternet/BUILD.bazel
@@ -1,5 +1,5 @@
 load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
+load("//osbase/test/ktest:ktest.bzl", "ktest")
 
 go_library(
     name = "clusternet",
@@ -16,9 +16,9 @@
         "//metropolis/node/core/curator/watcher",
         "//metropolis/node/core/localstorage",
         "//metropolis/node/core/network",
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/common",
+        "//osbase/event",
+        "//osbase/supervisor",
         "@com_github_cenkalti_backoff_v4//:backoff",
         "@com_github_vishvananda_netlink//:netlink",
         "@com_zx2c4_golang_wireguard_wgctrl//:wgctrl",
@@ -36,10 +36,10 @@
         "//metropolis/node/core/localstorage",
         "//metropolis/node/core/localstorage/declarative",
         "//metropolis/node/core/network",
-        "//metropolis/pkg/event/memory",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/common",
         "//metropolis/test/util",
+        "//osbase/event/memory",
+        "//osbase/supervisor",
         "@com_zx2c4_golang_wireguard_wgctrl//:wgctrl",
         "@com_zx2c4_golang_wireguard_wgctrl//wgtypes",
     ],
diff --git a/metropolis/node/core/clusternet/clusternet.go b/metropolis/node/core/clusternet/clusternet.go
index cde3e0e..1c3c7b3 100644
--- a/metropolis/node/core/clusternet/clusternet.go
+++ b/metropolis/node/core/clusternet/clusternet.go
@@ -34,8 +34,8 @@
 	"source.monogon.dev/metropolis/node/core/curator/watcher"
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/network"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/supervisor"
 
 	apb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
diff --git a/metropolis/node/core/clusternet/clusternet_test.go b/metropolis/node/core/clusternet/clusternet_test.go
index 7089cc7..f07f3e5 100644
--- a/metropolis/node/core/clusternet/clusternet_test.go
+++ b/metropolis/node/core/clusternet/clusternet_test.go
@@ -17,9 +17,9 @@
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/localstorage/declarative"
 	"source.monogon.dev/metropolis/node/core/network"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	"source.monogon.dev/metropolis/test/util"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 
 	apb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
diff --git a/metropolis/node/core/consensus/BUILD.bazel b/metropolis/node/core/consensus/BUILD.bazel
index 359b6dc..eeb8d3b 100644
--- a/metropolis/node/core/consensus/BUILD.bazel
+++ b/metropolis/node/core/consensus/BUILD.bazel
@@ -16,13 +16,13 @@
         "//metropolis/node/core/consensus/client",
         "//metropolis/node/core/identity",
         "//metropolis/node/core/localstorage",
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/event/memory",
-        "//metropolis/pkg/logbuffer",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/logtree/unraw",
-        "//metropolis/pkg/pki",
-        "//metropolis/pkg/supervisor",
+        "//osbase/event",
+        "//osbase/event/memory",
+        "//osbase/logbuffer",
+        "//osbase/logtree",
+        "//osbase/logtree/unraw",
+        "//osbase/pki",
+        "//osbase/supervisor",
         "@io_etcd_go_etcd_client_v3//:client",
         "@io_etcd_go_etcd_server_v3//embed",
     ],
@@ -45,10 +45,10 @@
     deps = [
         "//metropolis/node/core/localstorage",
         "//metropolis/node/core/localstorage/declarative",
-        "//metropolis/pkg/logbuffer",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/supervisor",
         "//metropolis/test/util",
+        "//osbase/logbuffer",
+        "//osbase/logtree",
+        "//osbase/supervisor",
         "@com_github_google_go_cmp//cmp",
     ],
 )
diff --git a/metropolis/node/core/consensus/configuration.go b/metropolis/node/core/consensus/configuration.go
index dc3cd06..bb3db82 100644
--- a/metropolis/node/core/consensus/configuration.go
+++ b/metropolis/node/core/consensus/configuration.go
@@ -15,7 +15,7 @@
 	"source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/localstorage"
-	"source.monogon.dev/metropolis/pkg/pki"
+	"source.monogon.dev/osbase/pki"
 )
 
 // Config describes the startup configuration of a consensus instance.
diff --git a/metropolis/node/core/consensus/consensus.go b/metropolis/node/core/consensus/consensus.go
index 3244972..e749f7b 100644
--- a/metropolis/node/core/consensus/consensus.go
+++ b/metropolis/node/core/consensus/consensus.go
@@ -102,11 +102,11 @@
 
 	"source.monogon.dev/metropolis/node/core/consensus/client"
 	"source.monogon.dev/metropolis/node/core/identity"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/logtree/unraw"
-	"source.monogon.dev/metropolis/pkg/pki"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/logtree/unraw"
+	"source.monogon.dev/osbase/pki"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 var (
diff --git a/metropolis/node/core/consensus/consensus_test.go b/metropolis/node/core/consensus/consensus_test.go
index 1866ff5..85df62e 100644
--- a/metropolis/node/core/consensus/consensus_test.go
+++ b/metropolis/node/core/consensus/consensus_test.go
@@ -29,8 +29,8 @@
 
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/localstorage/declarative"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	"source.monogon.dev/metropolis/test/util"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type boilerplate struct {
diff --git a/metropolis/node/core/consensus/logparser.go b/metropolis/node/core/consensus/logparser.go
index dbd3b1d..b403423 100644
--- a/metropolis/node/core/consensus/logparser.go
+++ b/metropolis/node/core/consensus/logparser.go
@@ -8,9 +8,9 @@
 	"strings"
 	"time"
 
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/logtree/unraw"
+	"source.monogon.dev/osbase/logbuffer"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/logtree/unraw"
 )
 
 // etcdLogEntry is a JSON-encoded, structured log entry received from a running
diff --git a/metropolis/node/core/consensus/logparser_test.go b/metropolis/node/core/consensus/logparser_test.go
index c53c13d..cfe6fea 100644
--- a/metropolis/node/core/consensus/logparser_test.go
+++ b/metropolis/node/core/consensus/logparser_test.go
@@ -6,8 +6,8 @@
 
 	"github.com/google/go-cmp/cmp"
 
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-	"source.monogon.dev/metropolis/pkg/logtree"
+	"source.monogon.dev/osbase/logbuffer"
+	"source.monogon.dev/osbase/logtree"
 )
 
 // TestParsing exercises the parseEtcdLogEntry function.
diff --git a/metropolis/node/core/consensus/status.go b/metropolis/node/core/consensus/status.go
index 44562bf..994e9f7 100644
--- a/metropolis/node/core/consensus/status.go
+++ b/metropolis/node/core/consensus/status.go
@@ -13,8 +13,8 @@
 	"source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/consensus/client"
 	"source.monogon.dev/metropolis/node/core/identity"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/pki"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/pki"
 )
 
 // ServiceHandle is implemented by Service and should be the type expected by
diff --git a/metropolis/node/core/consensus/testhelpers.go b/metropolis/node/core/consensus/testhelpers.go
index 3522e06..3471e3c 100644
--- a/metropolis/node/core/consensus/testhelpers.go
+++ b/metropolis/node/core/consensus/testhelpers.go
@@ -6,7 +6,7 @@
 
 	clientv3 "go.etcd.io/etcd/client/v3"
 
-	"source.monogon.dev/metropolis/pkg/event/memory"
+	"source.monogon.dev/osbase/event/memory"
 )
 
 type testServiceHandle struct {
diff --git a/metropolis/node/core/curator/BUILD.bazel b/metropolis/node/core/curator/BUILD.bazel
index e8c3746..f619d4b 100644
--- a/metropolis/node/core/curator/BUILD.bazel
+++ b/metropolis/node/core/curator/BUILD.bazel
@@ -31,13 +31,13 @@
         "//metropolis/node/core/identity",
         "//metropolis/node/core/rpc",
         "//metropolis/node/kubernetes/pki",
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/event/etcd",
-        "//metropolis/pkg/event/memory",
-        "//metropolis/pkg/pki",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/api",
         "//metropolis/proto/common",
+        "//osbase/event",
+        "//osbase/event/etcd",
+        "//osbase/event/memory",
+        "//osbase/pki",
+        "//osbase/supervisor",
         "@com_github_google_cel_go//cel:go_default_library",
         "@com_github_google_cel_go//checker/decls:go_default_library",
         "@com_github_google_cel_go//common/types:go_default_library",
@@ -73,13 +73,13 @@
         "//metropolis/node/core/curator/proto/private",
         "//metropolis/node/core/identity",
         "//metropolis/node/core/rpc",
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/pki",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/api",
         "//metropolis/proto/common",
         "//metropolis/test/util",
+        "//osbase/event",
+        "//osbase/logtree",
+        "//osbase/pki",
+        "//osbase/supervisor",
         "@com_github_google_go_cmp//cmp",
         "@io_etcd_go_etcd_client_v3//:client",
         "@io_etcd_go_etcd_tests_v3//integration",
diff --git a/metropolis/node/core/curator/bootstrap.go b/metropolis/node/core/curator/bootstrap.go
index ac2a4de..e092c63 100644
--- a/metropolis/node/core/curator/bootstrap.go
+++ b/metropolis/node/core/curator/bootstrap.go
@@ -9,7 +9,7 @@
 
 	"source.monogon.dev/metropolis/node/core/consensus/client"
 	"source.monogon.dev/metropolis/node/core/identity"
-	"source.monogon.dev/metropolis/pkg/pki"
+	"source.monogon.dev/osbase/pki"
 
 	ppb "source.monogon.dev/metropolis/node/core/curator/proto/private"
 )
diff --git a/metropolis/node/core/curator/curator.go b/metropolis/node/core/curator/curator.go
index 149cb18..665fea1 100644
--- a/metropolis/node/core/curator/curator.go
+++ b/metropolis/node/core/curator/curator.go
@@ -27,8 +27,8 @@
 	"source.monogon.dev/metropolis/node/core/consensus/client"
 	ppb "source.monogon.dev/metropolis/node/core/curator/proto/private"
 	"source.monogon.dev/metropolis/node/core/identity"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // Config is the configuration of the curator.
diff --git a/metropolis/node/core/curator/curator_test.go b/metropolis/node/core/curator/curator_test.go
index 6cd3f84..841a813 100644
--- a/metropolis/node/core/curator/curator_test.go
+++ b/metropolis/node/core/curator/curator_test.go
@@ -14,10 +14,10 @@
 
 	"source.monogon.dev/metropolis/node/core/consensus"
 	"source.monogon.dev/metropolis/node/core/identity"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	"source.monogon.dev/metropolis/test/util"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 var (
diff --git a/metropolis/node/core/curator/impl_follower.go b/metropolis/node/core/curator/impl_follower.go
index 5963737..2a7f45e 100644
--- a/metropolis/node/core/curator/impl_follower.go
+++ b/metropolis/node/core/curator/impl_follower.go
@@ -11,7 +11,7 @@
 	cpb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
-	"source.monogon.dev/metropolis/pkg/event/memory"
+	"source.monogon.dev/osbase/event/memory"
 )
 
 type curatorFollower struct {
diff --git a/metropolis/node/core/curator/impl_leader_aaa.go b/metropolis/node/core/curator/impl_leader_aaa.go
index c9eb08f..8f4eb84 100644
--- a/metropolis/node/core/curator/impl_leader_aaa.go
+++ b/metropolis/node/core/curator/impl_leader_aaa.go
@@ -13,8 +13,8 @@
 	ppb "source.monogon.dev/metropolis/node/core/curator/proto/private"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
-	"source.monogon.dev/metropolis/pkg/pki"
 	apb "source.monogon.dev/metropolis/proto/api"
+	"source.monogon.dev/osbase/pki"
 )
 
 const (
diff --git a/metropolis/node/core/curator/impl_leader_cluster_networking.go b/metropolis/node/core/curator/impl_leader_cluster_networking.go
index 52d8c12..5dab657 100644
--- a/metropolis/node/core/curator/impl_leader_cluster_networking.go
+++ b/metropolis/node/core/curator/impl_leader_cluster_networking.go
@@ -12,8 +12,8 @@
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/event/etcd"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/event/etcd"
 )
 
 // preapreClusternetCacheUnlocked makes sure the leader's clusternetCache exists,
diff --git a/metropolis/node/core/curator/impl_leader_curator.go b/metropolis/node/core/curator/impl_leader_curator.go
index d81f4eb..126ba49 100644
--- a/metropolis/node/core/curator/impl_leader_curator.go
+++ b/metropolis/node/core/curator/impl_leader_curator.go
@@ -19,10 +19,10 @@
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/event/etcd"
-	"source.monogon.dev/metropolis/pkg/pki"
 	cpb "source.monogon.dev/metropolis/proto/common"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/event/etcd"
+	"source.monogon.dev/osbase/pki"
 )
 
 // leaderCurator implements the Curator gRPC API (ipb.Curator) as a curator
diff --git a/metropolis/node/core/curator/impl_leader_test.go b/metropolis/node/core/curator/impl_leader_test.go
index 5c3c337..a5d1ea1 100644
--- a/metropolis/node/core/curator/impl_leader_test.go
+++ b/metropolis/node/core/curator/impl_leader_test.go
@@ -31,10 +31,10 @@
 	ppb "source.monogon.dev/metropolis/node/core/curator/proto/private"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/pki"
 	apb "source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/pki"
 )
 
 // fakeLeader creates a curatorLeader without any underlying leader election, in
diff --git a/metropolis/node/core/curator/listener.go b/metropolis/node/core/curator/listener.go
index 77fe0e0..ede28bb 100644
--- a/metropolis/node/core/curator/listener.go
+++ b/metropolis/node/core/curator/listener.go
@@ -16,9 +16,9 @@
 	cpb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	apb "source.monogon.dev/metropolis/proto/api"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // listener is the curator runnable responsible for listening for gRPC
diff --git a/metropolis/node/core/curator/state_node.go b/metropolis/node/core/curator/state_node.go
index 5e5ef2e..0cfc87e 100644
--- a/metropolis/node/core/curator/state_node.go
+++ b/metropolis/node/core/curator/state_node.go
@@ -33,7 +33,7 @@
 	"source.monogon.dev/metropolis/node/core/consensus"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
-	"source.monogon.dev/metropolis/pkg/pki"
+	"source.monogon.dev/osbase/pki"
 
 	ppb "source.monogon.dev/metropolis/node/core/curator/proto/private"
 	cpb "source.monogon.dev/metropolis/proto/common"
diff --git a/metropolis/node/core/curator/state_pki.go b/metropolis/node/core/curator/state_pki.go
index 2384158..7eb3f76 100644
--- a/metropolis/node/core/curator/state_pki.go
+++ b/metropolis/node/core/curator/state_pki.go
@@ -2,7 +2,7 @@
 
 import (
 	"source.monogon.dev/metropolis/node/core/identity"
-	"source.monogon.dev/metropolis/pkg/pki"
+	"source.monogon.dev/osbase/pki"
 )
 
 var (
diff --git a/metropolis/node/core/debug_service_disabled.go b/metropolis/node/core/debug_service_disabled.go
index da64266..815a0dd 100644
--- a/metropolis/node/core/debug_service_disabled.go
+++ b/metropolis/node/core/debug_service_disabled.go
@@ -5,7 +5,7 @@
 
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/roleserve"
-	"source.monogon.dev/metropolis/pkg/logtree"
+	"source.monogon.dev/osbase/logtree"
 )
 
 // runDebugService runs the debug service if this is a debug build. Otherwise
diff --git a/metropolis/node/core/debug_service_enabled.go b/metropolis/node/core/debug_service_enabled.go
index 75e92bc..4759dab 100644
--- a/metropolis/node/core/debug_service_enabled.go
+++ b/metropolis/node/core/debug_service_enabled.go
@@ -34,8 +34,8 @@
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/mgmt"
 	"source.monogon.dev/metropolis/node/core/roleserve"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
 
 	common "source.monogon.dev/metropolis/node"
 	apb "source.monogon.dev/metropolis/proto/api"
diff --git a/metropolis/node/core/devmgr/BUILD.bazel b/metropolis/node/core/devmgr/BUILD.bazel
index 15b8065..bc4a293 100644
--- a/metropolis/node/core/devmgr/BUILD.bazel
+++ b/metropolis/node/core/devmgr/BUILD.bazel
@@ -6,8 +6,8 @@
     importpath = "source.monogon.dev/metropolis/node/core/devmgr",
     visibility = ["//visibility:public"],
     deps = [
-        "//metropolis/pkg/kmod",
-        "//metropolis/pkg/supervisor",
+        "//osbase/kmod",
+        "//osbase/supervisor",
         "@com_github_mdlayher_kobject//:kobject",
     ],
 )
diff --git a/metropolis/node/core/devmgr/devmgr.go b/metropolis/node/core/devmgr/devmgr.go
index 8133c77..b5c1835 100644
--- a/metropolis/node/core/devmgr/devmgr.go
+++ b/metropolis/node/core/devmgr/devmgr.go
@@ -15,8 +15,8 @@
 
 	"github.com/mdlayher/kobject"
 
-	"source.monogon.dev/metropolis/pkg/kmod"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/kmod"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type Service struct{}
diff --git a/metropolis/node/core/localstorage/BUILD.bazel b/metropolis/node/core/localstorage/BUILD.bazel
index 075a07c..4249297 100644
--- a/metropolis/node/core/localstorage/BUILD.bazel
+++ b/metropolis/node/core/localstorage/BUILD.bazel
@@ -15,11 +15,11 @@
         "//metropolis/node/core/localstorage/crypt",
         "//metropolis/node/core/localstorage/declarative",
         "//metropolis/node/core/update",
-        "//metropolis/pkg/tpm",
         "//metropolis/proto/api",
         "//metropolis/proto/common",
         "//metropolis/proto/private",
         "//net/proto",
+        "//osbase/tpm",
         "@org_golang_google_protobuf//proto",
         "@org_golang_x_sys//unix",
     ],
diff --git a/metropolis/node/core/localstorage/crypt/BUILD.bazel b/metropolis/node/core/localstorage/crypt/BUILD.bazel
index d8e9881..39b6a53 100644
--- a/metropolis/node/core/localstorage/crypt/BUILD.bazel
+++ b/metropolis/node/core/localstorage/crypt/BUILD.bazel
@@ -1,5 +1,5 @@
 load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
+load("//osbase/test/ktest:ktest.bzl", "ktest")
 
 go_library(
     name = "crypt",
@@ -14,12 +14,12 @@
     visibility = ["//metropolis/node/core/localstorage:__subpackages__"],
     deps = [
         "//metropolis/node/core/update",
-        "//metropolis/pkg/blockdev",
-        "//metropolis/pkg/devicemapper",
-        "//metropolis/pkg/efivarfs",
-        "//metropolis/pkg/gpt",
-        "//metropolis/pkg/supervisor",
-        "//metropolis/pkg/sysfs",
+        "//osbase/blockdev",
+        "//osbase/devicemapper",
+        "//osbase/efivarfs",
+        "//osbase/gpt",
+        "//osbase/supervisor",
+        "//osbase/sysfs",
         "@com_github_google_uuid//:uuid",
         "@org_golang_x_sys//unix",
     ],
diff --git a/metropolis/node/core/localstorage/crypt/blockdev.go b/metropolis/node/core/localstorage/crypt/blockdev.go
index d021d9c..6466afc 100644
--- a/metropolis/node/core/localstorage/crypt/blockdev.go
+++ b/metropolis/node/core/localstorage/crypt/blockdev.go
@@ -28,11 +28,11 @@
 	"golang.org/x/sys/unix"
 
 	"source.monogon.dev/metropolis/node/core/update"
-	"source.monogon.dev/metropolis/pkg/blockdev"
-	"source.monogon.dev/metropolis/pkg/efivarfs"
-	"source.monogon.dev/metropolis/pkg/gpt"
-	"source.monogon.dev/metropolis/pkg/supervisor"
-	"source.monogon.dev/metropolis/pkg/sysfs"
+	"source.monogon.dev/osbase/blockdev"
+	"source.monogon.dev/osbase/efivarfs"
+	"source.monogon.dev/osbase/gpt"
+	"source.monogon.dev/osbase/supervisor"
+	"source.monogon.dev/osbase/sysfs"
 )
 
 // NodeDataPartitionType is the partition type value for a Metropolis Node
diff --git a/metropolis/node/core/localstorage/crypt/crypt.go b/metropolis/node/core/localstorage/crypt/crypt.go
index 0336832..bd20efd 100644
--- a/metropolis/node/core/localstorage/crypt/crypt.go
+++ b/metropolis/node/core/localstorage/crypt/crypt.go
@@ -38,7 +38,7 @@
 import (
 	"fmt"
 
-	"source.monogon.dev/metropolis/pkg/blockdev"
+	"source.monogon.dev/osbase/blockdev"
 )
 
 // Mode of block device encryption and/or authentication, if any. See the
diff --git a/metropolis/node/core/localstorage/crypt/crypt_encryption.go b/metropolis/node/core/localstorage/crypt/crypt_encryption.go
index c5f246c..fb8a129 100644
--- a/metropolis/node/core/localstorage/crypt/crypt_encryption.go
+++ b/metropolis/node/core/localstorage/crypt/crypt_encryption.go
@@ -7,8 +7,8 @@
 
 	"golang.org/x/sys/unix"
 
-	"source.monogon.dev/metropolis/pkg/blockdev"
-	"source.monogon.dev/metropolis/pkg/devicemapper"
+	"source.monogon.dev/osbase/blockdev"
+	"source.monogon.dev/osbase/devicemapper"
 )
 
 func encryptionDevPath(name string) string {
diff --git a/metropolis/node/core/localstorage/crypt/crypt_integrity.go b/metropolis/node/core/localstorage/crypt/crypt_integrity.go
index 7276a3e..b2e43f8 100644
--- a/metropolis/node/core/localstorage/crypt/crypt_integrity.go
+++ b/metropolis/node/core/localstorage/crypt/crypt_integrity.go
@@ -7,8 +7,8 @@
 
 	"golang.org/x/sys/unix"
 
-	"source.monogon.dev/metropolis/pkg/blockdev"
-	"source.monogon.dev/metropolis/pkg/devicemapper"
+	"source.monogon.dev/osbase/blockdev"
+	"source.monogon.dev/osbase/devicemapper"
 )
 
 func integrityDevPath(name string) string {
diff --git a/metropolis/node/core/localstorage/directory_data.go b/metropolis/node/core/localstorage/directory_data.go
index 5607e0a..7637056 100644
--- a/metropolis/node/core/localstorage/directory_data.go
+++ b/metropolis/node/core/localstorage/directory_data.go
@@ -25,9 +25,9 @@
 
 	"source.monogon.dev/metropolis/node/core/localstorage/crypt"
 	"source.monogon.dev/metropolis/node/core/localstorage/declarative"
-	"source.monogon.dev/metropolis/pkg/tpm"
 	cpb "source.monogon.dev/metropolis/proto/common"
 	ppb "source.monogon.dev/metropolis/proto/private"
+	"source.monogon.dev/osbase/tpm"
 )
 
 var keySize uint16 = 256 / 8
diff --git a/metropolis/node/core/localstorage/storage_esp.go b/metropolis/node/core/localstorage/storage_esp.go
index 945e6ca..8fc71d0 100644
--- a/metropolis/node/core/localstorage/storage_esp.go
+++ b/metropolis/node/core/localstorage/storage_esp.go
@@ -24,11 +24,11 @@
 	"google.golang.org/protobuf/proto"
 
 	"source.monogon.dev/metropolis/node/core/localstorage/declarative"
-	"source.monogon.dev/metropolis/pkg/tpm"
 	apb "source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
 	ppb "source.monogon.dev/metropolis/proto/private"
 	npb "source.monogon.dev/net/proto"
+	"source.monogon.dev/osbase/tpm"
 )
 
 // ESPDirectory is the EFI System Partition. It is a cleartext partition
diff --git a/metropolis/node/core/main.go b/metropolis/node/core/main.go
index 9a7e39e..e9d1ad1 100644
--- a/metropolis/node/core/main.go
+++ b/metropolis/node/core/main.go
@@ -36,10 +36,10 @@
 	"source.monogon.dev/metropolis/node/core/rpc/resolver"
 	timesvc "source.monogon.dev/metropolis/node/core/time"
 	"source.monogon.dev/metropolis/node/core/update"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
-	"source.monogon.dev/metropolis/pkg/tpm"
 	mversion "source.monogon.dev/metropolis/version"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
+	"source.monogon.dev/osbase/tpm"
 	"source.monogon.dev/version"
 )
 
diff --git a/metropolis/node/core/metrics/BUILD.bazel b/metropolis/node/core/metrics/BUILD.bazel
index 9169ae6..6385bb8 100644
--- a/metropolis/node/core/metrics/BUILD.bazel
+++ b/metropolis/node/core/metrics/BUILD.bazel
@@ -15,7 +15,7 @@
         "//metropolis/node/core/curator/proto/api",
         "//metropolis/node/core/curator/watcher",
         "//metropolis/node/core/identity",
-        "//metropolis/pkg/supervisor",
+        "//osbase/supervisor",
     ],
 )
 
@@ -30,9 +30,9 @@
     deps = [
         "//metropolis/node",
         "//metropolis/node/core/curator/proto/api",
-        "//metropolis/pkg/freeport",
-        "//metropolis/pkg/supervisor",
         "//metropolis/test/util",
+        "//osbase/freeport",
+        "//osbase/supervisor",
         "@com_zx2c4_golang_wireguard_wgctrl//wgtypes",
         "@io_bazel_rules_go//go/runfiles:go_default_library",
     ],
diff --git a/metropolis/node/core/metrics/discovery.go b/metropolis/node/core/metrics/discovery.go
index ff3fcf1..116291e 100644
--- a/metropolis/node/core/metrics/discovery.go
+++ b/metropolis/node/core/metrics/discovery.go
@@ -9,7 +9,7 @@
 
 	"source.monogon.dev/go/types/mapsets"
 	"source.monogon.dev/metropolis/node/core/curator/watcher"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 )
diff --git a/metropolis/node/core/metrics/exporters.go b/metropolis/node/core/metrics/exporters.go
index c14abcc..2dd2cfc 100644
--- a/metropolis/node/core/metrics/exporters.go
+++ b/metropolis/node/core/metrics/exporters.go
@@ -6,7 +6,7 @@
 	"net/http"
 
 	"source.monogon.dev/metropolis/node"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // An Exporter is a Prometheus binary running under the Metrics service which
diff --git a/metropolis/node/core/metrics/metrics.go b/metropolis/node/core/metrics/metrics.go
index bd229fa..b07a980 100644
--- a/metropolis/node/core/metrics/metrics.go
+++ b/metropolis/node/core/metrics/metrics.go
@@ -11,7 +11,7 @@
 
 	"source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/identity"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // Service is the Metropolis Metrics Service.
diff --git a/metropolis/node/core/metrics/metrics_test.go b/metropolis/node/core/metrics/metrics_test.go
index b0a9310..06b1d4e 100644
--- a/metropolis/node/core/metrics/metrics_test.go
+++ b/metropolis/node/core/metrics/metrics_test.go
@@ -18,9 +18,9 @@
 	apb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 
 	"source.monogon.dev/metropolis/node"
-	"source.monogon.dev/metropolis/pkg/freeport"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	"source.monogon.dev/metropolis/test/util"
+	"source.monogon.dev/osbase/freeport"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 func fakeExporter(name, value string) *Exporter {
diff --git a/metropolis/node/core/mgmt/BUILD.bazel b/metropolis/node/core/mgmt/BUILD.bazel
index ecec6d1..65a54a6 100644
--- a/metropolis/node/core/mgmt/BUILD.bazel
+++ b/metropolis/node/core/mgmt/BUILD.bazel
@@ -14,11 +14,11 @@
         "//metropolis/node/core/identity",
         "//metropolis/node/core/rpc",
         "//metropolis/node/core/update",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/logtree/proto",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/api",
         "//metropolis/proto/common",
+        "//osbase/logtree",
+        "//osbase/logtree/proto",
+        "//osbase/supervisor",
         "@com_github_vishvananda_netlink//:netlink",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//codes",
@@ -32,10 +32,10 @@
     srcs = ["svc_logs_test.go"],
     embed = [":mgmt"],
     deps = [
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/logtree/proto",
         "//metropolis/proto/api",
         "//metropolis/proto/common",
+        "//osbase/logtree",
+        "//osbase/logtree/proto",
         "@com_github_google_go_cmp//cmp",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//credentials/insecure",
diff --git a/metropolis/node/core/mgmt/mgmt.go b/metropolis/node/core/mgmt/mgmt.go
index e9c3f91..b0632bb 100644
--- a/metropolis/node/core/mgmt/mgmt.go
+++ b/metropolis/node/core/mgmt/mgmt.go
@@ -14,8 +14,8 @@
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
 	"source.monogon.dev/metropolis/node/core/update"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
 
 	apb "source.monogon.dev/metropolis/proto/api"
 )
diff --git a/metropolis/node/core/mgmt/svc_logs.go b/metropolis/node/core/mgmt/svc_logs.go
index 1a884b3..ef5c1d7 100644
--- a/metropolis/node/core/mgmt/svc_logs.go
+++ b/metropolis/node/core/mgmt/svc_logs.go
@@ -7,10 +7,10 @@
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 
-	"source.monogon.dev/metropolis/pkg/logtree"
-	lpb "source.monogon.dev/metropolis/pkg/logtree/proto"
 	"source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
+	"source.monogon.dev/osbase/logtree"
+	lpb "source.monogon.dev/osbase/logtree/proto"
 )
 
 const (
diff --git a/metropolis/node/core/mgmt/svc_logs_test.go b/metropolis/node/core/mgmt/svc_logs_test.go
index 162de57..dec1459 100644
--- a/metropolis/node/core/mgmt/svc_logs_test.go
+++ b/metropolis/node/core/mgmt/svc_logs_test.go
@@ -16,10 +16,10 @@
 	"google.golang.org/grpc/test/bufconn"
 	"google.golang.org/protobuf/testing/protocmp"
 
-	"source.monogon.dev/metropolis/pkg/logtree"
-	lpb "source.monogon.dev/metropolis/pkg/logtree/proto"
 	"source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
+	"source.monogon.dev/osbase/logtree"
+	lpb "source.monogon.dev/osbase/logtree/proto"
 )
 
 func dut(t *testing.T) (*Service, *grpc.ClientConn) {
diff --git a/metropolis/node/core/network/BUILD.bazel b/metropolis/node/core/network/BUILD.bazel
index 52e4614..07120e4 100644
--- a/metropolis/node/core/network/BUILD.bazel
+++ b/metropolis/node/core/network/BUILD.bazel
@@ -14,11 +14,11 @@
         "//metropolis/node/core/network/dhcp4c",
         "//metropolis/node/core/network/dhcp4c/callback",
         "//metropolis/node/core/network/dns",
-        "//metropolis/pkg/event/memory",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/supervisor",
-        "//metropolis/pkg/sysctl",
         "//net/proto",
+        "//osbase/event/memory",
+        "//osbase/logtree",
+        "//osbase/supervisor",
+        "//osbase/sysctl",
         "@com_github_google_nftables//:nftables",
         "@com_github_google_nftables//expr",
         "@com_github_insomniacslk_dhcp//dhcpv4",
diff --git a/metropolis/node/core/network/dhcp4c/BUILD.bazel b/metropolis/node/core/network/dhcp4c/BUILD.bazel
index d514f2d..24dbe83 100644
--- a/metropolis/node/core/network/dhcp4c/BUILD.bazel
+++ b/metropolis/node/core/network/dhcp4c/BUILD.bazel
@@ -15,7 +15,7 @@
     ],
     deps = [
         "//metropolis/node/core/network/dhcp4c/transport",
-        "//metropolis/pkg/supervisor",
+        "//osbase/supervisor",
         "@com_github_cenkalti_backoff_v4//:backoff",
         "@com_github_insomniacslk_dhcp//dhcpv4",
         "@com_github_insomniacslk_dhcp//iana",
diff --git a/metropolis/node/core/network/dhcp4c/callback/BUILD.bazel b/metropolis/node/core/network/dhcp4c/callback/BUILD.bazel
index d841c6e..89ad035 100644
--- a/metropolis/node/core/network/dhcp4c/callback/BUILD.bazel
+++ b/metropolis/node/core/network/dhcp4c/callback/BUILD.bazel
@@ -1,5 +1,5 @@
 load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
+load("//osbase/test/ktest:ktest.bzl", "ktest")
 
 go_library(
     name = "callback",
diff --git a/metropolis/node/core/network/dhcp4c/dhcpc.go b/metropolis/node/core/network/dhcp4c/dhcpc.go
index 76347e8..d14dee8 100644
--- a/metropolis/node/core/network/dhcp4c/dhcpc.go
+++ b/metropolis/node/core/network/dhcp4c/dhcpc.go
@@ -37,7 +37,7 @@
 	"github.com/insomniacslk/dhcp/iana"
 
 	"source.monogon.dev/metropolis/node/core/network/dhcp4c/transport"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type state int
diff --git a/metropolis/node/core/network/dns/BUILD.bazel b/metropolis/node/core/network/dns/BUILD.bazel
index 4dccf49..6d51db1 100644
--- a/metropolis/node/core/network/dns/BUILD.bazel
+++ b/metropolis/node/core/network/dns/BUILD.bazel
@@ -9,8 +9,8 @@
     importpath = "source.monogon.dev/metropolis/node/core/network/dns",
     visibility = ["//metropolis/node:__subpackages__"],
     deps = [
-        "//metropolis/pkg/fileargs",
-        "//metropolis/pkg/supervisor",
+        "//osbase/fileargs",
+        "//osbase/supervisor",
         "@org_golang_x_sys//unix",
     ],
 )
diff --git a/metropolis/node/core/network/dns/coredns.go b/metropolis/node/core/network/dns/coredns.go
index af4562b..472ab5c 100644
--- a/metropolis/node/core/network/dns/coredns.go
+++ b/metropolis/node/core/network/dns/coredns.go
@@ -29,8 +29,8 @@
 
 	"golang.org/x/sys/unix"
 
-	"source.monogon.dev/metropolis/pkg/fileargs"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/fileargs"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 const corefileBase = `
diff --git a/metropolis/node/core/network/hostsfile/BUILD.bazel b/metropolis/node/core/network/hostsfile/BUILD.bazel
index 51b7f4f..2490493 100644
--- a/metropolis/node/core/network/hostsfile/BUILD.bazel
+++ b/metropolis/node/core/network/hostsfile/BUILD.bazel
@@ -10,9 +10,9 @@
         "//metropolis/node/core/curator/watcher",
         "//metropolis/node/core/localstorage",
         "//metropolis/node/core/network",
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/common",
+        "//osbase/event",
+        "//osbase/supervisor",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_protobuf//proto",
         "@org_golang_x_sys//unix",
diff --git a/metropolis/node/core/network/hostsfile/hostsfile.go b/metropolis/node/core/network/hostsfile/hostsfile.go
index c09ea29..883d8cf 100644
--- a/metropolis/node/core/network/hostsfile/hostsfile.go
+++ b/metropolis/node/core/network/hostsfile/hostsfile.go
@@ -30,8 +30,8 @@
 	"source.monogon.dev/metropolis/node/core/curator/watcher"
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/network"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/supervisor"
 
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
diff --git a/metropolis/node/core/network/main.go b/metropolis/node/core/network/main.go
index cc57aa5..487a0eb 100644
--- a/metropolis/node/core/network/main.go
+++ b/metropolis/node/core/network/main.go
@@ -30,9 +30,9 @@
 	"source.monogon.dev/metropolis/node/core/network/dhcp4c"
 	dhcpcb "source.monogon.dev/metropolis/node/core/network/dhcp4c/callback"
 	"source.monogon.dev/metropolis/node/core/network/dns"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
-	"source.monogon.dev/metropolis/pkg/sysctl"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
+	"source.monogon.dev/osbase/sysctl"
 
 	netpb "source.monogon.dev/net/proto"
 )
diff --git a/metropolis/node/core/network/quirks.go b/metropolis/node/core/network/quirks.go
index 6dd5808..6a3f5cc 100644
--- a/metropolis/node/core/network/quirks.go
+++ b/metropolis/node/core/network/quirks.go
@@ -9,7 +9,7 @@
 	"github.com/vishvananda/netlink"
 	"golang.org/x/sys/unix"
 
-	"source.monogon.dev/metropolis/pkg/logtree"
+	"source.monogon.dev/osbase/logtree"
 )
 
 // applyQuirks applies settings to drivers and/or hardware to make it work
diff --git a/metropolis/node/core/network/static.go b/metropolis/node/core/network/static.go
index 0bbbab6..d69815c 100644
--- a/metropolis/node/core/network/static.go
+++ b/metropolis/node/core/network/static.go
@@ -19,9 +19,9 @@
 	"source.monogon.dev/metropolis/node/core/network/dhcp4c"
 	dhcpcb "source.monogon.dev/metropolis/node/core/network/dhcp4c/callback"
 	"source.monogon.dev/metropolis/node/core/network/dns"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
-	"source.monogon.dev/metropolis/pkg/sysctl"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
+	"source.monogon.dev/osbase/sysctl"
 
 	netpb "source.monogon.dev/net/proto"
 )
diff --git a/metropolis/node/core/nodeparams.go b/metropolis/node/core/nodeparams.go
index cb365d5..bc3c5f0 100644
--- a/metropolis/node/core/nodeparams.go
+++ b/metropolis/node/core/nodeparams.go
@@ -16,7 +16,7 @@
 	apb "source.monogon.dev/metropolis/proto/api"
 
 	"source.monogon.dev/metropolis/node/core/localstorage"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 func nodeParamsFWCFG(ctx context.Context) (*apb.NodeParameters, error) {
diff --git a/metropolis/node/core/panichandler.go b/metropolis/node/core/panichandler.go
index b336103..8a6ef6c 100644
--- a/metropolis/node/core/panichandler.go
+++ b/metropolis/node/core/panichandler.go
@@ -14,7 +14,7 @@
 
 	"golang.org/x/sys/unix"
 
-	"source.monogon.dev/metropolis/pkg/logtree"
+	"source.monogon.dev/osbase/logtree"
 )
 
 // This hooks into a global variable which is checked by runtime.write and used
diff --git a/metropolis/node/core/pstore.go b/metropolis/node/core/pstore.go
index c8863e5..dfa30f0 100644
--- a/metropolis/node/core/pstore.go
+++ b/metropolis/node/core/pstore.go
@@ -3,8 +3,8 @@
 import (
 	"context"
 
-	"source.monogon.dev/metropolis/pkg/pstore"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/pstore"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // dumpAndCleanPstore dumps all files accumulated in the pstore into the log
diff --git a/metropolis/node/core/roleserve/BUILD.bazel b/metropolis/node/core/roleserve/BUILD.bazel
index afad843..5d72c70 100644
--- a/metropolis/node/core/roleserve/BUILD.bazel
+++ b/metropolis/node/core/roleserve/BUILD.bazel
@@ -36,13 +36,13 @@
         "//metropolis/node/kubernetes",
         "//metropolis/node/kubernetes/containerd",
         "//metropolis/node/kubernetes/pki",
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/event/memory",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/pki",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/common",
         "//metropolis/version",
+        "//osbase/event",
+        "//osbase/event/memory",
+        "//osbase/logtree",
+        "//osbase/pki",
+        "//osbase/supervisor",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_protobuf//encoding/prototext",
         "@org_golang_google_protobuf//proto",
@@ -60,10 +60,10 @@
         "//metropolis/node/core/consensus",
         "//metropolis/node/core/curator",
         "//metropolis/node/core/curator/proto/api",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/common",
         "//metropolis/test/util",
         "//metropolis/version",
+        "//osbase/supervisor",
         "@com_github_cenkalti_backoff_v4//:backoff",
         "@com_github_google_go_cmp//cmp",
         "@org_golang_google_grpc//:go_default_library",
diff --git a/metropolis/node/core/roleserve/roleserve.go b/metropolis/node/core/roleserve/roleserve.go
index ad8207b..f03a83b 100644
--- a/metropolis/node/core/roleserve/roleserve.go
+++ b/metropolis/node/core/roleserve/roleserve.go
@@ -47,10 +47,10 @@
 	"source.monogon.dev/metropolis/node/core/network"
 	"source.monogon.dev/metropolis/node/core/rpc/resolver"
 	"source.monogon.dev/metropolis/node/core/update"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	cpb "source.monogon.dev/metropolis/proto/common"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // Config is the configuration of the role server.
diff --git a/metropolis/node/core/roleserve/worker_clusternet.go b/metropolis/node/core/roleserve/worker_clusternet.go
index 1eb5649..a2db859 100644
--- a/metropolis/node/core/roleserve/worker_clusternet.go
+++ b/metropolis/node/core/roleserve/worker_clusternet.go
@@ -7,8 +7,8 @@
 	"source.monogon.dev/metropolis/node/core/clusternet"
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/network"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 )
diff --git a/metropolis/node/core/roleserve/worker_controlplane.go b/metropolis/node/core/roleserve/worker_controlplane.go
index 777f887..dbae49f 100644
--- a/metropolis/node/core/roleserve/worker_controlplane.go
+++ b/metropolis/node/core/roleserve/worker_controlplane.go
@@ -12,11 +12,11 @@
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/rpc/resolver"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/pki"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	cpb "source.monogon.dev/metropolis/proto/common"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/pki"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // workerControlPlane is the Control Plane Worker, responsible for maintaining a
diff --git a/metropolis/node/core/roleserve/worker_heartbeat.go b/metropolis/node/core/roleserve/worker_heartbeat.go
index 6ba07c5..7a77cb9 100644
--- a/metropolis/node/core/roleserve/worker_heartbeat.go
+++ b/metropolis/node/core/roleserve/worker_heartbeat.go
@@ -9,8 +9,8 @@
 	"source.monogon.dev/metropolis/node/core/curator"
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	"source.monogon.dev/metropolis/node/core/network"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // workerHeartbeat is a service that periodically updates node's heartbeat
diff --git a/metropolis/node/core/roleserve/worker_hostsfile.go b/metropolis/node/core/roleserve/worker_hostsfile.go
index 8574235..6e1a1ca 100644
--- a/metropolis/node/core/roleserve/worker_hostsfile.go
+++ b/metropolis/node/core/roleserve/worker_hostsfile.go
@@ -6,8 +6,8 @@
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/core/network"
 	"source.monogon.dev/metropolis/node/core/network/hostsfile"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 )
diff --git a/metropolis/node/core/roleserve/worker_kubernetes.go b/metropolis/node/core/roleserve/worker_kubernetes.go
index c3fcfb3..eaa092a 100644
--- a/metropolis/node/core/roleserve/worker_kubernetes.go
+++ b/metropolis/node/core/roleserve/worker_kubernetes.go
@@ -13,10 +13,10 @@
 	"source.monogon.dev/metropolis/node/kubernetes"
 	"source.monogon.dev/metropolis/node/kubernetes/containerd"
 	kpki "source.monogon.dev/metropolis/node/kubernetes/pki"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	cpb "source.monogon.dev/metropolis/proto/common"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // workerKubernetes is the Kubernetes Worker, responsible for launching
diff --git a/metropolis/node/core/roleserve/worker_metrics.go b/metropolis/node/core/roleserve/worker_metrics.go
index 26ec940..e2a78b8 100644
--- a/metropolis/node/core/roleserve/worker_metrics.go
+++ b/metropolis/node/core/roleserve/worker_metrics.go
@@ -8,8 +8,8 @@
 	cpb "source.monogon.dev/metropolis/proto/common"
 
 	"source.monogon.dev/metropolis/node/core/metrics"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // workerMetrics runs the Metrics Service, which runs local Prometheus collectors
diff --git a/metropolis/node/core/roleserve/worker_nodemgmt.go b/metropolis/node/core/roleserve/worker_nodemgmt.go
index 17fd0d4..52f1086 100644
--- a/metropolis/node/core/roleserve/worker_nodemgmt.go
+++ b/metropolis/node/core/roleserve/worker_nodemgmt.go
@@ -5,9 +5,9 @@
 
 	"source.monogon.dev/metropolis/node/core/mgmt"
 	"source.monogon.dev/metropolis/node/core/update"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type workerNodeMgmt struct {
diff --git a/metropolis/node/core/roleserve/worker_rolefetch.go b/metropolis/node/core/roleserve/worker_rolefetch.go
index aaac076..726fc69 100644
--- a/metropolis/node/core/roleserve/worker_rolefetch.go
+++ b/metropolis/node/core/roleserve/worker_rolefetch.go
@@ -7,8 +7,8 @@
 
 	"source.monogon.dev/metropolis/node/core/curator/watcher"
 	"source.monogon.dev/metropolis/node/core/localstorage"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
diff --git a/metropolis/node/core/roleserve/worker_statuspush.go b/metropolis/node/core/roleserve/worker_statuspush.go
index 63ab227..1b269c6 100644
--- a/metropolis/node/core/roleserve/worker_statuspush.go
+++ b/metropolis/node/core/roleserve/worker_statuspush.go
@@ -8,10 +8,10 @@
 
 	common "source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/network"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	"source.monogon.dev/metropolis/version"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
diff --git a/metropolis/node/core/roleserve/worker_statuspush_test.go b/metropolis/node/core/roleserve/worker_statuspush_test.go
index 06e48e8..39222bd 100644
--- a/metropolis/node/core/roleserve/worker_statuspush_test.go
+++ b/metropolis/node/core/roleserve/worker_statuspush_test.go
@@ -18,9 +18,9 @@
 	common "source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/consensus"
 	"source.monogon.dev/metropolis/node/core/curator"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	"source.monogon.dev/metropolis/test/util"
 	mversion "source.monogon.dev/metropolis/version"
+	"source.monogon.dev/osbase/supervisor"
 
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
diff --git a/metropolis/node/core/rpc/BUILD.bazel b/metropolis/node/core/rpc/BUILD.bazel
index bdddb1e..b1b4bfc 100644
--- a/metropolis/node/core/rpc/BUILD.bazel
+++ b/metropolis/node/core/rpc/BUILD.bazel
@@ -14,9 +14,9 @@
     visibility = ["//visibility:public"],
     deps = [
         "//metropolis/node/core/identity",
-        "//metropolis/pkg/logtree",
         "//metropolis/proto/api",
         "//metropolis/proto/ext",
+        "//osbase/logtree",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//credentials",
@@ -38,10 +38,10 @@
     embed = [":rpc"],
     deps = [
         "//metropolis/node/core/curator/proto/api",
-        "//metropolis/pkg/logtree",
         "//metropolis/proto/api",
         "//metropolis/proto/ext",
         "//metropolis/test/util",
+        "//osbase/logtree",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//status",
diff --git a/metropolis/node/core/rpc/server_authentication.go b/metropolis/node/core/rpc/server_authentication.go
index f847126..c7d6e91 100644
--- a/metropolis/node/core/rpc/server_authentication.go
+++ b/metropolis/node/core/rpc/server_authentication.go
@@ -13,7 +13,7 @@
 	"google.golang.org/grpc/status"
 
 	"source.monogon.dev/metropolis/node/core/identity"
-	"source.monogon.dev/metropolis/pkg/logtree"
+	"source.monogon.dev/osbase/logtree"
 )
 
 // ServerSecurity are the security options of a RPC server that will run
diff --git a/metropolis/node/core/rpc/trace.go b/metropolis/node/core/rpc/trace.go
index 0d43806..a686c06 100644
--- a/metropolis/node/core/rpc/trace.go
+++ b/metropolis/node/core/rpc/trace.go
@@ -10,7 +10,7 @@
 	"google.golang.org/protobuf/encoding/prototext"
 	"google.golang.org/protobuf/proto"
 
-	"source.monogon.dev/metropolis/pkg/logtree"
+	"source.monogon.dev/osbase/logtree"
 )
 
 // Span implements a compatible subset of
diff --git a/metropolis/node/core/rpc/trace_test.go b/metropolis/node/core/rpc/trace_test.go
index 750ffe8..9bed935 100644
--- a/metropolis/node/core/rpc/trace_test.go
+++ b/metropolis/node/core/rpc/trace_test.go
@@ -6,7 +6,7 @@
 	"strings"
 	"testing"
 
-	"source.monogon.dev/metropolis/pkg/logtree"
+	"source.monogon.dev/osbase/logtree"
 )
 
 // TestSpanRecording exercises the span->logtree forwarding functionality by
diff --git a/metropolis/node/core/sysctl.go b/metropolis/node/core/sysctl.go
index eb72aa3..01b576c 100644
--- a/metropolis/node/core/sysctl.go
+++ b/metropolis/node/core/sysctl.go
@@ -4,8 +4,8 @@
 	"context"
 	"strconv"
 
-	"source.monogon.dev/metropolis/pkg/supervisor"
-	"source.monogon.dev/metropolis/pkg/sysctl"
+	"source.monogon.dev/osbase/supervisor"
+	"source.monogon.dev/osbase/sysctl"
 )
 
 func nodeSysctls(ctx context.Context) error {
diff --git a/metropolis/node/core/time/BUILD.bazel b/metropolis/node/core/time/BUILD.bazel
index fb9af73..f113153 100644
--- a/metropolis/node/core/time/BUILD.bazel
+++ b/metropolis/node/core/time/BUILD.bazel
@@ -7,7 +7,7 @@
     visibility = ["//visibility:public"],
     deps = [
         "//metropolis/node",
-        "//metropolis/pkg/fileargs",
-        "//metropolis/pkg/supervisor",
+        "//osbase/fileargs",
+        "//osbase/supervisor",
     ],
 )
diff --git a/metropolis/node/core/time/time.go b/metropolis/node/core/time/time.go
index f57e892..3e37a0c 100644
--- a/metropolis/node/core/time/time.go
+++ b/metropolis/node/core/time/time.go
@@ -18,8 +18,8 @@
 	"strings"
 
 	"source.monogon.dev/metropolis/node"
-	"source.monogon.dev/metropolis/pkg/fileargs"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/fileargs"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // Service implements the time service. See package documentation for further
diff --git a/metropolis/node/core/update/BUILD.bazel b/metropolis/node/core/update/BUILD.bazel
index bc5561a..30ca20b 100644
--- a/metropolis/node/core/update/BUILD.bazel
+++ b/metropolis/node/core/update/BUILD.bazel
@@ -11,11 +11,11 @@
     deps = [
         "//metropolis/node/build/mkimage/osimage",
         "//metropolis/node/core/abloader/spec",
-        "//metropolis/pkg/blockdev",
-        "//metropolis/pkg/efivarfs",
-        "//metropolis/pkg/gpt",
-        "//metropolis/pkg/kexec",
-        "//metropolis/pkg/logtree",
+        "//osbase/blockdev",
+        "//osbase/efivarfs",
+        "//osbase/gpt",
+        "//osbase/kexec",
+        "//osbase/logtree",
         "@com_github_cenkalti_backoff_v4//:backoff",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//status",
diff --git a/metropolis/node/core/update/e2e/BUILD.bazel b/metropolis/node/core/update/e2e/BUILD.bazel
index 0b3fbce..b96006c 100644
--- a/metropolis/node/core/update/e2e/BUILD.bazel
+++ b/metropolis/node/core/update/e2e/BUILD.bazel
@@ -16,8 +16,8 @@
     ],
     deps = [
         "//metropolis/node/build/mkimage/osimage",
-        "//metropolis/pkg/blkio",
-        "//metropolis/pkg/blockdev",
+        "//osbase/blkio",
+        "//osbase/blockdev",
         "@io_bazel_rules_go//go/runfiles:go_default_library",
     ],
 )
diff --git a/metropolis/node/core/update/e2e/e2e_test.go b/metropolis/node/core/update/e2e/e2e_test.go
index 0f712a8..07c6bf4 100644
--- a/metropolis/node/core/update/e2e/e2e_test.go
+++ b/metropolis/node/core/update/e2e/e2e_test.go
@@ -19,8 +19,8 @@
 	"github.com/bazelbuild/rules_go/go/runfiles"
 
 	"source.monogon.dev/metropolis/node/build/mkimage/osimage"
-	"source.monogon.dev/metropolis/pkg/blkio"
-	"source.monogon.dev/metropolis/pkg/blockdev"
+	"source.monogon.dev/osbase/blkio"
+	"source.monogon.dev/osbase/blockdev"
 )
 
 const Mi = 1024 * 1024
diff --git a/metropolis/node/core/update/e2e/testos/BUILD.bazel b/metropolis/node/core/update/e2e/testos/BUILD.bazel
index 79fd0f9..275a44c 100644
--- a/metropolis/node/core/update/e2e/testos/BUILD.bazel
+++ b/metropolis/node/core/update/e2e/testos/BUILD.bazel
@@ -16,10 +16,10 @@
         "//metropolis/node/build/mkimage/osimage",
         "//metropolis/node/core/network",
         "//metropolis/node/core/update",
-        "//metropolis/pkg/blockdev",
-        "//metropolis/pkg/gpt",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/supervisor",
+        "//osbase/blockdev",
+        "//osbase/gpt",
+        "//osbase/logtree",
+        "//osbase/supervisor",
         "@org_golang_x_sys//unix",
     ],
 )
diff --git a/metropolis/node/core/update/e2e/testos/main.go b/metropolis/node/core/update/e2e/testos/main.go
index 95f865c..e77a4e2 100644
--- a/metropolis/node/core/update/e2e/testos/main.go
+++ b/metropolis/node/core/update/e2e/testos/main.go
@@ -11,10 +11,10 @@
 	"source.monogon.dev/metropolis/node/build/mkimage/osimage"
 	"source.monogon.dev/metropolis/node/core/network"
 	"source.monogon.dev/metropolis/node/core/update"
-	"source.monogon.dev/metropolis/pkg/blockdev"
-	"source.monogon.dev/metropolis/pkg/gpt"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/blockdev"
+	"source.monogon.dev/osbase/gpt"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 var Variant = "U"
diff --git a/metropolis/node/core/update/update.go b/metropolis/node/core/update/update.go
index 8490c78..28b2381 100644
--- a/metropolis/node/core/update/update.go
+++ b/metropolis/node/core/update/update.go
@@ -25,11 +25,11 @@
 
 	"source.monogon.dev/metropolis/node/build/mkimage/osimage"
 	abloaderpb "source.monogon.dev/metropolis/node/core/abloader/spec"
-	"source.monogon.dev/metropolis/pkg/blockdev"
-	"source.monogon.dev/metropolis/pkg/efivarfs"
-	"source.monogon.dev/metropolis/pkg/gpt"
-	"source.monogon.dev/metropolis/pkg/kexec"
-	"source.monogon.dev/metropolis/pkg/logtree"
+	"source.monogon.dev/osbase/blockdev"
+	"source.monogon.dev/osbase/efivarfs"
+	"source.monogon.dev/osbase/gpt"
+	"source.monogon.dev/osbase/kexec"
+	"source.monogon.dev/osbase/logtree"
 )
 
 // Service contains data and functionality to perform A/B updates on a
diff --git a/metropolis/node/kubernetes/BUILD.bazel b/metropolis/node/kubernetes/BUILD.bazel
index 854f6c3..08666ab 100644
--- a/metropolis/node/kubernetes/BUILD.bazel
+++ b/metropolis/node/kubernetes/BUILD.bazel
@@ -33,14 +33,14 @@
         "//metropolis/node/kubernetes/pki",
         "//metropolis/node/kubernetes/plugins/kvmdevice",
         "//metropolis/node/kubernetes/reconciler",
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/event/memory",
-        "//metropolis/pkg/fileargs",
-        "//metropolis/pkg/fsquota",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/loop",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/api",
+        "//osbase/event",
+        "//osbase/event/memory",
+        "//osbase/fileargs",
+        "//osbase/fsquota",
+        "//osbase/logtree",
+        "//osbase/loop",
+        "//osbase/supervisor",
         "@com_github_container_storage_interface_spec//lib/go/csi",
         "@io_k8s_api//core/v1:core",
         "@io_k8s_api//storage/v1:storage",
diff --git a/metropolis/node/kubernetes/apiproxy.go b/metropolis/node/kubernetes/apiproxy.go
index d937824..0289111 100644
--- a/metropolis/node/kubernetes/apiproxy.go
+++ b/metropolis/node/kubernetes/apiproxy.go
@@ -8,7 +8,7 @@
 	"source.monogon.dev/metropolis/node"
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	"source.monogon.dev/metropolis/node/core/curator/watcher"
-	"source.monogon.dev/metropolis/pkg/event/memory"
+	"source.monogon.dev/osbase/event/memory"
 )
 
 // updateLoadBalancerAPIServers provides a tinylb BackendSet memory value with
diff --git a/metropolis/node/kubernetes/apiserver.go b/metropolis/node/kubernetes/apiserver.go
index 70c4707..45b2582 100644
--- a/metropolis/node/kubernetes/apiserver.go
+++ b/metropolis/node/kubernetes/apiserver.go
@@ -34,8 +34,8 @@
 	common "source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/kubernetes/pki"
-	"source.monogon.dev/metropolis/pkg/fileargs"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/fileargs"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type apiserverService struct {
diff --git a/metropolis/node/kubernetes/authproxy/BUILD.bazel b/metropolis/node/kubernetes/authproxy/BUILD.bazel
index 9cf57cb..263e846 100644
--- a/metropolis/node/kubernetes/authproxy/BUILD.bazel
+++ b/metropolis/node/kubernetes/authproxy/BUILD.bazel
@@ -9,7 +9,7 @@
         "//metropolis/node",
         "//metropolis/node/core/identity",
         "//metropolis/node/kubernetes/pki",
-        "//metropolis/pkg/supervisor",
+        "//osbase/supervisor",
         "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
     ],
 )
diff --git a/metropolis/node/kubernetes/authproxy/authproxy.go b/metropolis/node/kubernetes/authproxy/authproxy.go
index c4a844e..9b24d19 100644
--- a/metropolis/node/kubernetes/authproxy/authproxy.go
+++ b/metropolis/node/kubernetes/authproxy/authproxy.go
@@ -20,7 +20,7 @@
 	"source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/kubernetes/pki"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type Service struct {
diff --git a/metropolis/node/kubernetes/clusternet/BUILD.bazel b/metropolis/node/kubernetes/clusternet/BUILD.bazel
index 2452b74..6b4d744 100644
--- a/metropolis/node/kubernetes/clusternet/BUILD.bazel
+++ b/metropolis/node/kubernetes/clusternet/BUILD.bazel
@@ -7,9 +7,9 @@
     visibility = ["//metropolis/node/kubernetes:__subpackages__"],
     deps = [
         "//metropolis/node/core/clusternet",
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/supervisor",
+        "//osbase/event",
+        "//osbase/logtree",
+        "//osbase/supervisor",
         "@io_k8s_api//core/v1:core",
         "@io_k8s_apimachinery//pkg/fields",
         "@io_k8s_client_go//kubernetes",
diff --git a/metropolis/node/kubernetes/clusternet/clusternet.go b/metropolis/node/kubernetes/clusternet/clusternet.go
index 7b51c30..28e268d 100644
--- a/metropolis/node/kubernetes/clusternet/clusternet.go
+++ b/metropolis/node/kubernetes/clusternet/clusternet.go
@@ -42,9 +42,9 @@
 	"k8s.io/client-go/tools/cache"
 
 	oclusternet "source.monogon.dev/metropolis/node/core/clusternet"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type Service struct {
diff --git a/metropolis/node/kubernetes/containerd/BUILD.bazel b/metropolis/node/kubernetes/containerd/BUILD.bazel
index ef6606d..0c61925 100644
--- a/metropolis/node/kubernetes/containerd/BUILD.bazel
+++ b/metropolis/node/kubernetes/containerd/BUILD.bazel
@@ -7,7 +7,7 @@
     visibility = ["//metropolis/node/core:__subpackages__"],
     deps = [
         "//metropolis/node/core/localstorage",
-        "//metropolis/pkg/supervisor",
+        "//osbase/supervisor",
         "@com_github_containerd_containerd//:containerd",
         "@com_github_containerd_containerd//namespaces",
     ],
diff --git a/metropolis/node/kubernetes/containerd/main.go b/metropolis/node/kubernetes/containerd/main.go
index 28fc98d..6782137 100644
--- a/metropolis/node/kubernetes/containerd/main.go
+++ b/metropolis/node/kubernetes/containerd/main.go
@@ -30,7 +30,7 @@
 	"github.com/containerd/containerd/namespaces"
 
 	"source.monogon.dev/metropolis/node/core/localstorage"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 const (
diff --git a/metropolis/node/kubernetes/controller-manager.go b/metropolis/node/kubernetes/controller-manager.go
index 71918f9..0a49ce1 100644
--- a/metropolis/node/kubernetes/controller-manager.go
+++ b/metropolis/node/kubernetes/controller-manager.go
@@ -24,8 +24,8 @@
 	"os/exec"
 
 	"source.monogon.dev/metropolis/node/kubernetes/pki"
-	"source.monogon.dev/metropolis/pkg/fileargs"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/fileargs"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type controllerManagerConfig struct {
diff --git a/metropolis/node/kubernetes/csi.go b/metropolis/node/kubernetes/csi.go
index c0d81bb..6f6c3a3 100644
--- a/metropolis/node/kubernetes/csi.go
+++ b/metropolis/node/kubernetes/csi.go
@@ -34,10 +34,10 @@
 	pluginregistration "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
 
 	"source.monogon.dev/metropolis/node/core/localstorage"
-	"source.monogon.dev/metropolis/pkg/fsquota"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/loop"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/fsquota"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/loop"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // Derived from K8s spec for acceptable names, but shortened to 130 characters
diff --git a/metropolis/node/kubernetes/kubelet.go b/metropolis/node/kubernetes/kubelet.go
index 1fefbca..19a79b2 100644
--- a/metropolis/node/kubernetes/kubelet.go
+++ b/metropolis/node/kubernetes/kubelet.go
@@ -33,8 +33,8 @@
 	"source.monogon.dev/metropolis/node/core/localstorage"
 	"source.monogon.dev/metropolis/node/kubernetes/pki"
 	"source.monogon.dev/metropolis/node/kubernetes/reconciler"
-	"source.monogon.dev/metropolis/pkg/fileargs"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/fileargs"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type kubeletService struct {
diff --git a/metropolis/node/kubernetes/metricsproxy/BUILD.bazel b/metropolis/node/kubernetes/metricsproxy/BUILD.bazel
index 95f8b2c..7f89450 100644
--- a/metropolis/node/kubernetes/metricsproxy/BUILD.bazel
+++ b/metropolis/node/kubernetes/metricsproxy/BUILD.bazel
@@ -8,7 +8,7 @@
     deps = [
         "//metropolis/node",
         "//metropolis/node/kubernetes/pki",
-        "//metropolis/pkg/supervisor",
+        "//osbase/supervisor",
         "@io_k8s_kubernetes//cmd/kubeadm/app/constants",
     ],
 )
diff --git a/metropolis/node/kubernetes/metricsproxy/metricsproxy.go b/metropolis/node/kubernetes/metricsproxy/metricsproxy.go
index bc44a34..afa142d 100644
--- a/metropolis/node/kubernetes/metricsproxy/metricsproxy.go
+++ b/metropolis/node/kubernetes/metricsproxy/metricsproxy.go
@@ -16,7 +16,7 @@
 
 	"source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/kubernetes/pki"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type Service struct {
diff --git a/metropolis/node/kubernetes/nfproxy/BUILD.bazel b/metropolis/node/kubernetes/nfproxy/BUILD.bazel
index 539516a..b445c63 100644
--- a/metropolis/node/kubernetes/nfproxy/BUILD.bazel
+++ b/metropolis/node/kubernetes/nfproxy/BUILD.bazel
@@ -6,7 +6,7 @@
     importpath = "source.monogon.dev/metropolis/node/kubernetes/nfproxy",
     visibility = ["//metropolis/node/kubernetes:__subpackages__"],
     deps = [
-        "//metropolis/pkg/supervisor",
+        "//osbase/supervisor",
         "@com_github_sbezverk_nfproxy//pkg/controller",
         "@com_github_sbezverk_nfproxy//pkg/nftables",
         "@com_github_sbezverk_nfproxy//pkg/proxy",
diff --git a/metropolis/node/kubernetes/nfproxy/nfproxy.go b/metropolis/node/kubernetes/nfproxy/nfproxy.go
index 637be7b..8e0450b 100644
--- a/metropolis/node/kubernetes/nfproxy/nfproxy.go
+++ b/metropolis/node/kubernetes/nfproxy/nfproxy.go
@@ -40,7 +40,7 @@
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type Service struct {
diff --git a/metropolis/node/kubernetes/pki/BUILD.bazel b/metropolis/node/kubernetes/pki/BUILD.bazel
index 1471fd3..60121c7 100644
--- a/metropolis/node/kubernetes/pki/BUILD.bazel
+++ b/metropolis/node/kubernetes/pki/BUILD.bazel
@@ -8,7 +8,7 @@
     deps = [
         "//metropolis/node",
         "//metropolis/node/core/consensus",
-        "//metropolis/pkg/pki",
+        "//osbase/pki",
         "@io_etcd_go_etcd_client_v3//:client",
         "@io_k8s_client_go//tools/clientcmd",
         "@io_k8s_client_go//tools/clientcmd/api",
diff --git a/metropolis/node/kubernetes/pki/kubernetes.go b/metropolis/node/kubernetes/pki/kubernetes.go
index 66731ae..5ea1e3e 100644
--- a/metropolis/node/kubernetes/pki/kubernetes.go
+++ b/metropolis/node/kubernetes/pki/kubernetes.go
@@ -14,7 +14,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// package pki builds upon metropolis/pkg/pki/ to provide an
+// package pki builds upon osbase/pki/ to provide an
 // etcd-backed implementation of all x509 PKI Certificates/CAs required to run
 // Kubernetes.
 // Most elements of the PKI are 'static' long-standing certificates/credentials
@@ -41,7 +41,7 @@
 
 	common "source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/consensus"
-	opki "source.monogon.dev/metropolis/pkg/pki"
+	opki "source.monogon.dev/osbase/pki"
 )
 
 // KubeCertificateName is an enum-like unique name of a static Kubernetes
diff --git a/metropolis/node/kubernetes/plugins/kvmdevice/BUILD.bazel b/metropolis/node/kubernetes/plugins/kvmdevice/BUILD.bazel
index 7d9b43f..c8b4ca4 100644
--- a/metropolis/node/kubernetes/plugins/kvmdevice/BUILD.bazel
+++ b/metropolis/node/kubernetes/plugins/kvmdevice/BUILD.bazel
@@ -7,8 +7,8 @@
     visibility = ["//visibility:public"],
     deps = [
         "//metropolis/node/core/localstorage",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/supervisor",
+        "//osbase/logtree",
+        "//osbase/supervisor",
         "@io_k8s_api//core/v1:core",
         "@io_k8s_kubelet//pkg/apis/deviceplugin/v1beta1",
         "@io_k8s_kubelet//pkg/apis/pluginregistration/v1:pluginregistration",
diff --git a/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go b/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
index 90fdc44..f285c47 100644
--- a/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
+++ b/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
@@ -40,8 +40,8 @@
 	pluginregistration "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
 
 	"source.monogon.dev/metropolis/node/core/localstorage"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // Name is the name of the KVM devices this plugin exposes
diff --git a/metropolis/node/kubernetes/provisioner.go b/metropolis/node/kubernetes/provisioner.go
index 38de7df..923302e 100644
--- a/metropolis/node/kubernetes/provisioner.go
+++ b/metropolis/node/kubernetes/provisioner.go
@@ -40,9 +40,9 @@
 	"k8s.io/client-go/util/workqueue"
 
 	"source.monogon.dev/metropolis/node/core/localstorage"
-	"source.monogon.dev/metropolis/pkg/fsquota"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/fsquota"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to
diff --git a/metropolis/node/kubernetes/reconciler/BUILD.bazel b/metropolis/node/kubernetes/reconciler/BUILD.bazel
index f7bb47f..1616787 100644
--- a/metropolis/node/kubernetes/reconciler/BUILD.bazel
+++ b/metropolis/node/kubernetes/reconciler/BUILD.bazel
@@ -16,10 +16,10 @@
         "//metropolis/node/core/consensus/client",
         "//metropolis/node/core/curator",
         "//metropolis/node/core/curator/proto/private",
-        "//metropolis/pkg/event/etcd",
-        "//metropolis/pkg/event/memory",
-        "//metropolis/pkg/supervisor",
         "//metropolis/version",
+        "//osbase/event/etcd",
+        "//osbase/event/memory",
+        "//osbase/supervisor",
         "//version",
         "//version/spec",
         "@com_github_cenkalti_backoff_v4//:backoff",
@@ -50,9 +50,9 @@
         "//metropolis/node/core/consensus/client",
         "//metropolis/node/core/curator",
         "//metropolis/node/core/curator/proto/private",
-        "//metropolis/pkg/supervisor",
         "//metropolis/proto/common",
         "//metropolis/version",
+        "//osbase/supervisor",
         "//version",
         "//version/spec",
         "@io_etcd_go_etcd_tests_v3//integration",
diff --git a/metropolis/node/kubernetes/reconciler/reconciler.go b/metropolis/node/kubernetes/reconciler/reconciler.go
index 3bfaa4d..523b31b 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler.go
@@ -38,7 +38,7 @@
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // True is a sad workaround for all the pointer booleans in K8s specs
diff --git a/metropolis/node/kubernetes/reconciler/reconciler_status.go b/metropolis/node/kubernetes/reconciler/reconciler_status.go
index 4abf6f8..b139e0a 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler_status.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler_status.go
@@ -16,10 +16,10 @@
 	"source.monogon.dev/metropolis/node/core/consensus/client"
 	"source.monogon.dev/metropolis/node/core/curator"
 	ppb "source.monogon.dev/metropolis/node/core/curator/proto/private"
-	"source.monogon.dev/metropolis/pkg/event/etcd"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	mversion "source.monogon.dev/metropolis/version"
+	"source.monogon.dev/osbase/event/etcd"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 	"source.monogon.dev/version"
 	vpb "source.monogon.dev/version/spec"
 )
diff --git a/metropolis/node/kubernetes/reconciler/reconciler_status_test.go b/metropolis/node/kubernetes/reconciler/reconciler_status_test.go
index bd627a2..9eedfbe 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler_status_test.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler_status_test.go
@@ -12,9 +12,9 @@
 	"source.monogon.dev/metropolis/node/core/consensus/client"
 	"source.monogon.dev/metropolis/node/core/curator"
 	ppb "source.monogon.dev/metropolis/node/core/curator/proto/private"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	cpb "source.monogon.dev/metropolis/proto/common"
 	mversion "source.monogon.dev/metropolis/version"
+	"source.monogon.dev/osbase/supervisor"
 	"source.monogon.dev/version"
 	vpb "source.monogon.dev/version/spec"
 )
diff --git a/metropolis/node/kubernetes/reconciler/reconciler_test.go b/metropolis/node/kubernetes/reconciler/reconciler_test.go
index b791dbe..f457859 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler_test.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler_test.go
@@ -33,7 +33,7 @@
 	installrbac "k8s.io/kubernetes/pkg/apis/rbac/install"
 	installstorage "k8s.io/kubernetes/pkg/apis/storage/install"
 
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // TestExpectedUniqueNames ensures that all the Expected objects of any
diff --git a/metropolis/node/kubernetes/scheduler.go b/metropolis/node/kubernetes/scheduler.go
index 1b9b12c..cfa338a 100644
--- a/metropolis/node/kubernetes/scheduler.go
+++ b/metropolis/node/kubernetes/scheduler.go
@@ -23,8 +23,8 @@
 	"os/exec"
 
 	"source.monogon.dev/metropolis/node/kubernetes/pki"
-	"source.monogon.dev/metropolis/pkg/fileargs"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/fileargs"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type schedulerConfig struct {
diff --git a/metropolis/node/kubernetes/service_controller.go b/metropolis/node/kubernetes/service_controller.go
index 6b4360b..8ce36aa 100644
--- a/metropolis/node/kubernetes/service_controller.go
+++ b/metropolis/node/kubernetes/service_controller.go
@@ -35,8 +35,8 @@
 	"source.monogon.dev/metropolis/node/kubernetes/metricsproxy"
 	"source.monogon.dev/metropolis/node/kubernetes/pki"
 	"source.monogon.dev/metropolis/node/kubernetes/reconciler"
-	"source.monogon.dev/metropolis/pkg/supervisor"
 	apb "source.monogon.dev/metropolis/proto/api"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 type ConfigController struct {
diff --git a/metropolis/node/kubernetes/service_worker.go b/metropolis/node/kubernetes/service_worker.go
index 7ad985f..b4daba1 100644
--- a/metropolis/node/kubernetes/service_worker.go
+++ b/metropolis/node/kubernetes/service_worker.go
@@ -21,9 +21,9 @@
 	"source.monogon.dev/metropolis/node/kubernetes/nfproxy"
 	kpki "source.monogon.dev/metropolis/node/kubernetes/pki"
 	"source.monogon.dev/metropolis/node/kubernetes/plugins/kvmdevice"
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/event/memory"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/event"
+	"source.monogon.dev/osbase/event/memory"
+	"source.monogon.dev/osbase/supervisor"
 
 	ipb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 )
diff --git a/metropolis/pkg/blkio/BUILD.bazel b/metropolis/pkg/blkio/BUILD.bazel
deleted file mode 100644
index 8b071ec..0000000
--- a/metropolis/pkg/blkio/BUILD.bazel
+++ /dev/null
@@ -1,8 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "blkio",
-    srcs = ["blkio.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/blkio",
-    visibility = ["//visibility:public"],
-)
diff --git a/metropolis/pkg/blkio/blkio.go b/metropolis/pkg/blkio/blkio.go
deleted file mode 100644
index d0b7174..0000000
--- a/metropolis/pkg/blkio/blkio.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package blkio
-
-import (
-	"fmt"
-	"io"
-	"os"
-)
-
-type ReaderWithSize struct {
-	io.Reader
-	size int64
-}
-
-// SizedReader is an io.Reader with a known size
-type SizedReader interface {
-	io.Reader
-	Size() int64
-}
-
-// NewSizedReader returns a SizedReader given a reader and a size.
-// The returned SizedReader is a ReaderWithSize.
-func NewSizedReader(r io.Reader, size int64) SizedReader {
-	return &ReaderWithSize{r, size}
-}
-
-func (r *ReaderWithSize) Size() int64 {
-	return r.size
-}
-
-// LazyFileReader implements a SizedReader which opens a file on first read
-// and closes it again after the reader has reached EOF.
-type LazyFileReader struct {
-	name string
-	size int64
-	f    *os.File
-	done bool
-}
-
-func (r *LazyFileReader) init() error {
-	f, err := os.Open(r.name)
-	if err != nil {
-		return fmt.Errorf("failed to open file for reading: %w", err)
-	}
-	r.f = f
-	return nil
-}
-
-func (r *LazyFileReader) Size() int64 {
-	return r.size
-}
-
-func (r *LazyFileReader) Read(b []byte) (n int, err error) {
-	if r.done {
-		return 0, io.EOF
-	}
-	if r.f == nil {
-		if err = r.init(); err != nil {
-			return
-		}
-	}
-	n, err = r.f.Read(b)
-	if err == io.EOF {
-		r.done = true
-		r.f.Close()
-	}
-	return
-}
-
-func (r *LazyFileReader) Close() {
-	r.done = true
-	r.f.Close()
-}
-
-func NewFileReader(name string) (*LazyFileReader, error) {
-	info, err := os.Stat(name)
-	if err != nil {
-		return nil, fmt.Errorf("failed to stat: %w", err)
-	}
-	return &LazyFileReader{
-		size: info.Size(),
-		name: name,
-	}, nil
-}
diff --git a/metropolis/pkg/blockdev/BUILD.bazel b/metropolis/pkg/blockdev/BUILD.bazel
deleted file mode 100644
index 4eff0d4..0000000
--- a/metropolis/pkg/blockdev/BUILD.bazel
+++ /dev/null
@@ -1,28 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "blockdev",
-    srcs = [
-        "blockdev.go",
-        "blockdev_darwin.go",
-        "blockdev_linux.go",
-        "memory.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/blockdev",
-    visibility = ["//visibility:public"],
-    deps = select({
-        "@io_bazel_rules_go//go/platform:android": [
-            "@org_golang_x_sys//unix",
-        ],
-        "@io_bazel_rules_go//go/platform:darwin": [
-            "@org_golang_x_sys//unix",
-        ],
-        "@io_bazel_rules_go//go/platform:ios": [
-            "@org_golang_x_sys//unix",
-        ],
-        "@io_bazel_rules_go//go/platform:linux": [
-            "@org_golang_x_sys//unix",
-        ],
-        "//conditions:default": [],
-    }),
-)
diff --git a/metropolis/pkg/blockdev/blockdev.go b/metropolis/pkg/blockdev/blockdev.go
deleted file mode 100644
index 0e3c6e1..0000000
--- a/metropolis/pkg/blockdev/blockdev.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package blockdev
-
-import (
-	"errors"
-	"fmt"
-	"io"
-)
-
-var ErrNotBlockDevice = errors.New("not a block device")
-
-// BlockDev represents a generic block device made up of equally-sized blocks.
-// All offsets and intervals are expressed in bytes and must be aligned to
-// BlockSize and are recommended to be aligned to OptimalBlockSize if feasible.
-// Unless stated otherwise, intervals are inclusive-exclusive, i.e. the
-// start byte is included but the end byte is not.
-type BlockDev interface {
-	io.ReaderAt
-	io.WriterAt
-	// BlockSize returns the block size of the block device in bytes. This must
-	// be a power of two and is commonly (but not always) either 512 or 4096.
-	BlockSize() int64
-
-	// BlockCount returns the number of blocks on the block device or -1 if it
-	// is an image with an undefined size.
-	BlockCount() int64
-
-	// OptimalBlockSize returns the optimal block size in bytes for aligning
-	// to as well as issuing I/O. IO operations with block sizes below this
-	// one might incur read-write overhead. This is the larger of the physical
-	// block size and a device-reported value if available.
-	OptimalBlockSize() int64
-
-	// Discard discards a continuous set of blocks. Discarding means the
-	// underlying device gets notified that the data in these blocks is no
-	// longer needed. This can improve performance of the device device (as it
-	// no longer needs to preserve the unused data) as well as bulk erase
-	// operations. This command is advisory and not all implementations support
-	// it. The contents of discarded blocks are implementation-defined.
-	Discard(startByte int64, endByte int64) error
-
-	// Zero zeroes a continouous set of blocks. On certain implementations this
-	// can be significantly faster than just calling Write with zeroes.
-	Zero(startByte, endByte int64) error
-}
-
-func NewRWS(b BlockDev) *ReadWriteSeeker {
-	return &ReadWriteSeeker{b: b}
-}
-
-// ReadWriteSeeker provides an adapter implementing ReadWriteSeeker on top of
-// a blockdev.
-type ReadWriteSeeker struct {
-	b       BlockDev
-	currPos int64
-}
-
-func (s *ReadWriteSeeker) Read(p []byte) (n int, err error) {
-	n, err = s.b.ReadAt(p, s.currPos)
-	s.currPos += int64(n)
-	return
-}
-
-func (s *ReadWriteSeeker) Write(p []byte) (n int, err error) {
-	n, err = s.b.WriteAt(p, s.currPos)
-	s.currPos += int64(n)
-	return
-}
-
-func (s *ReadWriteSeeker) Seek(offset int64, whence int) (int64, error) {
-	switch whence {
-	case io.SeekCurrent:
-		s.currPos += offset
-	case io.SeekStart:
-		s.currPos = offset
-	case io.SeekEnd:
-		s.currPos = (s.b.BlockCount() * s.b.BlockSize()) - offset
-	}
-	return s.currPos, nil
-}
-
-var ErrOutOfBounds = errors.New("write out of bounds")
-
-// NewSection returns a new Section, implementing BlockDev over that subset
-// of blocks. The interval is inclusive-exclusive.
-func NewSection(b BlockDev, startBlock, endBlock int64) *Section {
-	return &Section{
-		b:          b,
-		startBlock: startBlock,
-		endBlock:   endBlock,
-	}
-}
-
-// Section implements BlockDev on a slice of another BlockDev given a startBlock
-// and endBlock.
-type Section struct {
-	b                    BlockDev
-	startBlock, endBlock int64
-}
-
-func (s *Section) ReadAt(p []byte, off int64) (n int, err error) {
-	bOff := off + (s.startBlock * s.b.BlockSize())
-	bytesToEnd := (s.endBlock * s.b.BlockSize()) - bOff
-	if bytesToEnd <= 0 {
-		return 0, io.EOF
-	}
-	if bytesToEnd < int64(len(p)) {
-		return s.b.ReadAt(p[:bytesToEnd], bOff)
-	}
-	return s.b.ReadAt(p, bOff)
-}
-
-func (s *Section) WriteAt(p []byte, off int64) (n int, err error) {
-	bOff := off + (s.startBlock * s.b.BlockSize())
-	bytesToEnd := (s.endBlock * s.b.BlockSize()) - bOff
-	if bytesToEnd <= 0 {
-		return 0, ErrOutOfBounds
-	}
-	if bytesToEnd < int64(len(p)) {
-		n, err := s.b.WriteAt(p[:bytesToEnd], off+(s.startBlock*s.b.BlockSize()))
-		if err != nil {
-			// If an error happened, prioritize that error
-			return n, err
-		}
-		// Otherwise, return ErrOutOfBounds as even short writes must return an
-		// error.
-		return n, ErrOutOfBounds
-	}
-	return s.b.WriteAt(p, off+(s.startBlock*s.b.BlockSize()))
-}
-
-func (s *Section) BlockCount() int64 {
-	return s.endBlock - s.startBlock
-}
-
-func (s *Section) BlockSize() int64 {
-	return s.b.BlockSize()
-}
-
-func (s *Section) inRange(startByte, endByte int64) error {
-	if startByte > endByte {
-		return fmt.Errorf("invalid range: startByte (%d) bigger than endByte (%d)", startByte, endByte)
-	}
-	sectionLen := s.BlockCount() * s.BlockSize()
-	if startByte >= sectionLen {
-		return fmt.Errorf("startByte (%d) out of range (%d)", startByte, sectionLen)
-	}
-	if endByte > sectionLen {
-		return fmt.Errorf("endBlock (%d) out of range (%d)", endByte, sectionLen)
-	}
-	return nil
-}
-
-func (s *Section) Discard(startByte, endByte int64) error {
-	if err := s.inRange(startByte, endByte); err != nil {
-		return err
-	}
-	offset := s.startBlock * s.b.BlockSize()
-	return s.b.Discard(offset+startByte, offset+endByte)
-}
-
-func (s *Section) OptimalBlockSize() int64 {
-	return s.b.OptimalBlockSize()
-}
-
-func (s *Section) Zero(startByte, endByte int64) error {
-	if err := s.inRange(startByte, endByte); err != nil {
-		return err
-	}
-	offset := s.startBlock * s.b.BlockSize()
-	return s.b.Zero(offset+startByte, offset+endByte)
-}
-
-// GenericZero implements software-based zeroing. This can be used to implement
-// Zero when no acceleration is available or desired.
-func GenericZero(b BlockDev, startByte, endByte int64) error {
-	if startByte%b.BlockSize() != 0 {
-		return fmt.Errorf("startByte (%d) needs to be aligned to block size (%d)", startByte, b.BlockSize())
-	}
-	if endByte%b.BlockSize() != 0 {
-		return fmt.Errorf("endByte (%d) needs to be aligned to block size (%d)", endByte, b.BlockSize())
-	}
-	// Choose buffer size close to 16MiB or the range to be zeroed, whatever
-	// is smaller.
-	bufSizeTarget := int64(16 * 1024 * 1024)
-	if endByte-startByte < bufSizeTarget {
-		bufSizeTarget = endByte - startByte
-	}
-	bufSize := (bufSizeTarget / b.BlockSize()) * b.BlockSize()
-	buf := make([]byte, bufSize)
-	for i := startByte; i < endByte; i += bufSize {
-		if endByte-i < bufSize {
-			buf = buf[:endByte-i]
-		}
-		if _, err := b.WriteAt(buf, i); err != nil {
-			return fmt.Errorf("while writing zeroes: %w", err)
-		}
-	}
-	return nil
-}
diff --git a/metropolis/pkg/blockdev/blockdev_darwin.go b/metropolis/pkg/blockdev/blockdev_darwin.go
deleted file mode 100644
index 5422e55..0000000
--- a/metropolis/pkg/blockdev/blockdev_darwin.go
+++ /dev/null
@@ -1,171 +0,0 @@
-//go:build darwin
-
-package blockdev
-
-import (
-	"errors"
-	"fmt"
-	"math/bits"
-	"os"
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-// TODO(lorenz): Upstream these to x/sys/unix.
-const (
-	DKIOCGETBLOCKSIZE  = 0x40046418
-	DKIOCGETBLOCKCOUNT = 0x40086419
-)
-
-type Device struct {
-	backend    *os.File
-	rawConn    syscall.RawConn
-	blockSize  int64
-	blockCount int64
-}
-
-func (d *Device) ReadAt(p []byte, off int64) (n int, err error) {
-	return d.backend.ReadAt(p, off)
-}
-
-func (d *Device) WriteAt(p []byte, off int64) (n int, err error) {
-	return d.backend.WriteAt(p, off)
-}
-
-func (d *Device) Close() error {
-	return d.backend.Close()
-}
-
-func (d *Device) BlockCount() int64 {
-	return d.blockCount
-}
-
-func (d *Device) BlockSize() int64 {
-	return d.blockSize
-}
-
-func (d *Device) Discard(startByte int64, endByte int64) error {
-	// Can be implemented using DKIOCUNMAP, but needs x/sys/unix extension.
-	// Not mandatory, so this is fine for now.
-	return errors.ErrUnsupported
-}
-
-func (d *Device) OptimalBlockSize() int64 {
-	return d.blockSize
-}
-
-func (d *Device) Zero(startByte int64, endByte int64) error {
-	// It doesn't look like MacOS even has any zeroing acceleration, so just
-	// use the generic one.
-	return GenericZero(d, startByte, endByte)
-}
-
-// Open opens a block device given a path to its inode.
-func Open(path string) (*Device, error) {
-	outFile, err := os.OpenFile(path, os.O_RDWR, 0640)
-	if err != nil {
-		return nil, fmt.Errorf("failed to open block device: %w", err)
-	}
-	return FromFileHandle(outFile)
-}
-
-// FromFileHandle creates a blockdev from a device handle. The device handle is
-// not duplicated, closing the returned Device will close it. If the handle is
-// not a block device, i.e does not implement block device ioctls, an error is
-// returned.
-func FromFileHandle(handle *os.File) (*Device, error) {
-	outFileC, err := handle.SyscallConn()
-	if err != nil {
-		return nil, fmt.Errorf("error getting SyscallConn: %w", err)
-	}
-	var blockSize int
-	outFileC.Control(func(fd uintptr) {
-		blockSize, err = unix.IoctlGetInt(int(fd), DKIOCGETBLOCKSIZE)
-	})
-	if errors.Is(err, unix.ENOTTY) || errors.Is(err, unix.EINVAL) {
-		return nil, ErrNotBlockDevice
-	} else if err != nil {
-		return nil, fmt.Errorf("when querying disk block size: %w", err)
-	}
-
-	var blockCount int
-	var getSizeErr error
-	outFileC.Control(func(fd uintptr) {
-		blockCount, getSizeErr = unix.IoctlGetInt(int(fd), DKIOCGETBLOCKCOUNT)
-	})
-
-	if getSizeErr != nil {
-		return nil, fmt.Errorf("when querying disk block count: %w", err)
-	}
-	return &Device{
-		backend:    handle,
-		rawConn:    outFileC,
-		blockSize:  int64(blockSize),
-		blockCount: int64(blockCount),
-	}, nil
-}
-
-type File struct {
-	backend    *os.File
-	rawConn    syscall.RawConn
-	blockSize  int64
-	blockCount int64
-}
-
-func CreateFile(name string, blockSize int64, blockCount int64) (*File, error) {
-	if blockSize < 512 {
-		return nil, fmt.Errorf("blockSize must be bigger than 512 bytes")
-	}
-	if bits.OnesCount64(uint64(blockSize)) != 1 {
-		return nil, fmt.Errorf("blockSize must be a power of two")
-	}
-	out, err := os.Create(name)
-	if err != nil {
-		return nil, fmt.Errorf("when creating backing file: %w", err)
-	}
-	rawConn, err := out.SyscallConn()
-	if err != nil {
-		return nil, fmt.Errorf("unable to get SyscallConn: %w", err)
-	}
-	return &File{
-		backend:    out,
-		blockSize:  blockSize,
-		rawConn:    rawConn,
-		blockCount: blockCount,
-	}, nil
-}
-
-func (d *File) ReadAt(p []byte, off int64) (n int, err error) {
-	return d.backend.ReadAt(p, off)
-}
-
-func (d *File) WriteAt(p []byte, off int64) (n int, err error) {
-	return d.backend.WriteAt(p, off)
-}
-
-func (d *File) Close() error {
-	return d.backend.Close()
-}
-
-func (d *File) BlockCount() int64 {
-	return d.blockCount
-}
-
-func (d *File) BlockSize() int64 {
-	return d.blockSize
-}
-
-func (d *File) Discard(startByte int64, endByte int64) error {
-	// Can be supported in the future via fnctl.
-	return errors.ErrUnsupported
-}
-
-func (d *File) OptimalBlockSize() int64 {
-	return d.blockSize
-}
-
-func (d *File) Zero(startByte int64, endByte int64) error {
-	// Can possibly be accelerated in the future via fnctl.
-	return GenericZero(d, startByte, endByte)
-}
diff --git a/metropolis/pkg/blockdev/blockdev_linux.go b/metropolis/pkg/blockdev/blockdev_linux.go
deleted file mode 100644
index c5fa784..0000000
--- a/metropolis/pkg/blockdev/blockdev_linux.go
+++ /dev/null
@@ -1,248 +0,0 @@
-//go:build linux
-
-package blockdev
-
-import (
-	"errors"
-	"fmt"
-	"math/bits"
-	"os"
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-type Device struct {
-	backend    *os.File
-	rawConn    syscall.RawConn
-	blockSize  int64
-	blockCount int64
-}
-
-func (d *Device) ReadAt(p []byte, off int64) (n int, err error) {
-	return d.backend.ReadAt(p, off)
-}
-
-func (d *Device) WriteAt(p []byte, off int64) (n int, err error) {
-	return d.backend.WriteAt(p, off)
-}
-
-func (d *Device) Close() error {
-	return d.backend.Close()
-}
-
-func (d *Device) BlockCount() int64 {
-	return d.blockCount
-}
-
-func (d *Device) BlockSize() int64 {
-	return d.blockSize
-}
-
-func (d *Device) Discard(startByte int64, endByte int64) error {
-	var args [2]uint64
-	var err unix.Errno
-	args[0] = uint64(startByte)
-	args[1] = uint64(endByte - startByte)
-	if ctrlErr := d.rawConn.Control(func(fd uintptr) {
-		_, _, err = unix.Syscall(unix.SYS_IOCTL, fd, unix.BLKDISCARD, uintptr(unsafe.Pointer(&args[0])))
-	}); ctrlErr != nil {
-		return ctrlErr
-	}
-	if err == unix.EOPNOTSUPP {
-		return errors.ErrUnsupported
-	}
-	if err != unix.Errno(0) {
-		return fmt.Errorf("failed to discard: %w", err)
-	}
-	return nil
-}
-
-func (d *Device) OptimalBlockSize() int64 {
-	return d.blockSize
-}
-
-func (d *Device) Zero(startByte int64, endByte int64) error {
-	var args [2]uint64
-	var err error
-	args[0] = uint64(startByte)
-	args[1] = uint64(endByte - startByte)
-	ctrlErr := d.rawConn.Control(func(fd uintptr) {
-		// Attempts to leverage discard guarantees to provide extremely quick
-		// metadata-only zeroing.
-		err = unix.Fallocate(int(fd), unix.FALLOC_FL_PUNCH_HOLE|unix.FALLOC_FL_KEEP_SIZE, startByte, endByte-startByte)
-		if errors.Is(err, unix.EOPNOTSUPP) {
-			// Tries Write Same and friends and then just falls back to writing
-			// zeroes.
-			_, _, errNo := unix.Syscall(unix.SYS_IOCTL, fd, unix.BLKZEROOUT, uintptr(unsafe.Pointer(&args[0])))
-			if errNo == unix.Errno(0) {
-				err = nil
-			} else {
-				err = errNo
-			}
-		}
-	})
-	if ctrlErr != nil {
-		return ctrlErr
-	}
-	if err != nil {
-		return fmt.Errorf("failed to zero out: %w", err)
-	}
-	return nil
-}
-
-// RefreshPartitionTable refreshes the kernel's view of the partition table
-// after changes made from userspace.
-func (d *Device) RefreshPartitionTable() error {
-	var err unix.Errno
-	if ctrlErr := d.rawConn.Control(func(fd uintptr) {
-		_, _, err = unix.Syscall(unix.SYS_IOCTL, fd, unix.BLKRRPART, 0)
-	}); ctrlErr != nil {
-		return ctrlErr
-	}
-	if err != unix.Errno(0) {
-		return fmt.Errorf("ioctl(BLKRRPART): %w", err)
-	}
-	return nil
-}
-
-// Open opens a block device given a path to its inode.
-// TODO: exclusive, O_DIRECT
-func Open(path string) (*Device, error) {
-	outFile, err := os.OpenFile(path, os.O_RDWR, 0640)
-	if err != nil {
-		return nil, fmt.Errorf("failed to open block device: %w", err)
-	}
-	return FromFileHandle(outFile)
-}
-
-// FromFileHandle creates a blockdev from a device handle. The device handle is
-// not duplicated, closing the returned Device will close it. If the handle is
-// not a block device, i.e does not implement block device ioctls, an error is
-// returned.
-func FromFileHandle(handle *os.File) (*Device, error) {
-	outFileC, err := handle.SyscallConn()
-	if err != nil {
-		return nil, fmt.Errorf("error getting SyscallConn: %w", err)
-	}
-	var blockSize uint32
-	outFileC.Control(func(fd uintptr) {
-		blockSize, err = unix.IoctlGetUint32(int(fd), unix.BLKSSZGET)
-	})
-	if errors.Is(err, unix.ENOTTY) || errors.Is(err, unix.EINVAL) {
-		return nil, ErrNotBlockDevice
-	} else if err != nil {
-		return nil, fmt.Errorf("when querying disk block size: %w", err)
-	}
-
-	var sizeBytes uint64
-	var getSizeErr syscall.Errno
-	outFileC.Control(func(fd uintptr) {
-		_, _, getSizeErr = unix.Syscall(unix.SYS_IOCTL, fd, unix.BLKGETSIZE64, uintptr(unsafe.Pointer(&sizeBytes)))
-	})
-
-	if getSizeErr != unix.Errno(0) {
-		return nil, fmt.Errorf("when querying disk block count: %w", err)
-	}
-	if sizeBytes%uint64(blockSize) != 0 {
-		return nil, fmt.Errorf("block device size is not an integer multiple of its block size (%d %% %d = %d)", sizeBytes, blockSize, sizeBytes%uint64(blockSize))
-	}
-	return &Device{
-		backend:    handle,
-		rawConn:    outFileC,
-		blockSize:  int64(blockSize),
-		blockCount: int64(sizeBytes) / int64(blockSize),
-	}, nil
-}
-
-type File struct {
-	backend    *os.File
-	rawConn    syscall.RawConn
-	blockSize  int64
-	blockCount int64
-}
-
-func CreateFile(name string, blockSize int64, blockCount int64) (*File, error) {
-	if blockSize < 512 {
-		return nil, fmt.Errorf("blockSize must be bigger than 512 bytes")
-	}
-	if bits.OnesCount64(uint64(blockSize)) != 1 {
-		return nil, fmt.Errorf("blockSize must be a power of two")
-	}
-	out, err := os.Create(name)
-	if err != nil {
-		return nil, fmt.Errorf("when creating backing file: %w", err)
-	}
-	rawConn, err := out.SyscallConn()
-	if err != nil {
-		return nil, fmt.Errorf("unable to get SyscallConn: %w", err)
-	}
-	return &File{
-		backend:    out,
-		blockSize:  blockSize,
-		rawConn:    rawConn,
-		blockCount: blockCount,
-	}, nil
-}
-
-func (d *File) ReadAt(p []byte, off int64) (n int, err error) {
-	return d.backend.ReadAt(p, off)
-}
-
-func (d *File) WriteAt(p []byte, off int64) (n int, err error) {
-	return d.backend.WriteAt(p, off)
-}
-
-func (d *File) Close() error {
-	return d.backend.Close()
-}
-
-func (d *File) BlockCount() int64 {
-	return d.blockCount
-}
-
-func (d *File) BlockSize() int64 {
-	return d.blockSize
-}
-
-func (d *File) Discard(startByte int64, endByte int64) error {
-	var err error
-	if ctrlErr := d.rawConn.Control(func(fd uintptr) {
-		// There is FALLOC_FL_NO_HIDE_STALE, but it's not implemented by
-		// any filesystem right now, so let's not attempt it for the time being.
-		err = unix.Fallocate(int(fd), unix.FALLOC_FL_PUNCH_HOLE|unix.FALLOC_FL_KEEP_SIZE, startByte, endByte-startByte)
-	}); ctrlErr != nil {
-		return ctrlErr
-	}
-	if errors.Is(err, unix.EOPNOTSUPP) {
-		return errors.ErrUnsupported
-	}
-	if err != nil {
-		return fmt.Errorf("failed to discard: %w", err)
-	}
-	return nil
-}
-
-func (d *File) OptimalBlockSize() int64 {
-	return d.blockSize
-}
-
-func (d *File) Zero(startByte int64, endByte int64) error {
-	var err error
-	if ctrlErr := d.rawConn.Control(func(fd uintptr) {
-		// Tell the filesystem to punch out the given blocks.
-		err = unix.Fallocate(int(fd), unix.FALLOC_FL_PUNCH_HOLE|unix.FALLOC_FL_KEEP_SIZE, startByte, endByte-startByte)
-	}); ctrlErr != nil {
-		return ctrlErr
-	}
-	// If unsupported or the syscall is not available (for example in a sandbox)
-	// fall back to the generic software implementation.
-	if errors.Is(err, unix.EOPNOTSUPP) || errors.Is(err, unix.ENOSYS) {
-		return GenericZero(d, startByte, endByte)
-	}
-	if err != nil {
-		return fmt.Errorf("failed to zero out: %w", err)
-	}
-	return nil
-}
diff --git a/metropolis/pkg/blockdev/memory.go b/metropolis/pkg/blockdev/memory.go
deleted file mode 100644
index 193f93c..0000000
--- a/metropolis/pkg/blockdev/memory.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package blockdev
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"math/bits"
-)
-
-// Memory is a memory-backed implementation of BlockDev. It is optimal
-// for testing and temporary use, as it is fast and platform-independent.
-type Memory struct {
-	blockSize  int64
-	blockCount int64
-	data       []byte
-}
-
-// NewMemory returns a new memory-backed block device with the given geometry.
-func NewMemory(blockSize, blockCount int64) (*Memory, error) {
-	if blockSize <= 0 {
-		return nil, errors.New("block size cannot be zero or negative")
-	}
-	if bits.OnesCount64(uint64(blockSize)) > 1 {
-		return nil, fmt.Errorf("block size must be a power of two (got %d)", blockSize)
-	}
-	if blockCount < 0 {
-		return nil, errors.New("block count cannot be negative")
-	}
-	return &Memory{
-		blockSize:  blockSize,
-		blockCount: blockCount,
-		data:       make([]byte, blockSize*blockCount),
-	}, nil
-}
-
-// MustNewMemory works exactly like NewMemory, but panics when NewMemory would
-// return an error. Intended for use in tests.
-func MustNewMemory(blockSize, blockCount int64) *Memory {
-	m, err := NewMemory(blockSize, blockCount)
-	if err != nil {
-		panic(err)
-	}
-	return m
-}
-
-func (m *Memory) ReadAt(p []byte, off int64) (int, error) {
-	devSize := m.blockSize * m.blockCount
-	if off > devSize {
-		return 0, io.EOF
-	}
-	// TODO: Alignment checks?
-	copy(p, m.data[off:])
-	n := len(m.data[off:])
-	if n < len(p) {
-		return n, io.EOF
-	}
-	return len(p), nil
-}
-
-func (m *Memory) WriteAt(p []byte, off int64) (int, error) {
-	devSize := m.blockSize * m.blockCount
-	if off > devSize {
-		return 0, io.EOF
-	}
-	// TODO: Alignment checks?
-	copy(m.data[off:], p)
-	n := len(m.data[off:])
-	if n < len(p) {
-		return n, io.EOF
-	}
-	return len(p), nil
-}
-
-func (m *Memory) BlockSize() int64 {
-	return m.blockSize
-}
-
-func (m *Memory) BlockCount() int64 {
-	return m.blockCount
-}
-
-func (m *Memory) OptimalBlockSize() int64 {
-	return m.blockSize
-}
-
-func (m *Memory) validRange(startByte, endByte int64) error {
-	if startByte > endByte {
-		return fmt.Errorf("startByte (%d) larger than endByte (%d), invalid interval", startByte, endByte)
-	}
-	devSize := m.blockSize * m.blockCount
-	if startByte >= devSize || startByte < 0 {
-		return fmt.Errorf("startByte (%d) out of range (0-%d)", endByte, devSize)
-	}
-	if endByte > devSize || endByte < 0 {
-		return fmt.Errorf("endByte (%d) out of range (0-%d)", endByte, devSize)
-	}
-	// Alignment check works for powers of two by looking at every bit below
-	// the bit set in the block size.
-	if startByte&(m.blockSize-1) != 0 {
-		return fmt.Errorf("startByte (%d) is not aligned to blockSize (%d)", startByte, m.blockSize)
-	}
-	if endByte&(m.blockSize-1) != 0 {
-		return fmt.Errorf("endByte (%d) is not aligned to blockSize (%d)", startByte, m.blockSize)
-	}
-	return nil
-}
-
-func (m *Memory) Discard(startByte, endByte int64) error {
-	if err := m.validRange(startByte, endByte); err != nil {
-		return err
-	}
-	for i := startByte; i < endByte; i++ {
-		// Intentionally don't set to zero as Discard doesn't guarantee
-		// any specific contents. Call Zero if you need this.
-		m.data[i] = 0xaa
-	}
-	return nil
-}
-
-func (m *Memory) Zero(startByte, endByte int64) error {
-	if err := m.validRange(startByte, endByte); err != nil {
-		return err
-	}
-	for i := startByte; i < endByte; i++ {
-		m.data[i] = 0x00
-	}
-	return nil
-}
diff --git a/metropolis/pkg/bootparam/BUILD.bazel b/metropolis/pkg/bootparam/BUILD.bazel
deleted file mode 100644
index 4b63f28..0000000
--- a/metropolis/pkg/bootparam/BUILD.bazel
+++ /dev/null
@@ -1,25 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "bootparam",
-    srcs = [
-        "bootparam.go",
-        "params.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/bootparam",
-    visibility = ["//visibility:public"],
-)
-
-go_test(
-    name = "bootparam_test",
-    srcs = [
-        "bootparam_test.go",
-        "params_test.go",
-    ],
-    embed = [":bootparam"],
-    gc_goopts = ["-d=libfuzzer"],
-    deps = [
-        "//metropolis/pkg/bootparam/ref",
-        "@com_github_google_go_cmp//cmp",
-    ],
-)
diff --git a/metropolis/pkg/bootparam/bootparam.go b/metropolis/pkg/bootparam/bootparam.go
deleted file mode 100644
index 240abf3..0000000
--- a/metropolis/pkg/bootparam/bootparam.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Package bootparam implements encoding and decoding of Linux kernel command
-// lines as documented in
-// https://docs.kernel.org/admin-guide/kernel-parameters.html
-//
-// The format is quite quirky and thus the implementation is mostly based
-// on the code in the Linux kernel implementing the decoder and not the
-// specification.
-package bootparam
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-)
-
-// Param represents a single boot parameter with or without a value
-type Param struct {
-	Param, Value string
-	HasValue     bool
-}
-
-// Params represents a list of kernel boot parameters
-type Params []Param
-
-// Linux has for historical reasons an unusual definition of this function
-// Taken from @linux//lib:ctype.c
-func isSpace(r byte) bool {
-	switch r {
-	case '\t', '\n', '\v', '\f', '\r', ' ', 0xa0:
-		return true
-	default:
-		return false
-	}
-}
-
-// TrimLeftSpace spaces as defined by Linux from the left of the string.
-// This is only exported for tests, do not use this. Because of import loops
-// as well as cgo restrictions this cannot be an internal function used by
-// tests.
-func TrimLeftSpace(s string) string {
-	start := 0
-	for ; start < len(s); start++ {
-		c := s[start]
-		if !isSpace(c) {
-			break
-		}
-	}
-
-	return s[start:]
-}
-
-func containsSpace(s string) bool {
-	for i := 0; i < len(s); i++ {
-		if isSpace(s[i]) {
-			return true
-		}
-	}
-	return false
-}
-
-func parseToken(token string) (p Param, err error) {
-	if strings.HasPrefix(token, `=`) || strings.HasPrefix(token, `"=`) {
-		return Param{}, errors.New("param contains `=` at first position, this causes broken behavior")
-	}
-	param, value, hasValue := strings.Cut(token, "=")
-
-	if strings.HasPrefix(param, `"`) {
-		p.Param = strings.TrimPrefix(param, `"`)
-		if !hasValue {
-			p.Param = strings.TrimSuffix(p.Param, `"`)
-		}
-	} else {
-		p.Param = param
-	}
-	if hasValue {
-		if strings.HasPrefix(value, `"`) {
-			p.Value = strings.TrimSuffix(strings.TrimPrefix(value, `"`), `"`)
-		} else if strings.HasPrefix(param, `"`) {
-			p.Value = strings.TrimSuffix(value, `"`)
-		} else {
-			p.Value = value
-		}
-	}
-	return
-}
-
-// Unmarshal decodes a Linux kernel command line and returns a list of kernel
-// parameters as well as a rest section after the "--" parsing terminator.
-func Unmarshal(cmdline string) (params Params, rest string, err error) {
-	cmdline = TrimLeftSpace(cmdline)
-	if pos := strings.IndexByte(cmdline, 0x00); pos != -1 {
-		cmdline = cmdline[:pos]
-	}
-	var lastIdx int
-	var inQuote bool
-	var p Param
-	for i := 0; i < len(cmdline); i++ {
-		if isSpace(cmdline[i]) && !inQuote {
-			token := cmdline[lastIdx:i]
-			lastIdx = i + 1
-			if TrimLeftSpace(token) == "" {
-				continue
-			}
-			p, err = parseToken(token)
-			if err != nil {
-				return
-			}
-
-			// Stop processing and return everything left as rest
-			if p.Param == "--" {
-				rest = TrimLeftSpace(cmdline[lastIdx:])
-				return
-			}
-			params = append(params, p)
-		}
-		if cmdline[i] == '"' {
-			inQuote = !inQuote
-		}
-	}
-	if len(cmdline)-lastIdx > 0 {
-		token := cmdline[lastIdx:]
-		if TrimLeftSpace(token) == "" {
-			return
-		}
-		p, err = parseToken(token)
-		if err != nil {
-			return
-		}
-
-		// Stop processing, do not set rest as there is none
-		if p.Param == "--" {
-			return
-		}
-		params = append(params, p)
-	}
-	return
-}
-
-// Marshal encodes a set of kernel parameters and an optional rest string into
-// a Linux kernel command line. It rejects data which is not encodable, which
-// includes null bytes, double quotes in params as well as characters which
-// contain 0xa0 in their UTF-8 representation (historical Linux quirk of
-// treating that as a space, inherited from Latin-1).
-func Marshal(params Params, rest string) (string, error) {
-	if strings.IndexByte(rest, 0x00) != -1 {
-		return "", errors.New("rest contains 0x00 byte, this is disallowed")
-	}
-	var strb strings.Builder
-	for _, p := range params {
-		if strings.ContainsRune(p.Param, '=') {
-			return "", fmt.Errorf("invalid '=' character in param %q", p.Param)
-		}
-		// Technically a weird subset of double quotes can be encoded, but
-		// this should probably not be done so just reject them all.
-		if strings.ContainsRune(p.Param, '"') {
-			return "", fmt.Errorf("invalid '\"' character in param %q", p.Param)
-		}
-		if strings.ContainsRune(p.Value, '"') {
-			return "", fmt.Errorf("invalid '\"' character in value %q", p.Value)
-		}
-		if strings.IndexByte(p.Param, 0x00) != -1 {
-			return "", fmt.Errorf("invalid null byte in param %q", p.Param)
-		}
-		if strings.IndexByte(p.Value, 0x00) != -1 {
-			return "", fmt.Errorf("invalid null byte in value %q", p.Value)
-		}
-		// Linux treats 0xa0 as a space, even though it is a valid UTF-8
-		// surrogate. This is unfortunate, but passing it through would
-		// break the whole command line.
-		if strings.IndexByte(p.Param, 0xa0) != -1 {
-			return "", fmt.Errorf("invalid 0xa0 byte in param %q", p.Param)
-		}
-		if strings.IndexByte(p.Value, 0xa0) != -1 {
-			return "", fmt.Errorf("invalid 0xa0 byte in value %q", p.Value)
-		}
-		if strings.ContainsRune(p.Param, '"') {
-			return "", fmt.Errorf("invalid '\"' character in value %q", p.Value)
-		}
-		// This should be allowed according to the docs, but is in fact broken.
-		if p.Value != "" && containsSpace(p.Param) {
-			return "", fmt.Errorf("param %q contains spaces and value, this is unsupported", p.Param)
-		}
-		if p.Param == "--" {
-			return "", errors.New("param '--' is reserved and cannot be used")
-		}
-		if p.Param == "" {
-			return "", errors.New("empty params are not supported")
-		}
-		if containsSpace(p.Param) {
-			strb.WriteRune('"')
-			strb.WriteString(p.Param)
-			strb.WriteRune('"')
-		} else {
-			strb.WriteString(p.Param)
-		}
-		if p.Value != "" {
-			strb.WriteRune('=')
-			if containsSpace(p.Value) {
-				strb.WriteRune('"')
-				strb.WriteString(p.Value)
-				strb.WriteRune('"')
-			} else {
-				strb.WriteString(p.Value)
-			}
-		}
-		strb.WriteRune(' ')
-	}
-	if len(rest) > 0 {
-		strb.WriteString("-- ")
-		// Starting whitespace will be dropped by the decoder anyways, do it
-		// here to make the resulting command line nicer.
-		strb.WriteString(TrimLeftSpace(rest))
-	}
-	return strb.String(), nil
-}
diff --git a/metropolis/pkg/bootparam/bootparam_test.go b/metropolis/pkg/bootparam/bootparam_test.go
deleted file mode 100644
index a0032a4..0000000
--- a/metropolis/pkg/bootparam/bootparam_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// If this is bootparam we have an import cycle
-package bootparam_test
-
-import (
-	"strings"
-	"testing"
-
-	"github.com/google/go-cmp/cmp"
-
-	"source.monogon.dev/metropolis/pkg/bootparam"
-	"source.monogon.dev/metropolis/pkg/bootparam/ref"
-)
-
-// Fuzzers can be run with
-// bazel test //metropolis/pkg/bootparam:bootparam_test
-//   --test_arg=-test.fuzz=FuzzMarshal
-//   --test_arg=-test.fuzzcachedir=/tmp/fuzz
-//   --test_arg=-test.fuzztime=60s
-
-func FuzzUnmarshal(f *testing.F) {
-	f.Add(`initrd="\test\some=value" root=yolo "definitely quoted" ro rootflags=`)
-	f.Fuzz(func(t *testing.T, a string) {
-		refOut, refRest := ref.Parse(a)
-		out, rest, err := bootparam.Unmarshal(a)
-		if err != nil {
-			return
-		}
-		if diff := cmp.Diff(refOut, out); diff != "" {
-			t.Errorf("Parse(%q): params mismatch (-want +got):\n%s", a, diff)
-		}
-		if refRest != rest {
-			t.Errorf("Parse(%q): expected rest to be %q, got %q", a, refRest, rest)
-		}
-	})
-}
-
-func FuzzMarshal(f *testing.F) {
-	// Choose delimiters which mean nothing to the parser
-	f.Add("a:b;assd:9dsf;1234", "some fancy rest")
-	f.Fuzz(func(t *testing.T, paramsRaw string, rest string) {
-		paramsSeparated := strings.Split(paramsRaw, ";")
-		var params bootparam.Params
-		for _, p := range paramsSeparated {
-			a, b, _ := strings.Cut(p, ":")
-			params = append(params, bootparam.Param{Param: a, Value: b})
-		}
-		rest = bootparam.TrimLeftSpace(rest)
-		encoded, err := bootparam.Marshal(params, rest)
-		if err != nil {
-			return // Invalid input
-		}
-		refOut, refRest := ref.Parse(encoded)
-		if diff := cmp.Diff(refOut, params); diff != "" {
-			t.Errorf("Marshal(%q): params mismatch (-want +got):\n%s", paramsRaw, diff)
-		}
-		if refRest != rest {
-			t.Errorf("Parse(%q, %q): expected rest to be %q, got %q", paramsRaw, rest, refRest, rest)
-		}
-	})
-}
diff --git a/metropolis/pkg/bootparam/params.go b/metropolis/pkg/bootparam/params.go
deleted file mode 100644
index bbb4fae..0000000
--- a/metropolis/pkg/bootparam/params.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package bootparam
-
-import (
-	"regexp"
-	"strings"
-)
-
-var validTTYRegexp = regexp.MustCompile(`^[a-zA-Z0-9]+$`)
-
-// Consoles returns the set of consoles passed to the kernel, i.e. the values
-// passed to the console= directive. It normalizes away any possibly present
-// /dev/ prefix, returning values like ttyS0. It returns an empty set in case
-// no valid console parameters exist.
-func (p Params) Consoles() map[string]bool {
-	consoles := make(map[string]bool)
-	for _, pa := range p {
-		if pa.Param == "console" {
-			consoleParts := strings.Split(pa.Value, ",")
-			consoleName := strings.TrimPrefix(consoleParts[0], "/dev/")
-			if validTTYRegexp.MatchString(consoleName) {
-				consoles[consoleName] = true
-			}
-		}
-	}
-	return consoles
-}
diff --git a/metropolis/pkg/bootparam/params_test.go b/metropolis/pkg/bootparam/params_test.go
deleted file mode 100644
index c76dd88..0000000
--- a/metropolis/pkg/bootparam/params_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package bootparam
-
-import "testing"
-
-func TestConsoles(t *testing.T) {
-	cases := []struct {
-		name     string
-		cmdline  string
-		consoles []string
-	}{
-		{"Empty", "", []string{}},
-		{"None", "notconsole=test", []string{}},
-		{"Single", "asdf=ttyS1 console=ttyS0,115200", []string{"ttyS0"}},
-		{"MultipleSame", "console=ttyS0 noop console=ttyS0", []string{"ttyS0"}},
-		{"MultipleDiff", "console=tty27 console=ttyACM0", []string{"tty27", "ttyACM0"}},
-		{"WithDev", "console=/dev/ttyXYZ0", []string{"ttyXYZ0"}},
-		{"BrokenBadDev", "console=/etc/passwd", []string{}},
-		{"BrokenNoValue", "console=", []string{}},
-	}
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			p, _, err := Unmarshal(c.cmdline)
-			if err != nil {
-				t.Fatalf("Failed to parse cmdline %q: %v", c.cmdline, err)
-			}
-			consoles := p.Consoles()
-			wantConsoles := make(map[string]bool)
-			for _, con := range c.consoles {
-				wantConsoles[con] = true
-			}
-			for con := range wantConsoles {
-				if !consoles[con] {
-					t.Errorf("Expected console %s to be returned but it wasn't", con)
-				}
-			}
-			for con := range consoles {
-				if !wantConsoles[con] {
-					t.Errorf("Didn't expect console %s to be returned but it was", con)
-				}
-			}
-		})
-	}
-}
diff --git a/metropolis/pkg/bootparam/ref/BUILD.bazel b/metropolis/pkg/bootparam/ref/BUILD.bazel
deleted file mode 100644
index d22540a..0000000
--- a/metropolis/pkg/bootparam/ref/BUILD.bazel
+++ /dev/null
@@ -1,11 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "ref",
-    srcs = ["ref.go"],
-    cgo = True,
-    gc_goopts = ["-d=libfuzzer"],
-    importpath = "source.monogon.dev/metropolis/pkg/bootparam/ref",
-    visibility = ["//visibility:public"],
-    deps = ["//metropolis/pkg/bootparam"],
-)
diff --git a/metropolis/pkg/bootparam/ref/ref.go b/metropolis/pkg/bootparam/ref/ref.go
deleted file mode 100644
index 9842ecd..0000000
--- a/metropolis/pkg/bootparam/ref/ref.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Package ref provides the reference implementation for kernel command line
-// parsing as present in the Linux kernel. This is a separate package and
-// not part of the bootparam tests because Go does not let you use cgo in
-// tests.
-package ref
-
-// Reference implementation from the kernel
-
-/*
-#include <stdlib.h>
-#include <ctype.h>
-#include <stddef.h>
-
-#define _U	0x01
-#define _L	0x02
-#define _D	0x04
-#define _C	0x08
-#define _P	0x10
-#define _S	0x20
-#define _X	0x40
-#define _SP	0x80
-
-#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
-#define kisspace(c)	((__ismask(c)&(_S)) != 0)
-
-const unsigned char _ctype[] = {
-_C,_C,_C,_C,_C,_C,_C,_C,
-_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C,
-_C,_C,_C,_C,_C,_C,_C,_C,
-_C,_C,_C,_C,_C,_C,_C,_C,
-_S|_SP,_P,_P,_P,_P,_P,_P,_P,
-_P,_P,_P,_P,_P,_P,_P,_P,
-_D,_D,_D,_D,_D,_D,_D,_D,
-_D,_D,_P,_P,_P,_P,_P,_P,
-_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U,
-_U,_U,_U,_U,_U,_U,_U,_U,
-_U,_U,_U,_U,_U,_U,_U,_U,
-_U,_U,_U,_P,_P,_P,_P,_P,
-_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L,
-_L,_L,_L,_L,_L,_L,_L,_L,
-_L,_L,_L,_L,_L,_L,_L,_L,
-_L,_L,_L,_P,_P,_P,_P,_C,
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,
-_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,
-_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,
-_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L,
-_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,
-_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L};
-
-
-
-char *skip_spaces(const char *str)
-{
-	while (kisspace(*str))
-		++str;
-	return (char *)str;
-}
-
-
-// * Parse a string to get a param value pair.
-// * You can use " around spaces, but can't escape ".
-// * Hyphens and underscores equivalent in parameter names.
- char *next_arg(char *args, char **param, char **val)
- {
-	 unsigned int i, equals = 0;
-	 int in_quote = 0, quoted = 0;
-
-	 if (*args == '"') {
-		 args++;
-		 in_quote = 1;
-		 quoted = 1;
-	 }
-
-	 for (i = 0; args[i]; i++) {
-		 if (kisspace(args[i]) && !in_quote)
-			 break;
-		 if (equals == 0) {
-			 if (args[i] == '=')
-				 equals = i;
-		 }
-		 if (args[i] == '"')
-			 in_quote = !in_quote;
-	 }
-
-	 *param = args;
-	 if (!equals)
-		 *val = NULL;
-	 else {
-		 args[equals] = '\0';
-		 *val = args + equals + 1;
-
-		 // Don't include quotes in value.
-		 if (**val == '"') {
-			 (*val)++;
-			 if (args[i-1] == '"')
-				 args[i-1] = '\0';
-		 }
-	 }
-	 if (quoted && i > 0 && args[i-1] == '"')
-		 args[i-1] = '\0';
-
-	 if (args[i]) {
-		 args[i] = '\0';
-		 args += i + 1;
-	 } else
-		 args += i;
-
-	 // Chew up trailing spaces.
-	 return skip_spaces(args);
- }
-*/
-import "C"
-import (
-	"unsafe"
-
-	"source.monogon.dev/metropolis/pkg/bootparam"
-)
-
-func Parse(str string) (params bootparam.Params, rest string) {
-	cs := C.CString(bootparam.TrimLeftSpace(str))
-	csAllocPtr := cs
-	var param, val *C.char
-	for *cs != 0 {
-		var p bootparam.Param
-		cs = C.next_arg(cs, &param, &val)
-		p.Param = C.GoString(param)
-		if val != nil {
-			p.Value = C.GoString(val)
-		}
-		if p.Param == "--" {
-			rest = C.GoString(cs)
-			return
-		}
-		params = append(params, p)
-	}
-	C.free(unsafe.Pointer(csAllocPtr))
-	return
-}
diff --git a/metropolis/pkg/cmd/BUILD.bazel b/metropolis/pkg/cmd/BUILD.bazel
deleted file mode 100644
index 7d5bbeb..0000000
--- a/metropolis/pkg/cmd/BUILD.bazel
+++ /dev/null
@@ -1,11 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "cmd",
-    srcs = ["run.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/cmd",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//metropolis/pkg/logbuffer",
-    ],
-)
diff --git a/metropolis/pkg/cmd/run.go b/metropolis/pkg/cmd/run.go
deleted file mode 100644
index 808a7c0..0000000
--- a/metropolis/pkg/cmd/run.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Package cmd contains helpers that abstract away the chore of starting new
-// processes, tracking their lifetime, inspecting their output, etc.
-package cmd
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"os"
-	"os/exec"
-	"strings"
-
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-)
-
-// RunCommand starts a new process and waits until either its completion, or
-// until the supplied predicate function pf returns true. The function is called
-// for each line produced by the new process.
-//
-// The returned boolean value equals the last value returned by pf.
-//
-// The process will be killed both in the event the context is cancelled, and
-// when expectedOutput is found.
-func RunCommand(ctx context.Context, path string, args []string, pf func(string) bool) (bool, error) {
-	// Make a sub-context to ensure the process exits when this function is done.
-	ctx, ctxC := context.WithCancel(ctx)
-	defer ctxC()
-
-	// Copy the stdout and stderr output to a single channel of lines so that they
-	// can then be matched against expectedOutput.
-
-	// Since LineBuffer can write its buffered contents on a deferred Close,
-	// after the reader loop is broken, avoid deadlocks by making lineC a
-	// buffered channel.
-	lineC := make(chan string, 2)
-	lineCB := func(l *logbuffer.Line) {
-		// If the context is canceled, no-one is listening on lineC anymore, so we would
-		// block.
-		select {
-		case <-ctx.Done():
-			return
-		case lineC <- l.Data:
-		}
-	}
-	outBuffer := logbuffer.NewLineBuffer(1024, lineCB)
-	defer outBuffer.Close()
-	errBuffer := logbuffer.NewLineBuffer(1024, lineCB)
-	defer errBuffer.Close()
-
-	// Prepare the command context, and start the process.
-	cmd := exec.CommandContext(ctx, path, args...)
-	// Tee std{out,err} into the linebuffers above and the process' std{out,err}, to
-	// allow easier debugging.
-	cmd.Stdout = io.MultiWriter(os.Stdout, outBuffer)
-	cmd.Stderr = io.MultiWriter(os.Stderr, errBuffer)
-	if err := cmd.Start(); err != nil {
-		return false, fmt.Errorf("couldn't start the process: %w", err)
-	}
-
-	// Handle the case in which the process finishes before pf takes the chance to
-	// kill it.
-	complC := make(chan error, 1)
-	go func() {
-		complC <- cmd.Wait()
-	}()
-
-	// Try matching against expectedOutput and return the result.
-	for {
-		select {
-		case <-ctx.Done():
-			return false, ctx.Err()
-		case line := <-lineC:
-			if pf(line) {
-				cmd.Process.Kill()
-				<-complC
-				return true, nil
-			}
-		case err := <-complC:
-			return false, err
-		}
-	}
-}
-
-// TerminateIfFound creates RunCommand predicates that instantly terminate
-// program execution in the event the given string is found in any line
-// produced. RunCommand will return true, if the string searched for was found,
-// and false otherwise. If logf isn't nil, it will be called whenever a new
-// line is received.
-func TerminateIfFound(needle string, logf func(string)) func(string) bool {
-	return func(haystack string) bool {
-		if logf != nil {
-			logf(haystack)
-		}
-		return strings.Contains(haystack, needle)
-	}
-}
-
-// WaitUntilCompletion creates a RunCommand predicate that will make it wait
-// for the process to exit on its own. If logf isn't nil, it will be called
-// whenever a new line is received.
-func WaitUntilCompletion(logf func(string)) func(string) bool {
-	return func(line string) bool {
-		if logf != nil {
-			logf(line)
-		}
-		return false
-	}
-}
diff --git a/metropolis/pkg/devicemapper/BUILD.bazel b/metropolis/pkg/devicemapper/BUILD.bazel
deleted file mode 100644
index 7d4c132..0000000
--- a/metropolis/pkg/devicemapper/BUILD.bazel
+++ /dev/null
@@ -1,16 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "devicemapper",
-    srcs = [
-        "ctype.go",
-        "devicemapper.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/devicemapper",
-    visibility = ["//metropolis:__subpackages__"],
-    deps = [
-        "@com_github_pkg_errors//:errors",
-        "@com_github_yalue_native_endian//:native_endian",
-        "@org_golang_x_sys//unix",
-    ],
-)
diff --git a/metropolis/pkg/devicemapper/ctype.go b/metropolis/pkg/devicemapper/ctype.go
deleted file mode 100644
index 05e6934..0000000
--- a/metropolis/pkg/devicemapper/ctype.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package devicemapper
-
-// Linux kernel ctype data from @linux//include/linux:ctype.h
-
-const (
-	_U  = 0x01 /* upper */
-	_L  = 0x02 /* lower */
-	_D  = 0x04 /* digit */
-	_C  = 0x08 /* cntrl */
-	_P  = 0x10 /* punct */
-	_S  = 0x20 /* white space (space/lf/tab) */
-	_X  = 0x40 /* hex digit */
-	_SP = 0x80 /* hard space (0x20) */
-)
-
-var ctypeLookup = [256]byte{
-	_C, _C, _C, _C, _C, _C, _C, _C, /* 0-7 */
-	_C, _C | _S, _C | _S, _C | _S, _C | _S, _C | _S, _C, _C, /* 8-15 */
-	_C, _C, _C, _C, _C, _C, _C, _C, /* 16-23 */
-	_C, _C, _C, _C, _C, _C, _C, _C, /* 24-31 */
-	_S | _SP, _P, _P, _P, _P, _P, _P, _P, /* 32-39 */
-	_P, _P, _P, _P, _P, _P, _P, _P, /* 40-47 */
-	_D, _D, _D, _D, _D, _D, _D, _D, /* 48-55 */
-	_D, _D, _P, _P, _P, _P, _P, _P, /* 56-63 */
-	_P, _U | _X, _U | _X, _U | _X, _U | _X, _U | _X, _U | _X, _U, /* 64-71 */
-	_U, _U, _U, _U, _U, _U, _U, _U, /* 72-79 */
-	_U, _U, _U, _U, _U, _U, _U, _U, /* 80-87 */
-	_U, _U, _U, _P, _P, _P, _P, _P, /* 88-95 */
-	_P, _L | _X, _L | _X, _L | _X, _L | _X, _L | _X, _L | _X, _L, /* 96-103 */
-	_L, _L, _L, _L, _L, _L, _L, _L, /* 104-111 */
-	_L, _L, _L, _L, _L, _L, _L, _L, /* 112-119 */
-	_L, _L, _L, _P, _P, _P, _P, _C, /* 120-127 */
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */
-	_S | _SP, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, /* 160-175 */
-	_P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, _P, /* 176-191 */
-	_U, _U, _U, _U, _U, _U, _U, _U, _U, _U, _U, _U, _U, _U, _U, _U, /* 192-207 */
-	_U, _U, _U, _U, _U, _U, _U, _P, _U, _U, _U, _U, _U, _U, _U, _L, /* 208-223 */
-	_L, _L, _L, _L, _L, _L, _L, _L, _L, _L, _L, _L, _L, _L, _L, _L, /* 224-239 */
-	_L, _L, _L, _L, _L, _L, _L, _P, _L, _L, _L, _L, _L, _L, _L, _L} /* 240-255 */
diff --git a/metropolis/pkg/devicemapper/devicemapper.go b/metropolis/pkg/devicemapper/devicemapper.go
deleted file mode 100644
index 1999a00..0000000
--- a/metropolis/pkg/devicemapper/devicemapper.go
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// package devicemapper is a thin wrapper for the devicemapper ioctl API.
-// See: https://github.com/torvalds/linux/blob/master/include/uapi/linux/dm-ioctl.h
-package devicemapper
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"os"
-	"runtime"
-	"strings"
-	"sync"
-	"unsafe"
-
-	"github.com/pkg/errors"
-	"github.com/yalue/native_endian"
-	"golang.org/x/sys/unix"
-)
-
-type DMIoctl struct {
-	Version     Version
-	DataSize    uint32
-	DataStart   uint32
-	TargetCount uint32
-	OpenCount   int32
-	Flags       uint32
-	EventNumber uint32
-	_padding1   uint32
-	Dev         uint64
-	Name        [128]byte
-	UUID        [129]byte
-	_padding2   [7]byte
-	Data        [16384]byte
-}
-
-type DMTargetSpec struct {
-	SectorStart uint64
-	Length      uint64
-	Status      int32
-	Next        uint32
-	TargetType  [16]byte
-}
-
-type DMTargetDeps struct {
-	Count   uint32
-	Padding uint32
-	Dev     []uint64
-}
-
-type DMNameList struct {
-	Dev  uint64
-	Next uint32
-	Name []byte
-}
-
-type DMTargetVersions struct {
-	Next    uint32
-	Version [3]uint32
-}
-
-type DMTargetMessage struct {
-	Sector  uint64
-	Message []byte
-}
-
-type Version [3]uint32
-
-const (
-	/* Top level cmds */
-	DM_VERSION_CMD uintptr = (0xc138fd << 8) + iota
-	DM_REMOVE_ALL_CMD
-	DM_LIST_DEVICES_CMD
-
-	/* device level cmds */
-	DM_DEV_CREATE_CMD
-	DM_DEV_REMOVE_CMD
-	DM_DEV_RENAME_CMD
-	DM_DEV_SUSPEND_CMD
-	DM_DEV_STATUS_CMD
-	DM_DEV_WAIT_CMD
-
-	/* Table level cmds */
-	DM_TABLE_LOAD_CMD
-	DM_TABLE_CLEAR_CMD
-	DM_TABLE_DEPS_CMD
-	DM_TABLE_STATUS_CMD
-
-	/* Added later */
-	DM_LIST_VERSIONS_CMD
-	DM_TARGET_MSG_CMD
-	DM_DEV_SET_GEOMETRY_CMD
-	DM_DEV_ARM_POLL_CMD
-)
-
-const (
-	DM_READONLY_FLAG       = 1 << 0 /* In/Out */
-	DM_SUSPEND_FLAG        = 1 << 1 /* In/Out */
-	DM_PERSISTENT_DEV_FLAG = 1 << 3 /* In */
-)
-
-const baseDataSize = uint32(unsafe.Sizeof(DMIoctl{})) - 16384
-
-func newReq() DMIoctl {
-	return DMIoctl{
-		Version:   Version{4, 0, 0},
-		DataSize:  baseDataSize,
-		DataStart: baseDataSize,
-	}
-}
-
-// stringToDelimitedBuf copies src to dst and returns an error if len(src) >
-// len(dst), or when the string contains a null byte.
-func stringToDelimitedBuf(dst []byte, src string) error {
-	if len(src) > len(dst)-1 {
-		return fmt.Errorf("string longer than target buffer (%v > %v)", len(src), len(dst)-1)
-	}
-	for i := 0; i < len(src); i++ {
-		if src[i] == 0x00 {
-			return errors.New("string contains null byte, this is unsupported by DM")
-		}
-		dst[i] = src[i]
-	}
-	return nil
-}
-
-// marshalParams marshals a list of strings into a single string according to
-// the rules in the kernel-side decoder. Strings with null bytes or only
-// whitespace characters cannot be encoded and will return an errors.
-func marshalParams(params []string) (string, error) {
-	var strb strings.Builder
-	for _, param := range params {
-		var hasNonWhitespace bool
-		for i := 0; i < len(param); i++ {
-			b := param[i]
-			if b == 0x00 {
-				return "", errors.New("parameter with null bytes cannot be encoded")
-			}
-			isWhitespace := ctypeLookup[b]&_S != 0
-			if !isWhitespace {
-				hasNonWhitespace = true
-			}
-			if isWhitespace || b == '\\' {
-				strb.WriteByte('\\')
-			}
-			strb.WriteByte(b)
-		}
-		if !hasNonWhitespace {
-			return "", errors.New("parameter with only whitespace cannot be encoded")
-		}
-		strb.WriteByte(' ')
-	}
-	return strb.String(), nil
-}
-
-var ctrlFile *os.File
-var ctrlFileError error
-var ctrlFileOnce sync.Once
-
-func initCtrlFile() {
-	ctrlFile, ctrlFileError = os.Open("/dev/mapper/control")
-	if os.IsNotExist(ctrlFileError) {
-		_ = os.MkdirAll("/dev/mapper", 0755)
-		ctrlFileError = unix.Mknod("/dev/mapper/control", unix.S_IFCHR|0600, int(unix.Mkdev(10, 236)))
-		if ctrlFileError != nil {
-			ctrlFileError = fmt.Errorf("devicemapper control device doesn't exist and can't be mknod()ed: %w", ctrlFileError)
-			return
-		}
-		ctrlFile, ctrlFileError = os.Open("/dev/mapper/control")
-	}
-	if ctrlFileError != nil {
-		ctrlFileError = fmt.Errorf("failed to open devicemapper control device: %w", ctrlFileError)
-	}
-}
-
-func GetVersion() (Version, error) {
-	req := newReq()
-	ctrlFileOnce.Do(initCtrlFile)
-	if ctrlFileError != nil {
-		return Version{}, ctrlFileError
-	}
-	if _, _, err := unix.Syscall(unix.SYS_IOCTL, ctrlFile.Fd(), DM_VERSION_CMD, uintptr(unsafe.Pointer(&req))); err != 0 {
-		return Version{}, err
-	}
-	return req.Version, nil
-}
-
-func CreateDevice(name string) (uint64, error) {
-	req := newReq()
-	if err := stringToDelimitedBuf(req.Name[:], name); err != nil {
-		return 0, err
-	}
-	ctrlFileOnce.Do(initCtrlFile)
-	if ctrlFileError != nil {
-		return 0, ctrlFileError
-	}
-	if _, _, err := unix.Syscall(unix.SYS_IOCTL, ctrlFile.Fd(), DM_DEV_CREATE_CMD, uintptr(unsafe.Pointer(&req))); err != 0 {
-		return 0, err
-	}
-	return req.Dev, nil
-}
-
-func RemoveDevice(name string) error {
-	req := newReq()
-	if err := stringToDelimitedBuf(req.Name[:], name); err != nil {
-		return err
-	}
-	ctrlFileOnce.Do(initCtrlFile)
-	if ctrlFileError != nil {
-		return ctrlFileError
-	}
-	if _, _, err := unix.Syscall(unix.SYS_IOCTL, ctrlFile.Fd(), DM_DEV_REMOVE_CMD, uintptr(unsafe.Pointer(&req))); err != 0 {
-		return err
-	}
-	runtime.KeepAlive(req)
-	return nil
-}
-
-// Target represents a byte region inside a devicemapper table for a given
-// device provided by a given target implementation.
-type Target struct {
-	// StartSector is the first sector (defined as being 512 bytes long) this
-	// target covers.
-	StartSector uint64
-	// Length is the number of sectors (defined as being 512 bytes long) this
-	// target covers, starting from StartSector.
-	Length uint64
-	// Type is the type of target handling this byte region.
-	// Types implemented by the Linux kernel can be found at
-	// @linux//drivers/md/... by looking for dm_register_target() calls.
-	Type string
-	// Parameters are additional parameters specific to the target type.
-	// Note that null bytes and parameters consisting only of whitespace
-	// characters cannot be encoded and will return an error.
-	Parameters []string
-}
-
-func LoadTable(name string, readOnly bool, targets []Target) error {
-	req := newReq()
-	if err := stringToDelimitedBuf(req.Name[:], name); err != nil {
-		return err
-	}
-	var data bytes.Buffer
-	for _, target := range targets {
-		encodedParams, err := marshalParams(target.Parameters)
-		if err != nil {
-			return fmt.Errorf("cannot encode parameters: %w", err)
-		}
-		// Gives the size of the spec and the null-terminated params aligned to 8 bytes
-		padding := len(encodedParams) % 8
-		targetSize := uint32(int(unsafe.Sizeof(DMTargetSpec{})) + (len(encodedParams) + 1) + padding)
-
-		targetSpec := DMTargetSpec{
-			SectorStart: target.StartSector,
-			Length:      target.Length,
-			Next:        targetSize,
-		}
-		if err := stringToDelimitedBuf(targetSpec.TargetType[:], target.Type); err != nil {
-			return err
-		}
-		if err := binary.Write(&data, native_endian.NativeEndian(), &targetSpec); err != nil {
-			panic(err)
-		}
-		data.WriteString(encodedParams)
-		data.WriteByte(0x00)
-		for i := 0; i < padding; i++ {
-			data.WriteByte(0x00)
-		}
-	}
-	req.TargetCount = uint32(len(targets))
-	if data.Len() >= 16384 {
-		return errors.New("table too large for allocated memory")
-	}
-	req.DataSize = baseDataSize + uint32(data.Len())
-	copy(req.Data[:], data.Bytes())
-	if readOnly {
-		req.Flags = DM_READONLY_FLAG
-	}
-	ctrlFileOnce.Do(initCtrlFile)
-	if ctrlFileError != nil {
-		return ctrlFileError
-	}
-	if _, _, err := unix.Syscall(unix.SYS_IOCTL, ctrlFile.Fd(), DM_TABLE_LOAD_CMD, uintptr(unsafe.Pointer(&req))); err != 0 {
-		return err
-	}
-	runtime.KeepAlive(req)
-	return nil
-}
-
-func suspendResume(name string, suspend bool) error {
-	req := newReq()
-	if err := stringToDelimitedBuf(req.Name[:], name); err != nil {
-		return err
-	}
-	if suspend {
-		req.Flags = DM_SUSPEND_FLAG
-	}
-	ctrlFileOnce.Do(initCtrlFile)
-	if ctrlFileError != nil {
-		return ctrlFileError
-	}
-	if _, _, err := unix.Syscall(unix.SYS_IOCTL, ctrlFile.Fd(), DM_DEV_SUSPEND_CMD, uintptr(unsafe.Pointer(&req))); err != 0 {
-		return err
-	}
-	runtime.KeepAlive(req)
-	return nil
-}
-
-func Suspend(name string) error {
-	return suspendResume(name, true)
-}
-func Resume(name string) error {
-	return suspendResume(name, false)
-}
-
-func CreateActiveDevice(name string, readOnly bool, targets []Target) (uint64, error) {
-	dev, err := CreateDevice(name)
-	if err != nil {
-		return 0, fmt.Errorf("DM_DEV_CREATE failed: %w", err)
-	}
-	if err := LoadTable(name, readOnly, targets); err != nil {
-		_ = RemoveDevice(name)
-		return 0, fmt.Errorf("DM_TABLE_LOAD failed: %w", err)
-	}
-	if err := Resume(name); err != nil {
-		_ = RemoveDevice(name)
-		return 0, fmt.Errorf("DM_DEV_SUSPEND failed: %w", err)
-	}
-	return dev, nil
-}
diff --git a/metropolis/pkg/efivarfs/BUILD.bazel b/metropolis/pkg/efivarfs/BUILD.bazel
deleted file mode 100644
index 3e5339c..0000000
--- a/metropolis/pkg/efivarfs/BUILD.bazel
+++ /dev/null
@@ -1,32 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "efivarfs",
-    srcs = [
-        "boot.go",
-        "devicepath.go",
-        "efivarfs.go",
-        "variables.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/efivarfs",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//metropolis/pkg/msguid",
-        "@com_github_google_uuid//:uuid",
-        "@org_golang_x_text//encoding/unicode",
-    ],
-)
-
-go_test(
-    name = "efivarfs_test",
-    srcs = [
-        "boot_test.go",
-        "devicepath_test.go",
-    ],
-    embed = [":efivarfs"],
-    gc_goopts = ["-d=libfuzzer"],
-    deps = [
-        "@com_github_google_go_cmp//cmp",
-        "@com_github_google_uuid//:uuid",
-    ],
-)
diff --git a/metropolis/pkg/efivarfs/boot.go b/metropolis/pkg/efivarfs/boot.go
deleted file mode 100644
index 8fe1a55..0000000
--- a/metropolis/pkg/efivarfs/boot.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// MIT License
-//
-// Copyright (c) 2021 Philippe Voinov (philippevoinov@gmail.com)
-// Copyright 2021 The Monogon Project Authors.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package efivarfs
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-	"strings"
-)
-
-type LoadOptionCategory uint8
-
-const (
-	// Boot entries belonging to the Boot category are normal boot entries.
-	LoadOptionCategoryBoot LoadOptionCategory = 0x0
-	// Boot entries belonging to the App category are not booted as part of
-	// the normal boot order, but are only launched via menu or hotkey.
-	// This category is optional for bootloaders to support, before creating
-	// new boot entries of this category firmware support needs to be
-	// confirmed.
-	LoadOptionCategoryApp LoadOptionCategory = 0x1
-)
-
-// LoadOption contains information on a payload to be loaded by EFI.
-type LoadOption struct {
-	// Human-readable description of what this load option loads.
-	// This is what's being shown by the firmware when selecting a boot option.
-	Description string
-	// If set, firmware will skip this load option when it is in BootOrder.
-	// It is unspecificed whether this prevents the user from booting the entry
-	// manually.
-	Inactive bool
-	// If set, this load option will not be shown in any menu for load option
-	// selection. This does not affect other functionality.
-	Hidden bool
-	// Category contains the category of the load entry. The selected category
-	// affects various firmware behaviors, see the individual value
-	// descriptions for more information.
-	Category LoadOptionCategory
-	// Path to the UEFI PE executable to execute when this load option is being
-	// loaded.
-	FilePath DevicePath
-	// ExtraPaths contains additional device paths with vendor-specific
-	// behavior. Can generally be left empty.
-	ExtraPaths []DevicePath
-	// OptionalData gets passed as an argument to the executed PE executable.
-	// If zero-length a NULL value is passed to the executable.
-	OptionalData []byte
-}
-
-// Marshal encodes a LoadOption into a binary EFI_LOAD_OPTION.
-func (e *LoadOption) Marshal() ([]byte, error) {
-	var data []byte
-	var attrs uint32
-	attrs |= (uint32(e.Category) & 0x1f) << 8
-	if e.Hidden {
-		attrs |= 0x08
-	}
-	if !e.Inactive {
-		attrs |= 0x01
-	}
-	data = append32(data, attrs)
-	filePathRaw, err := e.FilePath.Marshal()
-	if err != nil {
-		return nil, fmt.Errorf("failed marshalling FilePath: %w", err)
-	}
-	for _, ep := range e.ExtraPaths {
-		epRaw, err := ep.Marshal()
-		if err != nil {
-			return nil, fmt.Errorf("failed marshalling ExtraPath: %w", err)
-		}
-		filePathRaw = append(filePathRaw, epRaw...)
-	}
-	if len(filePathRaw) > math.MaxUint16 {
-		return nil, fmt.Errorf("failed marshalling FilePath/ExtraPath: value too big (%d)", len(filePathRaw))
-	}
-	data = append16(data, uint16(len(filePathRaw)))
-	if strings.IndexByte(e.Description, 0x00) != -1 {
-		return nil, fmt.Errorf("failed to encode Description: contains invalid null bytes")
-	}
-	encodedDesc, err := Encoding.NewEncoder().Bytes([]byte(e.Description))
-	if err != nil {
-		return nil, fmt.Errorf("failed to encode Description: %w", err)
-	}
-	data = append(data, encodedDesc...)
-	data = append(data, 0x00, 0x00) // Final UTF-16/UCS-2 null code
-	data = append(data, filePathRaw...)
-	data = append(data, e.OptionalData...)
-	return data, nil
-}
-
-// UnmarshalLoadOption decodes a binary EFI_LOAD_OPTION into a LoadOption.
-func UnmarshalLoadOption(data []byte) (*LoadOption, error) {
-	if len(data) < 6 {
-		return nil, fmt.Errorf("invalid load option: minimum 6 bytes are required, got %d", len(data))
-	}
-	var opt LoadOption
-	attrs := binary.LittleEndian.Uint32(data[:4])
-	opt.Category = LoadOptionCategory((attrs >> 8) & 0x1f)
-	opt.Hidden = attrs&0x08 != 0
-	opt.Inactive = attrs&0x01 == 0
-	lenPath := binary.LittleEndian.Uint16(data[4:6])
-	// Search for UTF-16 null code
-	nullIdx := bytes.Index(data[6:], []byte{0x00, 0x00})
-	if nullIdx == -1 {
-		return nil, errors.New("no null code point marking end of Description found")
-	}
-	descriptionEnd := 6 + nullIdx + 1
-	descriptionRaw := data[6:descriptionEnd]
-	description, err := Encoding.NewDecoder().Bytes(descriptionRaw)
-	if err != nil {
-		return nil, fmt.Errorf("error decoding UTF-16 in Description: %w", err)
-	}
-	descriptionEnd += 2 // 2 null bytes terminating UTF-16 string
-	opt.Description = string(description)
-	if descriptionEnd+int(lenPath) > len(data) {
-		return nil, fmt.Errorf("declared length of FilePath (%d) overruns available data (%d)", lenPath, len(data)-descriptionEnd)
-	}
-	filePathData := data[descriptionEnd : descriptionEnd+int(lenPath)]
-	opt.FilePath, filePathData, err = UnmarshalDevicePath(filePathData)
-	if err != nil {
-		return nil, fmt.Errorf("failed unmarshaling FilePath: %w", err)
-	}
-	for len(filePathData) > 0 {
-		var extraPath DevicePath
-		extraPath, filePathData, err = UnmarshalDevicePath(filePathData)
-		if err != nil {
-			return nil, fmt.Errorf("failed unmarshaling ExtraPath: %w", err)
-		}
-		opt.ExtraPaths = append(opt.ExtraPaths, extraPath)
-	}
-
-	if descriptionEnd+int(lenPath) < len(data) {
-		opt.OptionalData = data[descriptionEnd+int(lenPath):]
-	}
-	return &opt, nil
-}
-
-// BootOrder represents the contents of the BootOrder EFI variable.
-type BootOrder []uint16
-
-// Marshal generates the binary representation of a BootOrder.
-func (t *BootOrder) Marshal() []byte {
-	var out []byte
-	for _, v := range *t {
-		out = append16(out, v)
-	}
-	return out
-}
-
-// UnmarshalBootOrder loads a BootOrder from its binary representation.
-func UnmarshalBootOrder(d []byte) (BootOrder, error) {
-	if len(d)%2 != 0 {
-		return nil, fmt.Errorf("invalid length: %v bytes", len(d))
-	}
-	l := len(d) / 2
-	out := make(BootOrder, l)
-	for i := 0; i < l; i++ {
-		out[i] = uint16(d[2*i]) | uint16(d[2*i+1])<<8
-	}
-	return out, nil
-}
-
-func append16(d []byte, v uint16) []byte {
-	return append(d,
-		byte(v&0xFF),
-		byte(v>>8&0xFF),
-	)
-}
-
-func append32(d []byte, v uint32) []byte {
-	return append(d,
-		byte(v&0xFF),
-		byte(v>>8&0xFF),
-		byte(v>>16&0xFF),
-		byte(v>>24&0xFF),
-	)
-}
diff --git a/metropolis/pkg/efivarfs/boot_test.go b/metropolis/pkg/efivarfs/boot_test.go
deleted file mode 100644
index 9abd8f9..0000000
--- a/metropolis/pkg/efivarfs/boot_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package efivarfs
-
-import (
-	"bytes"
-	"encoding/hex"
-	"testing"
-
-	"github.com/google/go-cmp/cmp"
-	"github.com/google/uuid"
-)
-
-// Generated with old working marshaler and manually double-checked
-var ref, _ = hex.DecodeString(
-	"010000004a004500780061006d0070006c006500000004012a00010000000" +
-		"500000000000000080000000000000014b8a76bad9dd11180b400c04fd430" +
-		"c8020204041c005c0074006500730074005c0061002e00650066006900000" +
-		"07fff0400",
-)
-
-func TestEncoding(t *testing.T) {
-	opt := LoadOption{
-		Description: "Example",
-		FilePath: DevicePath{
-			&HardDrivePath{
-				PartitionNumber:     1,
-				PartitionStartBlock: 5,
-				PartitionSizeBlocks: 8,
-				PartitionMatch: PartitionGPT{
-					PartitionUUID: uuid.NameSpaceX500,
-				},
-			},
-			FilePath("/test/a.efi"),
-		},
-	}
-	got, err := opt.Marshal()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !bytes.Equal(ref, got) {
-		t.Fatalf("expected %x, got %x", ref, got)
-	}
-	got2, err := UnmarshalLoadOption(got)
-	if err != nil {
-		t.Fatalf("failed to unmarshal marshaled LoadOption: %v", err)
-	}
-	diff := cmp.Diff(&opt, got2)
-	if diff != "" {
-		t.Errorf("marshal/unmarshal wasn't transparent: %v", diff)
-	}
-}
-
-func FuzzDecode(f *testing.F) {
-	f.Add(ref)
-	f.Fuzz(func(t *testing.T, a []byte) {
-		// Just try to see if it crashes
-		_, _ = UnmarshalLoadOption(a)
-	})
-}
diff --git a/metropolis/pkg/efivarfs/devicepath.go b/metropolis/pkg/efivarfs/devicepath.go
deleted file mode 100644
index 38dd731..0000000
--- a/metropolis/pkg/efivarfs/devicepath.go
+++ /dev/null
@@ -1,316 +0,0 @@
-package efivarfs
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-	"strings"
-
-	"github.com/google/uuid"
-
-	"source.monogon.dev/metropolis/pkg/msguid"
-)
-
-// DevicePath represents a path consisting of one or more elements to an
-// entity implementing an EFI protocol. It's very broadly used inside EFI
-// for representing all sorts of abstract paths. In the context of this
-// package it is used to represent paths to EFI loaders.
-// See https://uefi.org/specs/UEFI/2.10/10_Protocols_Device_Path_Protocol.html
-// for more information.
-type DevicePath []DevicePathElem
-
-// DevicePathElem is a common interface for all UEFI device path elements.
-type DevicePathElem interface {
-	typ() uint8
-	subType() uint8
-	data() ([]byte, error)
-}
-
-type pathElemUnmarshalFunc func([]byte) (DevicePathElem, error)
-
-// PartitionMBR matches a drive or partition formatted with legacy MBR
-// (Master Boot Record).
-type PartitionMBR struct {
-	// DiskSignature contains a 4-byte signature identifying the drive, located
-	// just after the 440 bytes of boot sector loading code.
-	// Note that since MBR does not have per-partition signatures, this is
-	// combined with PartitionNumber to select a partition.
-	DiskSignature [4]byte
-}
-
-func (p PartitionMBR) partitionSignature() (sig [16]byte) {
-	copy(sig[:4], p.DiskSignature[:])
-	return
-}
-
-func (p PartitionMBR) partitionFormat() uint8 {
-	return 0x01
-}
-
-func (p PartitionMBR) signatureType() uint8 {
-	return 0x01
-}
-
-// PartitionGPT matches a partition on a drive formatted with GPT.
-type PartitionGPT struct {
-	// UUID of the partition to be matched. Conversion into mixed-endian format
-	// is taken care of, a standard big-endian UUID can be put in here.
-	PartitionUUID uuid.UUID
-}
-
-func (p PartitionGPT) partitionSignature() [16]byte {
-	return msguid.From(p.PartitionUUID)
-}
-
-func (p PartitionGPT) partitionFormat() uint8 {
-	return 0x02
-}
-
-func (p PartitionGPT) signatureType() uint8 {
-	return 0x02
-}
-
-// PartitionUnknown is being used to represent unknown partitioning schemas or
-// combinations of PartitionFormat/SignatureType. It contains raw uninterpreted
-// data.
-type PartitionUnknown struct {
-	PartitionSignature [16]byte
-	PartitionFormat    uint8
-	SignatureType      uint8
-}
-
-func (p PartitionUnknown) partitionSignature() [16]byte {
-	return p.PartitionSignature
-}
-
-func (p PartitionUnknown) partitionFormat() uint8 {
-	return p.PartitionFormat
-}
-
-func (p PartitionUnknown) signatureType() uint8 {
-	return p.SignatureType
-}
-
-type PartitionMatch interface {
-	partitionSignature() [16]byte
-	partitionFormat() uint8
-	signatureType() uint8
-}
-
-// HardDrivePath matches whole drives or partitions on GPT/MBR formatted
-// drives.
-type HardDrivePath struct {
-	// Partition number, starting at 1. If zero or unset, the whole drive is
-	// selected.
-	PartitionNumber uint32
-	// Block address at which the partition starts. Not used for matching
-	// partitions in EDK2.
-	PartitionStartBlock uint64
-	// Number of blocks occupied by the partition starting from the
-	// PartitionStartBlock. Not used for matching partitions in EDK2.
-	PartitionSizeBlocks uint64
-	// PartitionMatch is used to match drive or partition signatures.
-	// Use PartitionMBR and PartitionGPT types here.
-	PartitionMatch PartitionMatch
-}
-
-func (h *HardDrivePath) typ() uint8 {
-	return 4
-}
-
-func (h *HardDrivePath) subType() uint8 {
-	return 1
-}
-
-func (h *HardDrivePath) data() ([]byte, error) {
-	out := make([]byte, 38)
-	le := binary.LittleEndian
-	le.PutUint32(out[0:4], h.PartitionNumber)
-	le.PutUint64(out[4:12], h.PartitionStartBlock)
-	le.PutUint64(out[12:20], h.PartitionSizeBlocks)
-	if h.PartitionMatch == nil {
-		return nil, errors.New("PartitionMatch needs to be set")
-	}
-	sig := h.PartitionMatch.partitionSignature()
-	copy(out[20:36], sig[:])
-	out[36] = h.PartitionMatch.partitionFormat()
-	out[37] = h.PartitionMatch.signatureType()
-	return out, nil
-}
-
-func unmarshalHardDrivePath(data []byte) (DevicePathElem, error) {
-	var h HardDrivePath
-	if len(data) != 38 {
-		return nil, fmt.Errorf("invalid HardDrivePath element, expected 38 bytes, got %d", len(data))
-	}
-	le := binary.LittleEndian
-	h.PartitionNumber = le.Uint32(data[0:4])
-	h.PartitionStartBlock = le.Uint64(data[4:12])
-	h.PartitionSizeBlocks = le.Uint64(data[12:20])
-	partitionFormat := data[36]
-	signatureType := data[37]
-	var rawSig [16]byte
-	copy(rawSig[:], data[20:36])
-	switch {
-	case partitionFormat == 1 && signatureType == 1:
-		// MBR
-		var mbr PartitionMBR
-		copy(mbr.DiskSignature[:], rawSig[:4])
-		h.PartitionMatch = mbr
-	case partitionFormat == 2 && signatureType == 2:
-		// GPT
-		h.PartitionMatch = PartitionGPT{
-			PartitionUUID: msguid.To(rawSig),
-		}
-	default:
-		// Unknown
-		h.PartitionMatch = PartitionUnknown{
-			PartitionSignature: rawSig,
-			PartitionFormat:    partitionFormat,
-			SignatureType:      signatureType,
-		}
-	}
-	return &h, nil
-}
-
-// FilePath contains a backslash-separated path or part of a path to a file on
-// a filesystem.
-type FilePath string
-
-func (f FilePath) typ() uint8 {
-	return 4
-}
-
-func (f FilePath) subType() uint8 {
-	return 4
-}
-
-func (f FilePath) data() ([]byte, error) {
-	if strings.IndexByte(string(f), 0x00) != -1 {
-		return nil, fmt.Errorf("contains invalid null bytes")
-	}
-	withBackslashes := bytes.ReplaceAll([]byte(f), []byte(`/`), []byte(`\`))
-	out, err := Encoding.NewEncoder().Bytes(withBackslashes)
-	if err != nil {
-		return nil, fmt.Errorf("failed to encode FilePath to UTF-16: %w", err)
-	}
-	return append(out, 0x00, 0x00), nil
-}
-
-func unmarshalFilePath(data []byte) (DevicePathElem, error) {
-	if len(data) < 2 {
-		return nil, fmt.Errorf("FilePath must be at least 2 bytes because of UTF-16 null terminator")
-	}
-	out, err := Encoding.NewDecoder().Bytes(data)
-	if err != nil {
-		return nil, fmt.Errorf("error decoding FilePath UTF-16 string: %w", err)
-	}
-	nullIdx := bytes.IndexByte(out, 0x00)
-	if nullIdx != len(out)-1 {
-		return nil, fmt.Errorf("FilePath not properly null-terminated")
-	}
-	withoutBackslashes := strings.ReplaceAll(string(out[:len(out)-1]), `\`, `/`)
-	return FilePath(withoutBackslashes), nil
-}
-
-// Map key contains type and subtype
-var pathElementUnmarshalMap = map[[2]byte]pathElemUnmarshalFunc{
-	{4, 1}: unmarshalHardDrivePath,
-	{4, 4}: unmarshalFilePath,
-}
-
-// UnknownPath is a generic structure for all types of path elements not
-// understood by this library. The UEFI-specified set of path element
-// types is vast and mostly unused, this generic type allows for parsing as
-// well as pass-through of not-understood path elements.
-type UnknownPath struct {
-	TypeVal    uint8
-	SubTypeVal uint8
-	DataVal    []byte
-}
-
-func (u UnknownPath) typ() uint8 {
-	return u.TypeVal
-}
-
-func (u UnknownPath) subType() uint8 {
-	return u.SubTypeVal
-}
-
-func (u UnknownPath) data() ([]byte, error) {
-	return u.DataVal, nil
-}
-
-// Marshal encodes the device path in binary form.
-func (d DevicePath) Marshal() ([]byte, error) {
-	var buf []byte
-	for _, p := range d {
-		buf = append(buf, p.typ(), p.subType())
-		elemBuf, err := p.data()
-		if err != nil {
-			return nil, fmt.Errorf("failed marshaling path element: %w", err)
-		}
-		// 4 is size of header which is included in length field
-		if len(elemBuf)+4 > math.MaxUint16 {
-			return nil, fmt.Errorf("path element payload over maximum size")
-		}
-		buf = append16(buf, uint16(len(elemBuf)+4))
-		buf = append(buf, elemBuf...)
-	}
-	// End of device path (Type 0x7f, SubType 0xFF)
-	buf = append(buf, 0x7f, 0xff, 0x04, 0x00)
-	return buf, nil
-}
-
-// UnmarshalDevicePath parses a binary device path until it encounters an end
-// device path structure. It returns that device path (excluding the final end
-// device path marker) as well as all all data following the end marker.
-func UnmarshalDevicePath(data []byte) (DevicePath, []byte, error) {
-	rest := data
-	var p DevicePath
-	for {
-		if len(rest) < 4 {
-			if len(rest) != 0 {
-				return nil, nil, fmt.Errorf("dangling bytes at the end of device path: %x", rest)
-			}
-			break
-		}
-		t := rest[0]
-		subT := rest[1]
-		dataLen := binary.LittleEndian.Uint16(rest[2:4])
-		if int(dataLen) > len(rest) {
-			return nil, nil, fmt.Errorf("path element larger than rest of buffer: %d > %d", dataLen, len(rest))
-		}
-		if dataLen < 4 {
-			return nil, nil, fmt.Errorf("path element must be at least 4 bytes (header), length indicates %d", dataLen)
-		}
-		elemData := rest[4:dataLen]
-		rest = rest[dataLen:]
-
-		// End of Device Path
-		if t == 0x7f && subT == 0xff {
-			return p, rest, nil
-		}
-
-		unmarshal, ok := pathElementUnmarshalMap[[2]byte{t, subT}]
-		if !ok {
-			p = append(p, &UnknownPath{
-				TypeVal:    t,
-				SubTypeVal: subT,
-				DataVal:    elemData,
-			})
-			continue
-		}
-		elem, err := unmarshal(elemData)
-		if err != nil {
-			return nil, nil, fmt.Errorf("failed decoding path element %d: %w", len(p), err)
-		}
-		p = append(p, elem)
-	}
-	if len(p) == 0 {
-		return nil, nil, errors.New("empty DevicePath without End Of Path element")
-	}
-	return nil, nil, fmt.Errorf("got DevicePath with %d elements, but without End Of Path element", len(p))
-}
diff --git a/metropolis/pkg/efivarfs/devicepath_test.go b/metropolis/pkg/efivarfs/devicepath_test.go
deleted file mode 100644
index ad81279..0000000
--- a/metropolis/pkg/efivarfs/devicepath_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package efivarfs
-
-import (
-	"bytes"
-	"testing"
-
-	"github.com/google/uuid"
-)
-
-func TestMarshalExamples(t *testing.T) {
-	cases := []struct {
-		name        string
-		path        DevicePath
-		expected    []byte
-		expectError bool
-	}{
-		{
-			name: "TestNone",
-			path: DevicePath{},
-			expected: []byte{
-				0x7f, 0xff, // End of HW device path
-				0x04, 0x00, // Length: 4 bytes
-			},
-		},
-		{
-			// From UEFI Device Path Examples, extracted single entry
-			name: "TestHD",
-			path: DevicePath{
-				&HardDrivePath{
-					PartitionNumber:     1,
-					PartitionStartBlock: 0x22,
-					PartitionSizeBlocks: 0x2710000,
-					PartitionMatch: PartitionGPT{
-						PartitionUUID: uuid.MustParse("15E39A00-1DD2-1000-8D7F-00A0C92408FC"),
-					},
-				},
-			},
-			expected: []byte{
-				0x04, 0x01, // Hard Disk type
-				0x2a, 0x00, // Length
-				0x01, 0x00, 0x00, 0x00, // Partition Number
-				0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Part Start
-				0x00, 0x00, 0x71, 0x02, 0x00, 0x00, 0x00, 0x00, // Part Size
-				0x00, 0x9a, 0xe3, 0x15, 0xd2, 0x1d, 0x00, 0x10,
-				0x8d, 0x7f, 0x00, 0xa0, 0xc9, 0x24, 0x08, 0xfc, // Signature
-				0x02,       // Part Format GPT
-				0x02,       // Signature GPT
-				0x7f, 0xff, // End of HW device path
-				0x04, 0x00, // Length: 4 bytes
-			},
-		},
-		{
-			name: "TestFilePath",
-			path: DevicePath{
-				FilePath("asdf"),
-			},
-			expected: []byte{
-				0x04, 0x04, // File Path type
-				0x0e, 0x00, // Length
-				'a', 0x00, 's', 0x00, 'd', 0x00, 'f', 0x00,
-				0x00, 0x00,
-				0x7f, 0xff, // End of HW device path
-				0x04, 0x00, // Length: 4 bytes
-			},
-		},
-	}
-
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			got, err := c.path.Marshal()
-			if err != nil && !c.expectError {
-				t.Fatalf("unexpected error: %v", err)
-			}
-			if err == nil && c.expectError {
-				t.Fatalf("expected error, got %x", got)
-			}
-			if err != nil && c.expectError {
-				// Do not compare result in case error is expected
-				return
-			}
-			if !bytes.Equal(got, c.expected) {
-				t.Fatalf("expected %x, got %x", c.expected, got)
-			}
-			_, rest, err := UnmarshalDevicePath(got)
-			if err != nil {
-				t.Errorf("failed to unmarshal value again: %v", err)
-			}
-			if len(rest) != 0 {
-				t.Errorf("rest is non-zero after single valid device path: %x", rest)
-			}
-		})
-	}
-}
diff --git a/metropolis/pkg/efivarfs/efivarfs.go b/metropolis/pkg/efivarfs/efivarfs.go
deleted file mode 100644
index ab6da26..0000000
--- a/metropolis/pkg/efivarfs/efivarfs.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package efivarfs provides functions to read and manipulate UEFI runtime
-// variables. It uses Linux's efivarfs [1] to access the variables and all
-// functions generally require that this is mounted at
-// "/sys/firmware/efi/efivars".
-//
-// [1] https://www.kernel.org/doc/html/latest/filesystems/efivarfs.html
-package efivarfs
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io/fs"
-	"os"
-	"strings"
-
-	"github.com/google/uuid"
-	"golang.org/x/text/encoding/unicode"
-)
-
-const (
-	Path = "/sys/firmware/efi/efivars"
-)
-
-var (
-	// ScopeGlobal is the scope of variables defined by the EFI specification
-	// itself.
-	ScopeGlobal = uuid.MustParse("8be4df61-93ca-11d2-aa0d-00e098032b8c")
-	// ScopeSystemd is the scope of variables defined by Systemd/bootspec.
-	ScopeSystemd = uuid.MustParse("4a67b082-0a4c-41cf-b6c7-440b29bb8c4f")
-)
-
-// Encoding defines the Unicode encoding used by UEFI, which is UCS-2 Little
-// Endian. For BMP characters UTF-16 is equivalent to UCS-2. See the UEFI
-// Spec 2.9, Sections 33.2.6 and 1.8.1.
-var Encoding = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
-
-// Attribute contains a bitset of EFI variable attributes.
-type Attribute uint32
-
-const (
-	// If set the value of the variable is is persistent across resets and
-	// power cycles. Variables without this set cannot be created or modified
-	// after UEFI boot services are terminated.
-	AttrNonVolatile Attribute = 1 << iota
-	// If set allows access to this variable from UEFI boot services.
-	AttrBootserviceAccess
-	// If set allows access to this variable from an operating system after
-	// UEFI boot services are terminated. Variables setting this must also
-	// set AttrBootserviceAccess. This is automatically taken care of by Write
-	// in this package.
-	AttrRuntimeAccess
-	// Marks a variable as being a hardware error record. See UEFI 2.10 section
-	// 8.2.8 for more information about this.
-	AttrHardwareErrorRecord
-	// Deprecated, should not be used for new variables.
-	AttrAuthenticatedWriteAccess
-	// Variable requires special authentication to write. These variables
-	// cannot be written with this package.
-	AttrTimeBasedAuthenticatedWriteAccess
-	// If set in a Write() call, tries to append the data instead of replacing
-	// it completely.
-	AttrAppendWrite
-	// Variable requires special authentication to access and write. These
-	// variables cannot be accessed with this package.
-	AttrEnhancedAuthenticatedAccess
-)
-
-func varPath(scope uuid.UUID, varName string) string {
-	return fmt.Sprintf("/sys/firmware/efi/efivars/%s-%s", varName, scope.String())
-}
-
-// Write writes the value of the named variable in the given scope.
-func Write(scope uuid.UUID, varName string, attrs Attribute, value []byte) error {
-	// Write attributes, see @linux//Documentation/filesystems:efivarfs.rst for format
-	f, err := os.OpenFile(varPath(scope, varName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
-	if err != nil {
-		e := err
-		// Unwrap PathError here as we wrap our own parameter message around it
-		var perr *fs.PathError
-		if errors.As(err, &perr) {
-			e = perr.Err
-		}
-		return fmt.Errorf("writing %q in scope %s: %w", varName, scope, e)
-	}
-	// Required by UEFI 2.10 Section 8.2.3:
-	// Runtime access to a data variable implies boot service access. Attributes
-	// that have EFI_VARIABLE_RUNTIME_ACCESS set must also have
-	// EFI_VARIABLE_BOOTSERVICE_ACCESS set. The caller is responsible for
-	// following this rule.
-	if attrs&AttrRuntimeAccess != 0 {
-		attrs |= AttrBootserviceAccess
-	}
-	// Linux wants everything in on write, so assemble an intermediate buffer
-	buf := make([]byte, len(value)+4)
-	binary.LittleEndian.PutUint32(buf[:4], uint32(attrs))
-	copy(buf[4:], value)
-	_, err = f.Write(buf)
-	if err1 := f.Close(); err1 != nil && err == nil {
-		err = err1
-	}
-	return err
-}
-
-// Read reads the value of the named variable in the given scope.
-func Read(scope uuid.UUID, varName string) ([]byte, Attribute, error) {
-	val, err := os.ReadFile(varPath(scope, varName))
-	if err != nil {
-		e := err
-		// Unwrap PathError here as we wrap our own parameter message around it
-		var perr *fs.PathError
-		if errors.As(err, &perr) {
-			e = perr.Err
-		}
-		return nil, Attribute(0), fmt.Errorf("reading %q in scope %s: %w", varName, scope, e)
-	}
-	if len(val) < 4 {
-		return nil, Attribute(0), fmt.Errorf("reading %q in scope %s: malformed, less than 4 bytes long", varName, scope)
-	}
-	return val[4:], Attribute(binary.LittleEndian.Uint32(val[:4])), nil
-}
-
-// List lists all variable names present for a given scope sorted by their names
-// in Go's "native" string sort order.
-func List(scope uuid.UUID) ([]string, error) {
-	vars, err := os.ReadDir(Path)
-	if err != nil {
-		return nil, fmt.Errorf("failed to list variable directory: %w", err)
-	}
-	var outVarNames []string
-	suffix := fmt.Sprintf("-%v", scope)
-	for _, v := range vars {
-		if v.IsDir() {
-			continue
-		}
-		if !strings.HasSuffix(v.Name(), suffix) {
-			continue
-		}
-		outVarNames = append(outVarNames, strings.TrimSuffix(v.Name(), suffix))
-	}
-	return outVarNames, nil
-}
-
-// Delete deletes the given variable name in the given scope. Use with care,
-// some firmware fails to boot if variables it uses are deleted.
-func Delete(scope uuid.UUID, varName string) error {
-	return os.Remove(varPath(scope, varName))
-}
diff --git a/metropolis/pkg/efivarfs/variables.go b/metropolis/pkg/efivarfs/variables.go
deleted file mode 100644
index fe59324..0000000
--- a/metropolis/pkg/efivarfs/variables.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package efivarfs
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io/fs"
-	"math"
-	"regexp"
-	"strconv"
-
-	"github.com/google/uuid"
-)
-
-func decodeString(varData []byte) (string, error) {
-	efiStringRaw, err := Encoding.NewDecoder().Bytes(varData)
-	if err != nil {
-		// Pass the decoding error unwrapped.
-		return "", err
-	}
-	// Remove the null suffix.
-	return string(bytes.TrimSuffix(efiStringRaw, []byte{0})), nil
-}
-
-// ReadLoaderDevicePartUUID reads the ESP UUID from an EFI variable.
-func ReadLoaderDevicePartUUID() (uuid.UUID, error) {
-	efiVar, _, err := Read(ScopeSystemd, "LoaderDevicePartUUID")
-	if err != nil {
-		return uuid.Nil, err
-	}
-	strContent, err := decodeString(efiVar)
-	if err != nil {
-		return uuid.Nil, fmt.Errorf("decoding string failed: %w", err)
-	}
-	out, err := uuid.Parse(strContent)
-	if err != nil {
-		return uuid.Nil, fmt.Errorf("value in LoaderDevicePartUUID could not be parsed as UUID: %w", err)
-	}
-	return out, nil
-}
-
-// Technically UEFI mandates that only upper-case hex indices are valid, but in
-// practice even vendors themselves ship firmware with lowercase hex indices,
-// thus accept these here as well.
-var bootVarRegexp = regexp.MustCompile(`^Boot([0-9A-Fa-f]{4})$`)
-
-// AddBootEntry creates an new EFI boot entry variable and returns its
-// non-negative index on success.
-func AddBootEntry(be *LoadOption) (int, error) {
-	varNames, err := List(ScopeGlobal)
-	if err != nil {
-		return -1, fmt.Errorf("failed to list EFI variables: %w", err)
-	}
-	presentEntries := make(map[int]bool)
-	// Technically these are sorted, but due to the lower/upper case issue
-	// we cannot rely on this fact.
-	for _, varName := range varNames {
-		s := bootVarRegexp.FindStringSubmatch(varName)
-		if s == nil {
-			continue
-		}
-		idx, err := strconv.ParseUint(s[1], 16, 16)
-		if err != nil {
-			// This cannot be hit as all regexp matches are parseable.
-			// A quick fuzz run agrees.
-			panic(err)
-		}
-		presentEntries[int(idx)] = true
-	}
-	idx := -1
-	for i := 0; i < math.MaxUint16; i++ {
-		if !presentEntries[i] {
-			idx = i
-			break
-		}
-	}
-	if idx == -1 {
-		return -1, errors.New("all 2^16 boot entry variables are occupied")
-	}
-
-	err = SetBootEntry(idx, be)
-	if err != nil {
-		return -1, fmt.Errorf("failed to set new boot entry: %w", err)
-	}
-	return idx, nil
-}
-
-// GetBootEntry returns the boot entry at the given index.
-func GetBootEntry(idx int) (*LoadOption, error) {
-	raw, _, err := Read(ScopeGlobal, fmt.Sprintf("Boot%04X", idx))
-	if errors.Is(err, fs.ErrNotExist) {
-		// Try non-spec-conforming lowercase entry
-		raw, _, err = Read(ScopeGlobal, fmt.Sprintf("Boot%04x", idx))
-	}
-	if err != nil {
-		return nil, err
-	}
-	return UnmarshalLoadOption(raw)
-}
-
-// SetBootEntry writes the given boot entry to the given index.
-func SetBootEntry(idx int, be *LoadOption) error {
-	bem, err := be.Marshal()
-	if err != nil {
-		return fmt.Errorf("while marshaling the EFI boot entry: %w", err)
-	}
-	return Write(ScopeGlobal, fmt.Sprintf("Boot%04X", idx), AttrNonVolatile|AttrRuntimeAccess, bem)
-}
-
-// DeleteBootEntry deletes the boot entry at the given index.
-func DeleteBootEntry(idx int) error {
-	err := Delete(ScopeGlobal, fmt.Sprintf("Boot%04X", idx))
-	if errors.Is(err, fs.ErrNotExist) {
-		// Try non-spec-conforming lowercase entry
-		err = Delete(ScopeGlobal, fmt.Sprintf("Boot%04x", idx))
-	}
-	return err
-}
-
-// SetBootOrder replaces contents of the boot order variable with the order
-// specified in ord.
-func SetBootOrder(ord BootOrder) error {
-	return Write(ScopeGlobal, "BootOrder", AttrNonVolatile|AttrRuntimeAccess, ord.Marshal())
-}
-
-// GetBootOrder returns the current boot order of the system.
-func GetBootOrder() (BootOrder, error) {
-	raw, _, err := Read(ScopeGlobal, "BootOrder")
-	if err != nil {
-		return nil, err
-	}
-	ord, err := UnmarshalBootOrder(raw)
-	if err != nil {
-		return nil, fmt.Errorf("invalid boot order structure: %w", err)
-	}
-	return ord, nil
-}
-
-// SetBootNext sets the boot entry used for the next boot only. It automatically
-// resets after the next boot.
-func SetBootNext(entryIdx uint16) error {
-	data := make([]byte, 2)
-	binary.LittleEndian.PutUint16(data, entryIdx)
-	return Write(ScopeGlobal, "BootNext", AttrNonVolatile|AttrRuntimeAccess, data)
-}
diff --git a/metropolis/pkg/erofs/BUILD.bazel b/metropolis/pkg/erofs/BUILD.bazel
deleted file mode 100644
index ce1622c..0000000
--- a/metropolis/pkg/erofs/BUILD.bazel
+++ /dev/null
@@ -1,37 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
-
-go_library(
-    name = "erofs",
-    srcs = [
-        "compression.go",
-        "defs.go",
-        "erofs.go",
-        "inode_types.go",
-        "uncompressed_inode_writer.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/erofs",
-    visibility = ["//visibility:public"],
-    deps = ["@org_golang_x_sys//unix"],
-)
-
-go_test(
-    name = "erofs_test",
-    srcs = [
-        "compression_test.go",
-        "defs_test.go",
-        "erofs_test.go",
-    ],
-    embed = [":erofs"],
-    pure = "on",  # keep
-    deps = [
-        "@com_github_stretchr_testify//assert",
-        "@com_github_stretchr_testify//require",
-        "@org_golang_x_sys//unix",
-    ],
-)
-
-ktest(
-    cmdline = "ramdisk_size=128",
-    tester = ":erofs_test",
-)
diff --git a/metropolis/pkg/erofs/compression.go b/metropolis/pkg/erofs/compression.go
deleted file mode 100644
index dca9946..0000000
--- a/metropolis/pkg/erofs/compression.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package erofs
-
-// This file contains compression-related functions.
-// TODO(lorenz): Fully implement compression. These are currently unused.
-
-import "encoding/binary"
-
-// mapHeader is a legacy but still-used advisory structure at the start of a
-// compressed VLE block. It contains constant values as annotated.
-type mapHeader struct {
-	Reserved      uint32 // 0
-	Advise        uint16 // 1
-	AlgorithmType uint8  // 0
-	ClusterBits   uint8  // 0
-}
-
-// encodeSmallVLEBlock encodes two VLE extents into a 8 byte block.
-func encodeSmallVLEBlock(vals [2]uint16, blkaddr uint32) [8]byte {
-	var out [8]byte
-	binary.LittleEndian.PutUint16(out[0:2], vals[0])
-	binary.LittleEndian.PutUint16(out[2:4], vals[1])
-	binary.LittleEndian.PutUint32(out[4:8], blkaddr)
-	return out
-}
-
-// encodeBigVLEBlock encodes 16 VLE extents into a 32 byte block.
-func encodeBigVLEBlock(vals [16]uint16, blkaddr uint32) [32]byte {
-	var out [32]byte
-	for i, val := range vals {
-		if val > 1<<14 {
-			panic("value is bigger than 14 bits, cannot encode")
-		}
-		// Writes packed 14 bit unsigned integers
-		pos := i * 14
-		bitStartPos := pos % 8
-		byteStartPos := pos / 8
-		out[byteStartPos] = out[byteStartPos]&((1<<bitStartPos)-1) | uint8(val<<bitStartPos)
-		out[byteStartPos+1] = uint8(val >> (8 - bitStartPos))
-		out[byteStartPos+2] = uint8(val >> (16 - bitStartPos))
-	}
-	binary.LittleEndian.PutUint32(out[28:32], blkaddr)
-	return out
-}
diff --git a/metropolis/pkg/erofs/compression_test.go b/metropolis/pkg/erofs/compression_test.go
deleted file mode 100644
index 8d5d656..0000000
--- a/metropolis/pkg/erofs/compression_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package erofs
-
-import (
-	"reflect"
-	"testing"
-)
-
-func TestEncodeSmallVLEBlock(t *testing.T) {
-	type args struct {
-		vals    [2]uint16
-		blkaddr uint32
-	}
-	tests := []struct {
-		name string
-		args args
-		want [8]byte
-	}{
-		{
-			name: "Reference",
-			args: args{vals: [2]uint16{vleClusterTypeHead | 1527, vleClusterTypeNonhead | 1}, blkaddr: 1},
-			want: [8]byte{0xf7, 0x15, 0x01, 0x20, 0x01, 0x00, 0x00, 0x00},
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := encodeSmallVLEBlock(tt.args.vals, tt.args.blkaddr); !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("encodeSmallVLEBlock() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
-
-func TestEncodeBigVLEBlock(t *testing.T) {
-	type args struct {
-		vals    [16]uint16
-		blkaddr uint32
-	}
-	tests := []struct {
-		name string
-		args args
-		want [32]byte
-	}{
-		{
-			name: "Reference",
-			args: args{
-				vals: [16]uint16{
-					vleClusterTypeNonhead | 2,
-					vleClusterTypeHead | 1460,
-					vleClusterTypeNonhead | 1,
-					vleClusterTypeNonhead | 2,
-					vleClusterTypeHead | 2751,
-					vleClusterTypeNonhead | 1,
-					vleClusterTypeNonhead | 2,
-					vleClusterTypeHead | 940,
-					vleClusterTypeNonhead | 1,
-					vleClusterTypeHead | 3142,
-					vleClusterTypeNonhead | 1,
-					vleClusterTypeNonhead | 2,
-					vleClusterTypeHead | 1750,
-					vleClusterTypeNonhead | 1,
-					vleClusterTypeNonhead | 2,
-					vleClusterTypeHead | 683,
-				},
-				blkaddr: 3,
-			},
-			want: [32]byte{0x02, 0x20, 0x6d, 0x15, 0x00, 0x0a, 0x80, 0xbf, 0x5a, 0x00, 0x28, 0x00, 0xb2, 0x4e, 0x01, 0xa0, 0x11, 0x17, 0x00, 0x0a, 0x80, 0xd6, 0x56, 0x00, 0x28, 0x00, 0xae, 0x4a, 0x03, 0x00, 0x00, 0x00}},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := encodeBigVLEBlock(tt.args.vals, tt.args.blkaddr); !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("encodeBigVLEBlock() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
diff --git a/metropolis/pkg/erofs/defs.go b/metropolis/pkg/erofs/defs.go
deleted file mode 100644
index 85898bf..0000000
--- a/metropolis/pkg/erofs/defs.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package erofs
-
-// This file contains definitions coming from the in-Kernel implementation of
-// the EROFS filesystem.  All definitions come from @linux//fs/erofs:erofs_fs.h
-// unless stated otherwise.
-
// Magic contains the 4 magic bytes starting at position 1024 identifying an
// EROFS filesystem.  Defined in @linux//include/uapi/linux/magic.h
// EROFS_SUPER_MAGIC_V1
var Magic = [4]byte{0xe2, 0xe1, 0xf5, 0xe0}

// blockSizeBits is the log2 of the block size; only 4KiB blocks are supported.
const blockSizeBits = 12
// BlockSize is the size in bytes of a single EROFS block (4096).
const BlockSize = 1 << blockSizeBits
-
// File type values as stored in directory entries.
// Defined in @linux//include/linux:fs_types.h starting at FT_UNKNOWN
const (
	fileTypeUnknown = iota
	fileTypeRegularFile
	fileTypeDirectory
	fileTypeCharacterDevice
	fileTypeBlockDevice
	fileTypeFIFO
	fileTypeSocket
	fileTypeSymbolicLink
)

// On-disk data layout variants for inodes.
// Anonymous enum starting at EROFS_INODE_FLAT_PLAIN
const (
	inodeFlatPlain             = 0
	inodeFlatCompressionLegacy = 1
	inodeFlatInline            = 2
	inodeFlatCompression       = 3
)
-
// struct erofs_dirent
// On-disk directory entry (12 bytes). NameStartOffset is relative to the
// start of the directory inode's content; FileType duplicates the type
// stored in the child's inode but must still be set.
type directoryEntryRaw struct {
	NodeNumber      uint64
	NameStartOffset uint16
	FileType        uint8
	Reserved        uint8
}

// struct erofs_super_block
// On-disk superblock, a 128-byte structure located 1024 bytes into the
// image. This package only fills in Magic, BlockSizeBits and RootNodeNumber
// (see NewWriter); the remaining fields are left at zero.
type superblock struct {
	Magic                [4]byte
	Checksum             uint32
	FeatureCompat        uint32
	BlockSizeBits        uint8
	Reserved0            uint8
	// RootNodeNumber is only 16 bits wide (normal nids are wider), which
	// constrains where the root directory inode can be placed.
	RootNodeNumber       uint16
	TotalInodes          uint64
	BuildTimeSeconds     uint64
	BuildTimeNanoseconds uint32
	Blocks               uint32
	MetaStartAddr        uint32
	SharedXattrStartAddr uint32
	UUID                 [16]byte
	VolumeName           [16]byte
	FeaturesIncompatible uint32
	Reserved1            [44]byte
}
-
// struct erofs_inode_compact
// Compact (32-byte) on-disk inode. Union holds the first data block number
// for plain inodes or a packed dev_t for device nodes.
type inodeCompact struct {
	Format         uint16
	XattrCount     uint16
	Mode           uint16
	HardlinkCount  uint16
	Size           uint32
	Reserved0      uint32
	Union          uint32
	InodeNumCompat uint32
	UID            uint16
	GID            uint16
	Reserved1      uint32
}

// Compression cluster types for VLE (variable-length extent) meta integers:
// each 14-bit value carries the type in its upper 2 bits (hence iota << 12).
// Anonymous enum starting at Z_EROFS_VLE_CLUSTER_TYPE_PLAIN
const (
	vleClusterTypePlain = iota << 12
	vleClusterTypeHead
	vleClusterTypeNonhead
)
diff --git a/metropolis/pkg/erofs/defs_test.go b/metropolis/pkg/erofs/defs_test.go
deleted file mode 100644
index 1d31bff..0000000
--- a/metropolis/pkg/erofs/defs_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package erofs
-
-import (
-	"bytes"
-	"encoding/binary"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-// These test that the specified structures serialize to the same number of
-// bytes as the ones in the EROFS kernel module.
-
// TestSuperblockSize checks that superblock serializes to the 128 bytes the
// kernel expects for struct erofs_super_block.
func TestSuperblockSize(t *testing.T) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, &superblock{}); err != nil {
		t.Fatalf("failed to write superblock: %v", err)
	}
	assert.Equal(t, 128, buf.Len())
}

// TestDirectoryEntrySize checks that directoryEntryRaw serializes to the 12
// bytes the kernel expects for struct erofs_dirent.
func TestDirectoryEntrySize(t *testing.T) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, &directoryEntryRaw{}); err != nil {
		t.Fatalf("failed to write directory entry: %v", err)
	}
	assert.Equal(t, 12, buf.Len())
}

// TestInodeCompactSize checks that inodeCompact serializes to the 32 bytes
// the kernel expects for struct erofs_inode_compact.
func TestInodeCompactSize(t *testing.T) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, &inodeCompact{}); err != nil {
		t.Fatalf("failed to write compact inode: %v", err)
	}
	assert.Equal(t, 32, buf.Len())
}
diff --git a/metropolis/pkg/erofs/doc.md b/metropolis/pkg/erofs/doc.md
deleted file mode 100644
index 1d4c29e..0000000
--- a/metropolis/pkg/erofs/doc.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# EROFS Primer
-EROFS is a relatively modern (Linux 5.3+) filesystem optimized for fast read-only use. Similar to squashfs
-and cramfs EROFS filesystems have no write support in the kernel and can only be created by external tools.
-Both squashfs and cramfs are extremely optimized towards achieving minimal size, to the detriment of performance.
-For modern server use both of them are unacceptably slow as they support limited concurrency, make inefficient
-use of the page cache and read in weird block sizes. EROFS is designed to replace them on modern, fast hardware
-and generally exceeds Ext4 in performance by leveraging the fact that it is read-only. It supports compression,
-but only in fixed disk-aligned chunks and using LZ4 for maximum performance.
-
-Sadly the existing tooling to create EROFS filesystems (erofs-utils's mkfs.erofs) can only pack up single
-folders which does not work in a build process as it would both require root access to get file ownership
-and device nodes correct as well as a complete content copy of all relevant files which is bad as it lies
-on the critical path of the image build process. Adopting mkfs.erofs for a spec-driven build process
-basically amounts to a rewrite as the "library" part of it also directly reads directories and thus cannot
-be used directly.
-
-As reusing the old code proved to be more effort than it's worth, this library was born. Sadly upstream EROFS
-has basically no documentation beyond a few trivial diagrams that describe how exactly the filesystem is
-constructed. This document holds most knowledge I pried from `mkfs.erofs` and the kernel implementation and
-should help people understand the code.
-
-# Blocks
-An EROFS filesystem consists of individual blocks, each of them 4096 bytes (4K) long. Each block can either be a metadata or a data block (but it's not possible to know just by looking at a single block). The first block is always a metadata block and the first 1024 bytes of it are occupied by padding. The next 128 bytes are a Superblock structure. The rest (2944 bytes) is available for normal metadata allocation. Blocks are numbered from zero.
-
-# Superblock
-As mentioned in the previous section about blocks the superblock is not actually a block in EROFS, but a 128 byte-sized structure 1024 bytes into the first block. Most fields don't need to be set and don't matter. The `BuildTimeSeconds` and `BuildTimeNanoseconds` fields determine the ctime, atime and mtime of all inodes which are in compact structure. This library leaves them at zero which results in all files having a creation time of 1.1.1970 (Unix zero). This is similar to what Bazel does for archives. The only fields which need be filled out are the magic bytes, the block size in bits (only 12 for 1 << 12 = 4096 is supported though) and the root `nid` which points to the inode structure of the root inode (which needs to be a directory for the EROFS to be mountable). The root inode cannot be placed anywhere on the disk as the integer size of the field is only 16 bits, whereas a normal nid field everywhere else is 32 bits. So in general the root directory immediately follows the superblock and thus has `nid` (1024 + 128)/32 = 36.
-
-# Inodes
-Inodes all have a common inode structure which exists in both compact (32 bytes) and extended (64 bytes) form. There's no fundamental difference, the extended form can store more metadata and has a bigger maximum file size (2^64 vs 2^32). All inode structures are aligned to 32 bytes. Through this alignment they are identifiable by a so-called `nid` which is simply their offset in bytes to the start of the filesystem divided by their alignment (32). Certain inodes (inline and compressed, a variant of inline) also store data immediately following the inode. The inode structure and its optional following data are allocated in metadata blocks. If there's no metadata block with enough free bytes to accommodate the inode, a new block is allocated and filled with that inode structure and its following data.
-
-EROFS has three on-disk inode layouts that are in use:
-
-## Plain Inodes
-These consist only of a single inode structure (compact, 32 bytes or extended, 64 bytes) in the metadata area, and zero or more filled data blocks (empty inodes are always plain). All data blocks are consecutive and the Union value of the inode contains the block number of the first data block. The `Size` value contains the size of the content in bytes, not including the inode structure itself. The number of data blocks is determined by dividing the `Size` value of the inode by the block size and rounding up (see next paragraph why rounding up is necessary). The data blocks do not need to be adjacent to the metadata block the inode is in.
-
-## Inodes with inline data
-These are similar to plain inodes but also work for inode content sizes not neatly divisible by the block size. The leftover data at the end of the inode content that didn't fit into a whole data block is placed in the metadata area directly following the inode itself. How many bytes are appended to the inode is again determined by looking at the inode structure's `Size` value and calculating the remainder when divided by the block size (4096 bytes). The number of blocks is the result of the integer division of these numbers. As with the plain inodes the full blocks don't need to be adjacent to the metadata block.
-
-An inline inode can thus occupy more than a whole metadata block (32 bytes inode + 4095 bytes of data that didn't fit into a full block). This special case is handled by detecting that the inode plus the inline data would exceed a full metadata block (4096) and converting to a plain inode with an additional data block which is zero-padded. This is done specifically when (inode_content_size % 4096) + inode_self_size > 4096. Thus if this special case has happened can also be determined just from the inode size value.
-
-## Compressed inodes
-EROFS supports what they call Variable-Length Extents. These are normal plain inodes or inodes with inline data, but instead of the data itself they contain a metadata structure beginning with a `MapHeader` which is mostly there for legacy reasons and always contains the same data. Then follow compressed VLE meta blocks, which contain either 2 or 16 packed 14 bit integers and a on-disk block number. For alignment reasons the first 6 VLE meta integers are always packed into the 2 integer structures. All following complete blocks of 16 VLE meta integers get packed into 16 byte together with their starting block number. Anything that's left over gets once again packed into 2 integer structures. Each integer in this compressed sequence of 14 bit integers represents 4K of uncompressed data. So a file which has an uncompressed size of 4MiB needs 1000 of these integers to be represented, independent of how well it compresses.
-
-Note that VLE meta blocks are treated as content of plain or inline inodes. So if they exceed the maximum inline inode size there will be blocks allocated just for storing VLE meta blocks.
-
-These VLE meta integers are divided into 12 lower bits and 2 upper bits. The upper bits determine what the lower 12 bits represent and also how this 4K block of uncompressed data is represented. There's three types: PLAIN, HEAD and NONHEAD. PLAIN means no compression, the block is stored as-is on disk. HEAD means this block is the start of a compressed cluster. Its 12 lower bits represent the offset of the decompressed data with regards to the uncompressed 4K block boundary. NONHEAD means this block is part of the same on-disk block as the last HEAD block when uncompressed. Its lower 12 bits represent the number of blocks until the next HEAD or PLAIN block unless at the end of a VLE meta block (2 or 16 integers), then they represent the distance from the last HEAD or PLAIN block.
-
-Only PLAIN and HEAD blocks have actual on-disk blocks of uncompressed and compressed data respectively. NONHEAD blocks only exist to represent data that's expanded by decompressing. Thus the on-disk block number of a PLAIN or HEAD block can be determined by looking at the on-disk block number of the VLE meta block and incrementing by one for each PLAIN or HEAD block in it. So all data blocks referenced inside one VLE meta block need to be consecutive (but not adjacent to the location of the VLE meta blocks themselves).
-
-# Unix file types and inode layout
-
-## Directories
-Directories are either plain or inline inodes. They have content which consists of 12 byte dirent structures (directoryEntryRaw). These dirent structures contain a `nid` (see Inodes) pointing to the inode structure that represents that child of the directory, a name offset and a file type. The file type is redundant (it is also stored in the child's inode) but needs to be set. Directly following the dirent structures are all names of the children. They are not terminated or aligned. The name offset stored in a dirent is relative to the start of the inode content and marks the first byte of the name for that child. The end can be determined by the name offset of the next dirent or the total size of the inode if it is the last child.
-
-Directories always contain the `.` and `..` children, which need to point to itself and the parent inodes respectively, with the exception that the root directory's parent is defined to be itself. The individual dirents are always sorted according to their name interpreted as bytes to allow for binary searching.
-
-## Symbolic links
-Symbolic links are inline inodes. They have their literal target path as content.
-
-## Device nodes
-Device nodes are always plain inodes. Instead of a content size they have a `dev_t` integer in the `Union` inode struct value encoding the major and minor numbers. The type of device inode (block or character) is determined by the high bits of the Mode value as in standard Unix.
-
-
-## Regular files
-Regular files can be any of the three plain, inline or compressed inodes. The inode content is the content of the file.
-
-## Others
-FIFOs and Sockets are plain inodes with no content and no special fields. They will also be seldom used in an EROFS.
diff --git a/metropolis/pkg/erofs/erofs.go b/metropolis/pkg/erofs/erofs.go
deleted file mode 100644
index ab87115..0000000
--- a/metropolis/pkg/erofs/erofs.go
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package erofs
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"path"
-
-	"golang.org/x/sys/unix"
-)
-
// Writer writes a new EROFS filesystem.
type Writer struct {
	// w is the destination; all blocks are written through it via Seek/Write.
	w io.WriteSeeker
	// fixDirectoryEntry contains for each referenced path where it is
	// referenced from. Since self-references are required anyways (for the "."
	// and ".." entries) we let the user write files in any order and just
	// point the directory entries to the right target nid and file type on
	// Close().
	fixDirectoryEntry map[string][]direntFixupLocation
	// pathInodeMeta maps each written path to the placement metadata of its
	// inode, used to resolve dirent fixups.
	pathInodeMeta     map[string]*uncompressedInodeMeta
	// legacyInodeIndex stores the next legacy (32-bit) inode to be allocated.
	// 64 bit inodes are automatically calculated by EROFS on mount.
	legacyInodeIndex    uint32
	// blockAllocatorIndex is the number of the next free BlockSize block.
	blockAllocatorIndex uint32
	// metadataBlocksFree tracks remaining free bytes per metadata block.
	metadataBlocksFree  metadataBlocksMeta
}
-
// NewWriter creates a new EROFS filesystem writer. The given WriteSeeker needs
// to be at the start.
func NewWriter(w io.WriteSeeker) (*Writer, error) {
	erofsWriter := &Writer{
		w:                 w,
		fixDirectoryEntry: make(map[string][]direntFixupLocation),
		pathInodeMeta:     make(map[string]*uncompressedInodeMeta),
	}
	// Reserve the fixed 1024-byte padding plus the superblock inside the
	// first metadata block, then write both out below.
	_, err := erofsWriter.allocateMetadata(1024+binary.Size(&superblock{}), 0)
	if err != nil {
		return nil, fmt.Errorf("cannot allocate first metadata block: %w", err)
	}
	if _, err := erofsWriter.w.Write(make([]byte, 1024)); err != nil { // Padding
		return nil, fmt.Errorf("failed to write initial padding: %w", err)
	}
	if err := binary.Write(erofsWriter.w, binary.LittleEndian, &superblock{
		Magic:         Magic,
		BlockSizeBits: blockSizeBits,
		// 1024 (padding) + 128 (superblock) / 32, not eligible for fixup as
		// different int size
		RootNodeNumber: 36,
	}); err != nil {
		return nil, fmt.Errorf("failed to write superblock: %w", err)
	}
	return erofsWriter, nil
}
-
// allocateMetadata allocates metadata space of size bytes with a given
// alignment and seeks to the first byte of the newly-allocated metadata space.
// It also returns the position of that first byte.
func (w *Writer) allocateMetadata(size int, alignment uint16) (int64, error) {
	if size > BlockSize {
		// Metadata objects never span blocks; this is a programming error.
		panic("cannot allocate a metadata object bigger than BlockSize bytes")
	}
	sizeU16 := uint16(size)
	pos, ok := w.metadataBlocksFree.findBlock(sizeU16, alignment)
	if !ok {
		// No existing metadata block has room: allocate a fresh block,
		// record its remaining free bytes and zero-fill it so unused
		// metadata space is deterministic.
		blockNumber, err := w.allocateBlocks(1)
		if err != nil {
			return 0, fmt.Errorf("failed to allocate additional metadata space: %w", err)
		}
		w.metadataBlocksFree = append(w.metadataBlocksFree, metadataBlockMeta{blockNumber: blockNumber, freeBytes: BlockSize - sizeU16})
		if _, err := w.w.Write(make([]byte, BlockSize)); err != nil {
			return 0, fmt.Errorf("failed to write metadata: %w", err)
		}
		pos = int64(blockNumber) * BlockSize // Always aligned to BlockSize, bigger alignments are unsupported anyways
	}
	if _, err := w.w.Seek(pos, io.SeekStart); err != nil {
		return 0, fmt.Errorf("cannot seek to existing metadata nid, likely misaligned meta write")
	}
	return pos, nil
}
-
// allocateBlocks allocates n new BlockSize-sized block and seeks to the
// beginning of the first newly-allocated block.  It also returns the first
// newly-allocated block number.  The caller is expected to write these blocks
// completely before calling allocateBlocks again.
func (w *Writer) allocateBlocks(n uint32) (uint32, error) {
	// Blocks are handed out strictly sequentially; this only positions the
	// underlying writer, the caller produces the block contents.
	if _, err := w.w.Seek(int64(w.blockAllocatorIndex)*BlockSize, io.SeekStart); err != nil {
		return 0, fmt.Errorf("cannot seek to end of last block, check write alignment: %w", err)
	}
	firstBlock := w.blockAllocatorIndex
	w.blockAllocatorIndex += n
	return firstBlock, nil
}
-
-func (w *Writer) create(pathname string, inode Inode) *uncompressedInodeWriter {
-	i := &uncompressedInodeWriter{
-		writer:            w,
-		inode:             *inode.inode(),
-		legacyInodeNumber: w.legacyInodeIndex,
-		pathname:          path.Clean(pathname),
-	}
-	w.legacyInodeIndex++
-	return i
-}
-
// CreateFile adds a new file to the EROFS. It returns a WriteCloser to which
// the file contents should be written and which then needs to be closed. The
// last writer obtained by calling CreateFile() needs to be closed first before
// opening a new one. The given pathname needs to be referenced by a directory
// created using Create(), otherwise it will not be accessible.
// See Create for non-file inodes (directories, symlinks, devices, FIFOs).
func (w *Writer) CreateFile(pathname string, meta *FileMeta) io.WriteCloser {
	return w.create(pathname, meta)
}
-
// Create adds a new non-file inode to the EROFS. This includes directories,
// device nodes, symlinks and FIFOs.  The first call to Create() needs to be
// with pathname "." and a directory inode.  The given pathname needs to be
// referenced by a directory, otherwise it will not be accessible (with the
// exception of the directory ".").
func (w *Writer) Create(pathname string, inode Inode) error {
	iw := w.create(pathname, inode)
	// Only directories and symbolic links carry content inside the inode;
	// other inode types (devices, FIFOs, sockets) have nothing to write.
	switch i := inode.(type) {
	case *Directory:
		if err := i.writeTo(iw); err != nil {
			return fmt.Errorf("failed to write directory contents: %w", err)
		}
	case *SymbolicLink:
		if err := i.writeTo(iw); err != nil {
			return fmt.Errorf("failed to write symbolic link contents: %w", err)
		}
	}
	return iw.Close()
}
-
-// Close finishes writing an EROFS filesystem. Errors by this function need to
-// be handled as they indicate if the written filesystem is consistent (i.e.
-// there are no directory entries pointing to nonexistent inodes).
-func (w *Writer) Close() error {
-	for targetPath, entries := range w.fixDirectoryEntry {
-		for _, entry := range entries {
-			targetMeta, ok := w.pathInodeMeta[targetPath]
-			if !ok {
-				return fmt.Errorf("failed to link filesystem tree: dangling reference to %v", targetPath)
-			}
-			if err := direntFixup(w.pathInodeMeta[entry.path], int64(entry.entryIndex), targetMeta); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
// uncompressedInodeMeta tracks enough metadata about a written inode to be
// able to point dirents to it and to provide a WriteSeeker into the inode
// itself.
type uncompressedInodeMeta struct {
	// nid is the inode's location divided by the 32-byte inode alignment.
	nid   uint64
	// ftype is the EROFS file type (one of the fileType* constants).
	ftype uint8

	// Physical placement metadata: full data blocks span blockLength bytes
	// starting at blockStart; the inline tail spans inlineLength bytes
	// starting at inlineStart.
	blockStart   int64
	blockLength  int64
	inlineStart  int64
	inlineLength int64

	writer        *Writer
	// currentOffset is the logical offset into the inode content maintained
	// by Seek and advanced by Write.
	currentOffset int64
}
-
// Seek implements io.Seeker over the inode's logical content (full blocks
// followed by the inline tail). It performs no bounds checking and never
// returns an error.
func (a *uncompressedInodeMeta) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekCurrent:
		break // base is the current offset, nothing to reset
	case io.SeekStart:
		a.currentOffset = 0
	case io.SeekEnd:
		// End of content = block-resident bytes plus inline bytes.
		a.currentOffset = a.blockLength + a.inlineLength
	}
	a.currentOffset += offset
	return a.currentOffset, nil
}
-
// Write writes p at the current logical offset by seeking the underlying
// filesystem writer into the inode's inline region. Writes that fall into
// the block-resident part of the inode are not supported.
func (a *uncompressedInodeMeta) Write(p []byte) (int, error) {
	if a.currentOffset < a.blockLength {
		// TODO(lorenz): Handle the special case where a directory inode is
		// spread across multiple blocks (depending on other factors this
		// occurs around ~200 direct children).
		return 0, errors.New("relocating dirents in multi-block directory inodes is unimplemented")
	}
	if _, err := a.writer.w.Seek(a.inlineStart+a.currentOffset, io.SeekStart); err != nil {
		return 0, err
	}
	a.currentOffset += int64(len(p))
	return a.writer.w.Write(p)
}
-
// direntFixupLocation identifies a single dirent (by its index within the
// directory inode at path) whose nid and file type are filled in on Close.
type direntFixupLocation struct {
	path       string
	entryIndex uint16
}
-
// direntFixup overrides nid and file type from the path the dirent is pointing
// to. The given iw is expected to be at the start of the dirent inode to be
// fixed up.
//
// Dirent layout (see directoryEntryRaw): 8-byte nid, 2-byte name offset,
// 1-byte file type, 1-byte reserved — 12 bytes per entry, hence entryIndex*12.
func direntFixup(iw io.WriteSeeker, entryIndex int64, meta *uncompressedInodeMeta) error {
	if _, err := iw.Seek(entryIndex*12, io.SeekStart); err != nil {
		return fmt.Errorf("failed to seek to dirent: %w", err)
	}
	if err := binary.Write(iw, binary.LittleEndian, meta.nid); err != nil {
		return fmt.Errorf("failed to write nid: %w", err)
	}
	if _, err := iw.Seek(2, io.SeekCurrent); err != nil { // Skip NameStartOffset
		return fmt.Errorf("failed to seek to dirent: %w", err)
	}
	if err := binary.Write(iw, binary.LittleEndian, meta.ftype); err != nil {
		return fmt.Errorf("failed to write ftype: %w", err)
	}
	return nil
}
-
// metadataBlockMeta records the remaining free space of a single metadata
// block; free bytes are always at the end of the block.
type metadataBlockMeta struct {
	blockNumber uint32
	freeBytes   uint16
}

// metadataBlocksMeta contains metadata about all metadata blocks, most
// importantly the amount of free bytes in each block. This is not a map for
// reproducibility (map ordering).
type metadataBlocksMeta []metadataBlockMeta
-
// findBlock returns the absolute position where `size` bytes with the
// specified alignment can still fit.  If there is not enough space in any
// metadata block it returns false as the second return value.
//
// NOTE(review): a block whose aligned free space exactly equals size is
// rejected (strict >, not >=) — confirm whether that is intentional before
// changing it, as it affects on-disk layout. Alignment slack is permanently
// subtracted from the block's free-byte count.
func (m metadataBlocksMeta) findBlock(size uint16, alignment uint16) (int64, bool) {
	for i, blockMeta := range m {
		freeBytesAligned := blockMeta.freeBytes
		if alignment > 0 {
			// Round free space down so the allocation start (counted from
			// the end of the block) lands on the requested alignment.
			freeBytesAligned = blockMeta.freeBytes - (blockMeta.freeBytes % alignment)
		}
		if freeBytesAligned > size {
			m[i] = metadataBlockMeta{
				blockNumber: blockMeta.blockNumber,
				freeBytes:   freeBytesAligned - size,
			}
			// Free space sits at the end of the block, so the allocation
			// begins freeBytesAligned bytes before the block boundary.
			pos := int64(blockMeta.blockNumber+1)*BlockSize - int64(freeBytesAligned)
			return pos, true
		}
	}
	return 0, false
}
-
// unixModeToFTMap maps Unix S_IF* file type bits to EROFS fileType* values.
var unixModeToFTMap = map[uint16]uint8{
	unix.S_IFREG:  fileTypeRegularFile,
	unix.S_IFDIR:  fileTypeDirectory,
	unix.S_IFCHR:  fileTypeCharacterDevice,
	unix.S_IFBLK:  fileTypeBlockDevice,
	unix.S_IFIFO:  fileTypeFIFO,
	unix.S_IFSOCK: fileTypeSocket,
	unix.S_IFLNK:  fileTypeSymbolicLink,
}

// unixModeToFT maps a Unix file type to an EROFS file type. Unrecognized
// modes yield the map zero value, which is fileTypeUnknown (0).
func unixModeToFT(mode uint16) uint8 {
	return unixModeToFTMap[mode&unix.S_IFMT]
}
diff --git a/metropolis/pkg/erofs/erofs_test.go b/metropolis/pkg/erofs/erofs_test.go
deleted file mode 100644
index fa5f248..0000000
--- a/metropolis/pkg/erofs/erofs_test.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package erofs
-
-import (
-	"io"
-	"log"
-	"math/rand"
-	"os"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"golang.org/x/sys/unix"
-)
-
// TestKernelInterop builds EROFS images with Writer, writes each onto the
// /dev/ram0 ramdisk, mounts it at /test through the kernel's EROFS driver and
// validates the mounted tree. It only runs inside the ktest VM environment
// (IN_KTEST), as it needs root, a ramdisk and an EROFS-enabled kernel.
func TestKernelInterop(t *testing.T) {
	if os.Getenv("IN_KTEST") != "true" {
		t.Skip("Not in ktest")
	}

	// testCase pairs a filesystem construction step with a validation step
	// that runs after the image has been mounted at /test.
	type testCase struct {
		name     string
		setup    func(w *Writer) error
		validate func(t *testing.T) error
	}

	tests := []testCase{
		{
			// Single empty root directory; checks ownership and mode bits.
			name: "SimpleFolder",
			setup: func(w *Writer) error {
				return w.Create(".", &Directory{
					Base:     Base{GID: 123, UID: 124, Permissions: 0753},
					Children: []string{},
				})
			},
			validate: func(t *testing.T) error {
				var stat unix.Stat_t
				if err := unix.Stat("/test", &stat); err != nil {
					t.Errorf("failed to stat output: %v", err)
				}
				require.EqualValues(t, 124, stat.Uid, "wrong Uid")
				require.EqualValues(t, 123, stat.Gid, "wrong Gid")
				require.EqualValues(t, 0753, stat.Mode&^unix.S_IFMT, "wrong mode")
				return nil
			},
		},
		{
			// Root directory with one empty subdirectory; checks nesting.
			name: "FolderHierarchy",
			setup: func(w *Writer) error {
				if err := w.Create(".", &Directory{
					Base:     Base{GID: 123, UID: 124, Permissions: 0753},
					Children: []string{"subdir"},
				}); err != nil {
					return err
				}
				if err := w.Create("subdir", &Directory{
					Base:     Base{GID: 123, UID: 124, Permissions: 0753},
					Children: []string{},
				}); err != nil {
					return err
				}
				return nil
			},
			validate: func(t *testing.T) error {
				dirInfo, err := os.ReadDir("/test")
				if err != nil {
					t.Fatalf("Failed to read top-level directory: %v", err)
				}
				require.Len(t, dirInfo, 1, "more subdirs than expected")
				require.Equal(t, "subdir", dirInfo[0].Name(), "unexpected subdir")
				require.True(t, dirInfo[0].IsDir(), "subdir not a directory")
				subdirInfo, err := os.ReadDir("/test/subdir")
				assert.NoError(t, err, "cannot read empty subdir")
				require.Len(t, subdirInfo, 0, "unexpected subdirs in empty directory")
				return nil
			},
		},
		{
			// 128-byte file: fits entirely inline in the inode's metadata.
			name: "SmallFile",
			setup: func(w *Writer) error {
				if err := w.Create(".", &Directory{
					Base:     Base{GID: 123, UID: 123, Permissions: 0755},
					Children: []string{"test.bin"},
				}); err != nil {
					return err
				}
				writer := w.CreateFile("test.bin", &FileMeta{
					Base: Base{GID: 123, UID: 124, Permissions: 0644},
				})
				r := rand.New(rand.NewSource(0)) // Random but deterministic data
				if _, err := io.CopyN(writer, r, 128); err != nil {
					return err
				}
				if err := writer.Close(); err != nil {
					return err
				}
				return nil
			},
			validate: func(t *testing.T) error {
				var stat unix.Stat_t
				err := unix.Stat("/test/test.bin", &stat)
				assert.NoError(t, err, "failed to stat file")
				require.EqualValues(t, 124, stat.Uid, "wrong Uid")
				require.EqualValues(t, 123, stat.Gid, "wrong Gid")
				require.EqualValues(t, 0644, stat.Mode&^unix.S_IFMT, "wrong mode")
				file, err := os.Open("/test/test.bin")
				assert.NoError(t, err, "failed to open test file")
				defer file.Close()
				// Regenerate the same deterministic stream for comparison.
				r := io.LimitReader(rand.New(rand.NewSource(0)), 128) // Random but deterministic data
				expected, _ := io.ReadAll(r)
				actual, err := io.ReadAll(file)
				assert.NoError(t, err, "failed to read test file")
				assert.Equal(t, expected, actual, "content not identical")
				return nil
			},
		},
		{
			// Character device node; checks major/minor via statx.
			name: "Chardev",
			setup: func(w *Writer) error {
				if err := w.Create(".", &Directory{
					Base:     Base{GID: 123, UID: 123, Permissions: 0755},
					Children: []string{"ttyS0"},
				}); err != nil {
					return err
				}
				err := w.Create("ttyS0", &CharacterDevice{
					Base:  Base{GID: 0, UID: 0, Permissions: 0600},
					Major: 4,
					Minor: 64,
				})
				if err != nil {
					return err
				}
				return nil
			},
			validate: func(t *testing.T) error {
				var stat unix.Statx_t
				err := unix.Statx(0, "/test/ttyS0", 0, unix.STATX_ALL, &stat)
				assert.NoError(t, err, "failed to statx file")
				require.EqualValues(t, 0, stat.Uid, "wrong Uid")
				require.EqualValues(t, 0, stat.Gid, "wrong Gid")
				require.EqualValues(t, 0600, stat.Mode&^unix.S_IFMT, "wrong mode")
				require.EqualValues(t, unix.S_IFCHR, stat.Mode&unix.S_IFMT, "wrong file type")
				require.EqualValues(t, 4, stat.Rdev_major, "wrong dev major")
				require.EqualValues(t, 64, stat.Rdev_minor, "wrong dev minor")
				return nil
			},
		},
		{
			// 6500-byte file: spans a full block plus an inline tail.
			name: "LargeFile",
			setup: func(w *Writer) error {
				if err := w.Create(".", &Directory{
					Base:     Base{GID: 123, UID: 123, Permissions: 0755},
					Children: []string{"test.bin"},
				}); err != nil {
					return err
				}
				writer := w.CreateFile("test.bin", &FileMeta{
					Base: Base{GID: 123, UID: 124, Permissions: 0644},
				})
				r := rand.New(rand.NewSource(1)) // Random but deterministic data
				if _, err := io.CopyN(writer, r, 6500); err != nil {
					return err
				}
				if err := writer.Close(); err != nil {
					return err
				}
				return nil
			},
			validate: func(t *testing.T) error {
				var stat unix.Stat_t
				// Dump the raw image for debugging on failure.
				rawContents, err := os.ReadFile("/dev/ram0")
				assert.NoError(t, err, "failed to read test data")
				log.Printf("%x", rawContents)
				err = unix.Stat("/test/test.bin", &stat)
				assert.NoError(t, err, "failed to stat file")
				require.EqualValues(t, 124, stat.Uid, "wrong Uid")
				require.EqualValues(t, 123, stat.Gid, "wrong Gid")
				require.EqualValues(t, 0644, stat.Mode&^unix.S_IFMT, "wrong mode")
				require.EqualValues(t, 6500, stat.Size, "wrong size")
				file, err := os.Open("/test/test.bin")
				assert.NoError(t, err, "failed to open test file")
				defer file.Close()
				r := io.LimitReader(rand.New(rand.NewSource(1)), 6500) // Random but deterministic data
				expected, _ := io.ReadAll(r)
				actual, err := io.ReadAll(file)
				assert.NoError(t, err, "failed to read test file")
				assert.Equal(t, expected, actual, "content not identical")
				return nil
			},
		},
		{
			// Three files sized so that inline tails force allocation of
			// multiple metadata blocks.
			name: "MultipleMetaBlocks",
			setup: func(w *Writer) error {
				testFileNames := []string{"test1.bin", "test2.bin", "test3.bin"}
				if err := w.Create(".", &Directory{
					Base:     Base{GID: 123, UID: 123, Permissions: 0755},
					Children: testFileNames,
				}); err != nil {
					return err
				}
				for i, fileName := range testFileNames {
					writer := w.CreateFile(fileName, &FileMeta{
						Base: Base{GID: 123, UID: 124, Permissions: 0644},
					})
					r := rand.New(rand.NewSource(int64(i))) // Random but deterministic data
					if _, err := io.CopyN(writer, r, 2053); err != nil {
						return err
					}
					if err := writer.Close(); err != nil {
						return err
					}
				}
				return nil
			},
			validate: func(t *testing.T) error {
				testFileNames := []string{"test1.bin", "test2.bin", "test3.bin"}
				for i, fileName := range testFileNames {
					file, err := os.Open("/test/" + fileName)
					assert.NoError(t, err, "failed to open test file")
					defer file.Close()
					r := io.LimitReader(rand.New(rand.NewSource(int64(i))), 2053) // Random but deterministic data
					expected, _ := io.ReadAll(r)
					actual, err := io.ReadAll(file)
					assert.NoError(t, err, "failed to read test file")
					require.Equal(t, expected, actual, "content not identical")
				}
				return nil
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Write the image directly onto the ramdisk block device.
			file, err := os.OpenFile("/dev/ram0", os.O_WRONLY, 0644)
			if err != nil {
				t.Fatalf("failed to create test image: %v", err)
			}
			defer file.Close()
			w, err := NewWriter(file)
			if err != nil {
				t.Fatalf("failed to initialize EROFS writer: %v", err)
			}
			if err := test.setup(w); err != nil {
				t.Fatalf("setup failed: %v", err)
			}
			if err := w.Close(); err != nil {
				t.Errorf("failed close: %v", err)
			}
			// Close explicitly before mounting so all data reaches the
			// device (the defer above then becomes a no-op safety net).
			_ = file.Close()
			if err := os.MkdirAll("/test", 0755); err != nil {
				t.Error(err)
			}
			if err := unix.Mount("/dev/ram0", "/test", "erofs", unix.MS_NOEXEC|unix.MS_NODEV, ""); err != nil {
				t.Fatal(err)
			}
			if err := test.validate(t); err != nil {
				t.Errorf("validation failure: %v", err)
			}
			// Unmount so the next case can rewrite the ramdisk.
			if err := unix.Unmount("/test", 0); err != nil {
				t.Fatalf("failed to unmount: %v", err)
			}
		})

	}
}
diff --git a/metropolis/pkg/erofs/inode_types.go b/metropolis/pkg/erofs/inode_types.go
deleted file mode 100644
index 8147892..0000000
--- a/metropolis/pkg/erofs/inode_types.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package erofs
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"path"
-	"sort"
-
-	"golang.org/x/sys/unix"
-)
-
-// Inode specifies an interface that all inodes that can be written to an EROFS
-// filesystem implement.
-type Inode interface {
-	inode() *inodeCompact
-}
-
-// Base contains generic inode metadata independent from the specific inode
-// type.
-type Base struct {
-	Permissions uint16
-	UID, GID    uint16
-}
-
-func (b *Base) baseInode(fileType uint16) *inodeCompact {
-	return &inodeCompact{
-		UID:  b.UID,
-		GID:  b.GID,
-		Mode: b.Permissions | fileType,
-	}
-}
-
-// Directory represents a directory inode. The Children property contains the
-// directories' direct children (just the name, not the full path).
-type Directory struct {
-	Base
-	Children []string
-}
-
-func (d *Directory) inode() *inodeCompact {
-	return d.baseInode(unix.S_IFDIR)
-}
-
-func (d *Directory) writeTo(w *uncompressedInodeWriter) error {
-	// children is d.Children with appended backrefs (. and ..), copied to not
-	// pollute source
-	children := make([]string, len(d.Children))
-	copy(children, d.Children)
-	children = append(children, ".", "..")
-	sort.Strings(children)
-
-	nameStartOffset := binary.Size(directoryEntryRaw{}) * len(children)
-	var rawEntries []directoryEntryRaw
-	for _, ent := range children {
-		if nameStartOffset > math.MaxUint16 {
-			return errors.New("directory name offset out of range, too many or too big entries")
-		}
-		var entData directoryEntryRaw
-		entData.NameStartOffset = uint16(nameStartOffset)
-		rawEntries = append(rawEntries, entData)
-		nameStartOffset += len(ent)
-	}
-	for i, ent := range rawEntries {
-		targetPath := path.Join(w.pathname, children[i])
-		if targetPath == ".." {
-			targetPath = "."
-		}
-		w.writer.fixDirectoryEntry[targetPath] = append(w.writer.fixDirectoryEntry[targetPath], direntFixupLocation{
-			path:       w.pathname,
-			entryIndex: uint16(i),
-		})
-		if err := binary.Write(w, binary.LittleEndian, ent); err != nil {
-			return fmt.Errorf("failed to write dirent: %w", err)
-		}
-	}
-	for _, childName := range children {
-		if _, err := w.Write([]byte(childName)); err != nil {
-			return fmt.Errorf("failed to write dirent name: %w", err)
-		}
-	}
-	return nil
-}
-
-// CharacterDevice represents a Unix character device inode with major and
-// minor numbers.
-type CharacterDevice struct {
-	Base
-	Major uint32
-	Minor uint32
-}
-
-func (c *CharacterDevice) inode() *inodeCompact {
-	i := c.baseInode(unix.S_IFCHR)
-	i.Union = uint32(unix.Mkdev(c.Major, c.Minor))
-	return i
-}
-
-// BlockDevice represents a Unix block device inode with major and minor
-// numbers.
-type BlockDevice struct {
-	Base
-	Major uint32
-	Minor uint32
-}
-
-func (b *BlockDevice) inode() *inodeCompact {
-	i := b.baseInode(unix.S_IFBLK)
-	i.Union = uint32(unix.Mkdev(b.Major, b.Minor))
-	return i
-}
-
-// FIFO represents a Unix FIFO inode.
-type FIFO struct {
-	Base
-}
-
-func (f *FIFO) inode() *inodeCompact {
-	return f.baseInode(unix.S_IFIFO)
-}
-
-// Socket represents a Unix socket inode.
-type Socket struct {
-	Base
-}
-
-func (s *Socket) inode() *inodeCompact {
-	return s.baseInode(unix.S_IFSOCK)
-}
-
-// SymbolicLink represents a symbolic link/symlink to another inode. Target is
-// the literal string target of the symlink.
-type SymbolicLink struct {
-	Base
-	Target string
-}
-
-func (s *SymbolicLink) inode() *inodeCompact {
-	return s.baseInode(unix.S_IFLNK)
-}
-
-func (s *SymbolicLink) writeTo(w io.Writer) error {
-	_, err := w.Write([]byte(s.Target))
-	return err
-}
-
-// FileMeta represents the metadata of a regular file. In this case the
-// contents are written to a Writer returned by the CreateFile function on the
-// EROFS Writer and not included in the structure itself.
-type FileMeta struct {
-	Base
-}
-
-func (f *FileMeta) inode() *inodeCompact {
-	return f.baseInode(unix.S_IFREG)
-}
diff --git a/metropolis/pkg/erofs/uncompressed_inode_writer.go b/metropolis/pkg/erofs/uncompressed_inode_writer.go
deleted file mode 100644
index 97aefc0..0000000
--- a/metropolis/pkg/erofs/uncompressed_inode_writer.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package erofs
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-)
-
-// uncompressedInodeWriter exposes a io.Write-style interface for a single
-// uncompressed inode. It splits the Write-calls into blocks and writes both
-// the blocks and inode metadata. It is required to call Close() to ensure
-// everything is properly written down before writing another inode.
-type uncompressedInodeWriter struct {
-	buf               bytes.Buffer
-	writer            *Writer
-	inode             inodeCompact
-	baseBlock         uint32 // baseBlock == 0 implies this inode didn't allocate a block (yet).
-	writtenBytes      int
-	legacyInodeNumber uint32
-	pathname          string
-}
-
-func (i *uncompressedInodeWriter) allocateBlock() error {
-	bb, err := i.writer.allocateBlocks(1)
-	if err != nil {
-		return err
-	}
-	if i.baseBlock == 0 {
-		i.baseBlock = bb
-	}
-	return nil
-}
-
-func (i *uncompressedInodeWriter) flush(n int) error {
-	if err := i.allocateBlock(); err != nil {
-		return err
-	}
-	slice := i.buf.Next(n)
-	if _, err := i.writer.w.Write(slice); err != nil {
-		return err
-	}
-	// Always pad to BlockSize.
-	_, err := i.writer.w.Write(make([]byte, BlockSize-len(slice)))
-	return err
-}
-
-func (i *uncompressedInodeWriter) Write(b []byte) (int, error) {
-	i.writtenBytes += len(b)
-	if _, err := i.buf.Write(b); err != nil {
-		return 0, err
-	}
-	for i.buf.Len() >= BlockSize {
-		if err := i.flush(BlockSize); err != nil {
-			return 0, err
-		}
-	}
-	return len(b), nil
-}
-
-func (i *uncompressedInodeWriter) Close() error {
-	if i.buf.Len() > BlockSize {
-		panic("programming error")
-	}
-	inodeSize := binary.Size(i.inode)
-	if i.buf.Len()+inodeSize > BlockSize {
-		// Can't fit last part of data inline, write it in its own block.
-		if err := i.flush(i.buf.Len()); err != nil {
-			return err
-		}
-	}
-	if i.buf.Len() == 0 {
-		i.inode.Format = inodeFlatPlain << 1
-	} else {
-		// Colocate last part of data with inode.
-		i.inode.Format = inodeFlatInline << 1
-	}
-	if i.writtenBytes > math.MaxUint32 {
-		return errors.New("inodes bigger than 2^32 need the extended inode format which is unsupported by this library")
-	}
-	i.inode.Size = uint32(i.writtenBytes)
-	if i.baseBlock != 0 {
-		i.inode.Union = i.baseBlock
-	}
-	i.inode.HardlinkCount = 1
-	i.inode.InodeNumCompat = i.legacyInodeNumber
-	basePos, err := i.writer.allocateMetadata(inodeSize+i.buf.Len(), 32)
-	if err != nil {
-		return fmt.Errorf("failed to allocate metadata: %w", err)
-	}
-	i.writer.pathInodeMeta[i.pathname] = &uncompressedInodeMeta{
-		nid:          uint64(basePos) / 32,
-		ftype:        unixModeToFT(i.inode.Mode),
-		blockStart:   int64(i.baseBlock),
-		blockLength:  (int64(i.writtenBytes) / BlockSize) * BlockSize,
-		inlineStart:  basePos + 32,
-		inlineLength: int64(i.buf.Len()),
-		writer:       i.writer,
-	}
-	if err := binary.Write(i.writer.w, binary.LittleEndian, &i.inode); err != nil {
-		return err
-	}
-	if i.inode.Format&(inodeFlatInline<<1) != 0 {
-		// Data colocated in inode, if any.
-		_, err := i.writer.w.Write(i.buf.Bytes())
-		return err
-	}
-	return nil
-}
diff --git a/metropolis/pkg/event/BUILD.bazel b/metropolis/pkg/event/BUILD.bazel
deleted file mode 100644
index e6506b5..0000000
--- a/metropolis/pkg/event/BUILD.bazel
+++ /dev/null
@@ -1,9 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "event",
-    srcs = ["event.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/event",
-    visibility = ["//visibility:public"],
-    deps = ["//metropolis/pkg/supervisor"],
-)
diff --git a/metropolis/pkg/event/etcd/BUILD.bazel b/metropolis/pkg/event/etcd/BUILD.bazel
deleted file mode 100644
index 22766ff..0000000
--- a/metropolis/pkg/event/etcd/BUILD.bazel
+++ /dev/null
@@ -1,30 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "etcd",
-    srcs = ["etcd.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/event/etcd",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//metropolis/pkg/event",
-        "@com_github_cenkalti_backoff_v4//:backoff",
-        "@io_etcd_go_etcd_client_v3//:client",
-    ],
-)
-
-go_test(
-    name = "etcd_test",
-    srcs = ["etcd_test.go"],
-    embed = [":etcd"],
-    deps = [
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/logtree",
-        "@io_etcd_go_etcd_api_v3//v3rpc/rpctypes",
-        "@io_etcd_go_etcd_client_pkg_v3//testutil",
-        "@io_etcd_go_etcd_client_v3//:client",
-        "@io_etcd_go_etcd_tests_v3//integration",
-        "@org_golang_google_grpc//codes",
-        "@org_golang_google_grpc//grpclog",
-        "@org_uber_go_zap//:zap",
-    ],
-)
diff --git a/metropolis/pkg/event/etcd/etcd.go b/metropolis/pkg/event/etcd/etcd.go
deleted file mode 100644
index afcea35..0000000
--- a/metropolis/pkg/event/etcd/etcd.go
+++ /dev/null
@@ -1,444 +0,0 @@
-package etcd
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-
-	"github.com/cenkalti/backoff/v4"
-	clientv3 "go.etcd.io/etcd/client/v3"
-
-	"source.monogon.dev/metropolis/pkg/event"
-)
-
-var (
-	// Type assert that *Value implements event.ValueWatcher. We do this
-	// artificially, as there currently is no code path that needs this to be
-	// strictly true.  However, users of this library might want to rely on the
-	// Value type instead of particular Value implementations.
-	_ event.ValueWatch[StringAt] = &Value[StringAt]{}
-)
-
-// ThinClient is a small wrapper interface to combine
-// clientv3.KV and clientv3.Watcher.
-type ThinClient interface {
-	clientv3.KV
-	clientv3.Watcher
-}
-
-// Value is an 'Event Value' backed in an etcd cluster, accessed over an
-// etcd client. This is a stateless handle and can be copied and shared across
-// goroutines.
-type Value[T any] struct {
-	decoder func(key, value []byte) (T, error)
-	etcd    ThinClient
-	key     string
-	keyEnd  string
-}
-
-type Option struct {
-	rangeEnd string
-}
-
-// Range creates a Value that is backed a range of etcd key/value pairs from
-// 'key' passed to NewValue to 'end' passed to Range.
-//
-// The key range semantics (ie. lexicographic ordering) are the same as in etcd
-// ranges, so for example to retrieve all keys prefixed by `foo/` key should be
-// `foo/` and end should be `foo0`.
-//
-// For any update in the given range, the decoder will be called and its result
-// will trigger the return of a Get() call. The decoder should return a type
-// that lets the user distinguish which of the multiple objects in the range got
-// updated, as the Get() call returns no additional information about the
-// location of the retrieved object by itself.
-//
-// The order of values retrieved by Get() is currently fully arbitrary and must
-// not be relied on. It's possible that in the future the order of updates and
-// the blocking behaviour of Get will be formalized, but this is not yet the
-// case. Instead, the data returned should be treated as eventually consistent
-// with the etcd state.
-//
-// For some uses, it might be necessary to first retrieve all the objects
-// contained within the range before starting to block on updates - in this
-// case, the BacklogOnly option should be used when calling Get.
-func Range(end string) *Option {
-	return &Option{
-		rangeEnd: end,
-	}
-}
-
-// NewValue creates a new Value for a given key(s) in an etcd client. The
-// given decoder will be used to convert bytes retrieved from etcd into the
-// interface{} value retrieved by Get by this value's watcher.
-func NewValue[T any](etcd ThinClient, key string, decoder func(key, value []byte) (T, error), options ...*Option) *Value[T] {
-	res := &Value[T]{
-		decoder: decoder,
-		etcd:    etcd,
-		key:     key,
-		keyEnd:  key,
-	}
-
-	for _, opt := range options {
-		if end := opt.rangeEnd; end != "" {
-			res.keyEnd = end
-		}
-	}
-
-	return res
-}
-
-func DecoderNoop(_, value []byte) ([]byte, error) {
-	return value, nil
-}
-
-func DecoderStringAt(key, value []byte) (StringAt, error) {
-	return StringAt{
-		Key:   string(key),
-		Value: string(value),
-	}, nil
-}
-
-type StringAt struct {
-	Key   string
-	Value string
-}
-
-func (e *Value[T]) Watch() event.Watcher[T] {
-	ctx, ctxC := context.WithCancel(context.Background())
-	return &watcher[T]{
-		Value: *e,
-
-		ctx:  ctx,
-		ctxC: ctxC,
-
-		current: make(map[string][]byte),
-
-		getSem: make(chan struct{}, 1),
-	}
-}
-
-type watcher[T any] struct {
-	// Value copy, used to configure the behaviour of this watcher.
-	Value[T]
-
-	// ctx is the context that expresses the liveness of this watcher. It is
-	// canceled when the watcher is closed, and the etcd Watch hangs off of it.
-	ctx  context.Context
-	ctxC context.CancelFunc
-
-	// getSem is a semaphore used to limit concurrent Get calls and throw an
-	// error if concurrent access is attempted.
-	getSem chan struct{}
-
-	// backlogged is a list of keys retrieved from etcd but not yet returned via
-	// Get. These items are not a replay of all the updates from etcd, but are
-	// already compacted to deduplicate updates to the same object (ie., if the
-	// update stream from etcd is for keys A, B, and A, the backlogged list will
-	// only contain one update for A and B each, with the first update for A being
-	// discarded upon arrival of the second update).
-	//
-	// The keys are an index into the current map, which contains the values
-	// retrieved, including ones that have already been returned via Get. This
-	// persistence allows us to deduplicate spurious updates to the user, in which
-	// etcd returned a new revision of a key, but the data stayed the same.
-	backlogged [][]byte
-	// current map, keyed from etcd key into etcd value at said key. This map
-	// persists alongside an etcd connection, permitting deduplication of spurious
-	// etcd updates even across multiple Get calls.
-	current map[string][]byte
-
-	// prev is the etcd store revision of a previously completed etcd Get/Watch
-	// call, used to resume a Watch call in case of failures.
-	prev *int64
-	// wc is the etcd watch channel, or nil if no channel is yet open.
-	wc clientv3.WatchChan
-
-	// testRaceWG is an optional WaitGroup that, if set, will be waited upon
-	// after the initial KV value retrieval, but before the watch is created.
-	// This is only used for testing.
-	testRaceWG *sync.WaitGroup
-	// testSetupWG is an optional WaitGroup that, if set, will be waited upon
-	// after the etcd watch is created.
-	// This is only used for testing.
-	testSetupWG *sync.WaitGroup
-}
-
-// setup initiates wc (the watch channel from etcd) after retrieving the initial
-// value(s) with a get operation.
-func (w *watcher[T]) setup(ctx context.Context) error {
-	if w.wc != nil {
-		return nil
-	}
-	ranged := w.key != w.keyEnd
-
-	// First, check if some data under this key/range already exists.
-
-	// We use an exponential backoff and retry here as the initial Get can fail
-	// if the cluster is unstable (eg. failing over). We only fail the retry if
-	// the context expires.
-	bo := backoff.NewExponentialBackOff()
-	bo.MaxElapsedTime = 0
-
-	err := backoff.Retry(func() error {
-
-		var getOpts []clientv3.OpOption
-		if ranged {
-			getOpts = append(getOpts, clientv3.WithRange(w.keyEnd))
-		}
-		get, err := w.etcd.Get(ctx, w.key, getOpts...)
-		if err != nil {
-			return fmt.Errorf("when retrieving initial value: %w", err)
-		}
-
-		// Assert that the etcd API is behaving as expected.
-		if !ranged && len(get.Kvs) > 1 {
-			panic("More than one key returned in unary GET response")
-		}
-
-		// After a successful Get, save the revision to watch from and re-build the
-		// backlog from scratch based on what was available in the etcd store at that
-		// time.
-		w.prev = &get.Header.Revision
-
-		w.backlogged = nil
-		w.current = make(map[string][]byte)
-		for _, kv := range get.Kvs {
-			w.backlogged = append(w.backlogged, kv.Key)
-			w.current[string(kv.Key)] = kv.Value
-		}
-		return nil
-
-	}, backoff.WithContext(bo, ctx))
-
-	if w.testRaceWG != nil {
-		w.testRaceWG.Wait()
-	}
-	if err != nil {
-		return err
-	}
-
-	watchOpts := []clientv3.OpOption{
-		clientv3.WithRev(*w.prev + 1),
-	}
-	if ranged {
-		watchOpts = append(watchOpts, clientv3.WithRange(w.keyEnd))
-	}
-	w.wc = w.etcd.Watch(w.ctx, w.key, watchOpts...)
-
-	if w.testSetupWG != nil {
-		w.testSetupWG.Wait()
-	}
-	return nil
-}
-
-// backfill blocks until a backlog of items is available. An error is returned
-// if the context is canceled.
-func (w *watcher[T]) backfill(ctx context.Context) error {
-	// Keep watching for watch events.
-	for {
-		var resp *clientv3.WatchResponse
-		select {
-		case r := <-w.wc:
-			resp = &r
-		case <-ctx.Done():
-			return ctx.Err()
-		}
-
-		if resp.Canceled {
-			// Only allow for watches to be canceled due to context
-			// cancellations. Any other error is something we need to handle,
-			// eg. a client close or compaction error.
-			if errors.Is(resp.Err(), ctx.Err()) {
-				return fmt.Errorf("watch canceled: %w", resp.Err())
-			}
-
-			// Attempt to reconnect.
-			if w.wc != nil {
-				// If a wc already exists, close it. This forces a reconnection
-				// by the next setup call.
-				w.ctxC()
-				w.ctx, w.ctxC = context.WithCancel(context.Background())
-				w.wc = nil
-			}
-			if err := w.setup(ctx); err != nil {
-				return fmt.Errorf("failed to setup watcher: %w", err)
-			}
-			continue
-		}
-
-		w.prev = &resp.Header.Revision
-		// Spurious watch event with no update? Keep trying.
-		if len(resp.Events) == 0 {
-			continue
-		}
-
-		// Process updates into compacted list, transforming deletions into value: nil
-		// keyValues. This maps an etcd key into a pointer in the already existing
-		// backlog list. It will then be used to compact all updates into the smallest
-		// backlog possible (by overriding previously backlogged items for a key if this
-		// key is encountered again).
-		//
-		// TODO(q3k): this could be stored in the watcher state to not waste time on
-		// each update, but it's good enough for now.
-
-		// Prepare a set of keys that already exist in the backlog. This will be used
-		// to make sure we don't duplicate backlog entries while maintaining a stable
-		// backlog order.
-		seen := make(map[string]bool)
-		for _, k := range w.backlogged {
-			seen[string(k)] = true
-		}
-
-		for _, ev := range resp.Events {
-			var value []byte
-			switch ev.Type {
-			case clientv3.EventTypeDelete:
-			case clientv3.EventTypePut:
-				value = ev.Kv.Value
-			default:
-				return fmt.Errorf("invalid event type %v", ev.Type)
-			}
-
-			keyS := string(ev.Kv.Key)
-			prev := w.current[keyS]
-			// Short-circuit and skip updates with the same content as already present.
-			// These are sometimes emitted by etcd.
-			if bytes.Equal(prev, value) {
-				continue
-			}
-
-			// Only insert to backlog if not yet present, but maintain order.
-			if !seen[string(ev.Kv.Key)] {
-				w.backlogged = append(w.backlogged, ev.Kv.Key)
-				seen[string(ev.Kv.Key)] = true
-			}
-			// Regardless of backlog list, always update the key to its newest value.
-			w.current[keyS] = value
-		}
-
-		// Still nothing in backlog? Keep trying.
-		if len(w.backlogged) == 0 {
-			continue
-		}
-
-		return nil
-	}
-}
-
-type GetOption struct {
-	backlogOnly bool
-}
-
-// Get implements the Get method of the Watcher interface.
-// It can return an error in three cases:
-//   - the given context is canceled (in which case, the given error will wrap
-//     the context error)
-//   - the watcher's BytesDecoder returned an error (in which case the error
-//     returned by the BytesDecoder will be returned verbatim)
-//   - it has been called with BacklogOnly and the Watcher has no more local
-//     event data to return (see BacklogOnly for more information on the
-//     semantics of this mode of operation)
-//
-// Note that transient and permanent etcd errors are never returned, and the
-// Get call will attempt to recover from these errors as much as possible. This
-// also means that the user of the Watcher will not be notified if the
-// underlying etcd client disconnects from the cluster, or if the cluster loses
-// quorum.
-//
-// TODO(q3k): implement leases to allow clients to be notified when there are
-// transient cluster/quorum/partition errors, if needed.
-//
-// TODO(q3k): implement internal, limited buffering for backlogged data not yet
-// consumed by client, as etcd client library seems to use an unbound buffer in
-// case this happens ( see: watcherStream.buf in clientv3).
-func (w *watcher[T]) Get(ctx context.Context, opts ...event.GetOption[T]) (T, error) {
-	var empty T
-	select {
-	case w.getSem <- struct{}{}:
-	default:
-		return empty, fmt.Errorf("cannot Get() concurrently on a single waiter")
-	}
-	defer func() {
-		<-w.getSem
-	}()
-
-	backlogOnly := false
-	var predicate func(t T) bool
-	for _, opt := range opts {
-		if opt.Predicate != nil {
-			predicate = opt.Predicate
-		}
-		if opt.BacklogOnly {
-			backlogOnly = true
-		}
-	}
-
-	ranged := w.key != w.keyEnd
-	if ranged && predicate != nil {
-		return empty, errors.New("filtering unimplemented for ranged etcd values")
-	}
-	if backlogOnly && predicate != nil {
-		return empty, errors.New("filtering unimplemented for backlog-only requests")
-	}
-
-	for {
-		v, err := w.getUnlocked(ctx, ranged, backlogOnly)
-		if err != nil {
-			return empty, err
-		}
-		if predicate == nil || predicate(v) {
-			return v, nil
-		}
-	}
-}
-
-func (w *watcher[T]) getUnlocked(ctx context.Context, ranged, backlogOnly bool) (T, error) {
-	var empty T
-	// Early check for context cancelations, preventing spurious contact with etcd
-	// if there's no need to.
-	if w.ctx.Err() != nil {
-		return empty, w.ctx.Err()
-	}
-
-	if err := w.setup(ctx); err != nil {
-		return empty, fmt.Errorf("when setting up watcher: %w", err)
-	}
-
-	if backlogOnly && len(w.backlogged) == 0 {
-		return empty, event.ErrBacklogDone
-	}
-
-	// Update backlog from etcd if needed.
-	if len(w.backlogged) < 1 {
-		err := w.backfill(ctx)
-		if err != nil {
-			return empty, fmt.Errorf("when watching for new value: %w", err)
-		}
-	}
-	// Backlog is now guaranteed to contain at least one element.
-
-	if !ranged {
-		// For non-ranged queries, drain backlog fully.
-		if len(w.backlogged) != 1 {
-			panic(fmt.Sprintf("multiple keys in nonranged value: %v", w.backlogged))
-		}
-		k := w.backlogged[0]
-		v := w.current[string(k)]
-		w.backlogged = nil
-		return w.decoder(k, v)
-	} else {
-		// For ranged queries, pop one ranged query off the backlog.
-		k := w.backlogged[0]
-		v := w.current[string(k)]
-		w.backlogged = w.backlogged[1:]
-		return w.decoder(k, v)
-	}
-}
-
-func (w *watcher[T]) Close() error {
-	w.ctxC()
-	return nil
-}
diff --git a/metropolis/pkg/event/etcd/etcd_test.go b/metropolis/pkg/event/etcd/etcd_test.go
deleted file mode 100644
index bd96363..0000000
--- a/metropolis/pkg/event/etcd/etcd_test.go
+++ /dev/null
@@ -1,865 +0,0 @@
-package etcd
-
-import (
-	"context"
-	"errors"
-	"flag"
-	"fmt"
-	"log"
-	"os"
-	"strconv"
-	"sync"
-	"testing"
-	"time"
-
-	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-	"go.etcd.io/etcd/client/pkg/v3/testutil"
-	clientv3 "go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/tests/v3/integration"
-	"go.uber.org/zap"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/grpclog"
-
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/logtree"
-)
-
-var (
-	cluster   *integration.ClusterV3
-	endpoints []string
-)
-
-// TestMain brings up a 3 node etcd cluster for tests to use.
-func TestMain(m *testing.M) {
-	// This logtree's data is not output anywhere.
-	lt := logtree.New()
-
-	cfg := integration.ClusterConfig{
-		Size:                 3,
-		GRPCKeepAliveMinTime: time.Millisecond,
-		LoggerBuilder: func(memberName string) *zap.Logger {
-			dn := logtree.DN("etcd." + memberName)
-			return logtree.Zapify(lt.MustLeveledFor(dn), zap.WarnLevel)
-		},
-	}
-	tb, cancel := testutil.NewTestingTBProthesis("curator")
-	defer cancel()
-	flag.Parse()
-	integration.BeforeTestExternal(tb)
-	grpclog.SetLoggerV2(logtree.GRPCify(lt.MustLeveledFor("grpc")))
-	cluster = integration.NewClusterV3(tb, &cfg)
-	endpoints = make([]string, 3)
-	for i := range endpoints {
-		endpoints[i] = cluster.Client(i).Endpoints()[0]
-	}
-
-	v := m.Run()
-	cluster.Terminate(tb)
-	os.Exit(v)
-}
-
-// setRaceWg creates a new WaitGroup and sets the given watcher to wait on this
-// WG after it performs the initial retrieval of a value from etcd, but before
-// it starts the watcher. This is used to test potential race conditions
-// present between these two steps.
-func setRaceWg[T any](w event.Watcher[T]) *sync.WaitGroup {
-	var wg sync.WaitGroup
-	w.(*watcher[T]).testRaceWG = &wg
-	return &wg
-}
-
-// setSetupWg creates a new WaitGroup and sets the given watcher to wait on
-// thie WG after an etcd watch channel is created. This is used in tests to
-// ensure that the watcher is fully created before it is tested.
-func setSetupWg[T any](w event.Watcher[T]) *sync.WaitGroup {
-	var wg sync.WaitGroup
-	w.(*watcher[T]).testSetupWG = &wg
-	return &wg
-}
-
-// testClient is an etcd connection to the test cluster.
-type testClient struct {
-	client     *clientv3.Client
-}
-
-func newTestClient(t *testing.T) *testClient {
-	t.Helper()
-	cli, err := clientv3.New(clientv3.Config{
-		Endpoints:            endpoints,
-		DialTimeout:          1 * time.Second,
-		DialKeepAliveTime:    1 * time.Second,
-		DialKeepAliveTimeout: 1 * time.Second,
-	})
-	if err != nil {
-		t.Fatalf("clientv3.New: %v", err)
-	}
-
-	return &testClient{
-		client:     cli,
-	}
-}
-
-func (d *testClient) close() {
-	d.client.Close()
-}
-
-// setEndpoints configures which endpoints (from {0,1,2}) the testClient is
-// connected to.
-func (d *testClient) setEndpoints(nums ...uint) {
-	var eps []string
-	for _, num := range nums {
-		eps = append(eps, endpoints[num])
-	}
-	d.client.SetEndpoints(eps...)
-}
-
-// put uses the testClient to store key with a given string value in etcd. It
-// contains retry logic that will block until the put is successful.
-func (d *testClient) put(t *testing.T, key, value string) {
-	t.Helper()
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	for {
-		ctxT, ctxC := context.WithTimeout(ctx, 100*time.Millisecond)
-		_, err := d.client.Put(ctxT, key, value)
-		ctxC()
-		if err == nil {
-			return
-		}
-		if errors.Is(err, ctxT.Err()) {
-			log.Printf("Retrying after %v", err)
-			continue
-		}
-		// Retry on etcd unavailability - this will happen in this code as the
-		// etcd cluster repeatedly loses quorum.
-		var eerr rpctypes.EtcdError
-		if errors.As(err, &eerr) && eerr.Code() == codes.Unavailable {
-			log.Printf("Retrying after %v", err)
-			continue
-		}
-		t.Fatalf("Put: %v", err)
-	}
-
-}
-
-// remove uses the testClient to remove the given key from etcd. It contains
-// retry logic that will block until the removal is successful.
-func (d *testClient) remove(t *testing.T, key string) {
-	t.Helper()
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	_, err := d.client.Delete(ctx, key)
-	if err == nil {
-		return
-	}
-	t.Fatalf("Delete: %v", err)
-}
-
-// expect runs a Get on the given Watcher, ensuring the returned value is a
-// given string.
-func expect(t *testing.T, w event.Watcher[StringAt], value string) {
-	t.Helper()
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	got, err := w.Get(ctx)
-	if err != nil {
-		t.Fatalf("Get: %v", err)
-	}
-
-	if got, want := got.Value, value; got != want {
-		t.Errorf("Wanted value %q, got %q", want, got)
-	}
-}
-
-// expectTimeout ensures that the given watcher blocks on a Get call for at
-// least 100 milliseconds. This is used by tests to attempt to verify that the
-// watcher Get is fully blocked, but can cause false positives (eg. when Get
-// blocks for 101 milliseconds). Thus, this function should be used sparingly
-// and in tests that perform other baseline behaviour checks alongside this
-// test.
-func expectTimeout[T any](t *testing.T, w event.Watcher[T]) {
-	t.Helper()
-	ctx, ctxC := context.WithTimeout(context.Background(), 100*time.Millisecond)
-	got, err := w.Get(ctx)
-	ctxC()
-
-	if !errors.Is(err, ctx.Err()) {
-		t.Fatalf("Expected timeout error, got %v, %v", got, err)
-	}
-}
-
-// wait wraps a watcher into a channel of strings, ensuring that the watcher
-// never errors on Get calls and always returns strings.
-func wait(t *testing.T, w event.Watcher[StringAt]) (chan string, func()) {
-	t.Helper()
-	ctx, ctxC := context.WithCancel(context.Background())
-
-	c := make(chan string)
-
-	go func() {
-		for {
-			got, err := w.Get(ctx)
-			if err != nil && errors.Is(err, ctx.Err()) {
-				return
-			}
-			if err != nil {
-				t.Errorf("Get: %v", err)
-				close(c)
-				return
-			}
-			c <- got.Value
-		}
-	}()
-
-	return c, ctxC
-}
-
-// TestSimple exercises the simplest possible interaction with a watched value.
-func TestSimple(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	k := "test-simple"
-	value := NewValue(tc.client, k, DecoderStringAt)
-	tc.put(t, k, "one")
-
-	watcher := value.Watch()
-	defer watcher.Close()
-	expect(t, watcher, "one")
-
-	tc.put(t, k, "two")
-	expect(t, watcher, "two")
-
-	tc.put(t, k, "three")
-	tc.put(t, k, "four")
-	tc.put(t, k, "five")
-	tc.put(t, k, "six")
-
-	q, cancel := wait(t, watcher)
-	// Test will hang here if the above value does not receive the set "six".
-	log.Printf("a")
-	for el := range q {
-		log.Printf("%q", el)
-		if el == "six" {
-			break
-		}
-	}
-	log.Printf("b")
-	cancel()
-}
-
-// stringAtGet performs a Get from a Watcher, expecting a stringAt and updating
-// the given map with the retrieved value.
-func stringAtGet(ctx context.Context, t *testing.T, w event.Watcher[StringAt], m map[string]string) {
-	t.Helper()
-
-	vr, err := w.Get(ctx)
-	if err != nil {
-		t.Fatalf("Get: %v", err)
-	}
-	m[vr.Key] = vr.Value
-}
-
-// TestSimpleRange exercises the simplest behaviour of a ranged watcher,
-// retrieving updaates via Get in a fully blocking fashion.
-func TestSimpleRange(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	ks := "test-simple-range/"
-	ke := "test-simple-range0"
-	value := NewValue(tc.client, ks, DecoderStringAt, Range(ke))
-	tc.put(t, ks+"a", "one")
-	tc.put(t, ks+"b", "two")
-	tc.put(t, ks+"c", "three")
-	tc.put(t, ks+"b", "four")
-
-	w := value.Watch()
-	defer w.Close()
-
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	res := make(map[string]string)
-	stringAtGet(ctx, t, w, res)
-	stringAtGet(ctx, t, w, res)
-	stringAtGet(ctx, t, w, res)
-
-	tc.put(t, ks+"a", "five")
-	tc.put(t, ks+"e", "six")
-
-	stringAtGet(ctx, t, w, res)
-	stringAtGet(ctx, t, w, res)
-
-	for _, te := range []struct {
-		k, w string
-	}{
-		{ks + "a", "five"},
-		{ks + "b", "four"},
-		{ks + "c", "three"},
-		{ks + "e", "six"},
-	} {
-		if want, got := te.w, res[te.k]; want != got {
-			t.Errorf("res[%q]: wanted %q, got %q", te.k, want, got)
-		}
-	}
-}
-
-// TestCancel ensures that watchers can resume after being canceled.
-func TestCancel(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	k := "test-cancel"
-	value := NewValue(tc.client, k, DecoderStringAt)
-	tc.put(t, k, "one")
-
-	watcher := value.Watch()
-	defer watcher.Close()
-	expect(t, watcher, "one")
-
-	ctx, ctxC := context.WithCancel(context.Background())
-	errs := make(chan error, 1)
-	go func() {
-		_, err := watcher.Get(ctx)
-		errs <- err
-	}()
-	ctxC()
-	if want, got := ctx.Err(), <-errs; !errors.Is(got, want) {
-		t.Fatalf("Wanted err %v, got %v", want, got)
-	}
-
-	// Successfully canceled watch, resuming should continue to work.
-	q, cancel := wait(t, watcher)
-	defer cancel()
-
-	tc.put(t, k, "two")
-	if want, got := "two", <-q; want != got {
-		t.Fatalf("Wanted val %q, got %q", want, got)
-	}
-}
-
-// TestCancelOnGet ensures that a context cancellation on an initial Get (which
-// translates to an etcd Get in a backoff loop) doesn't block.
-func TestCancelOnGet(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	k := "test-cancel-on-get"
-	value := NewValue(tc.client, k, DecoderStringAt)
-	watcher := value.Watch()
-	tc.put(t, k, "one")
-
-	// Cause partition between client endpoint and rest of cluster. Any read/write
-	// operations will now hang.
-	tc.setEndpoints(0)
-	cluster.Members[0].InjectPartition(t, cluster.Members[1], cluster.Members[2])
-	// Let raft timeouts expire so that the leader is aware a partition has occurred
-	// and stops serving data if it is not part of a quorum anymore.
-	//
-	// Otherwise, if Member[0] was the leader, there will be a window of opportunity
-	// during which it will continue to serve read data even though it has been
-	// partitioned off. This is an effect of how etcd handles linearizable reads:
-	// they go through the leader, but do not go through raft.
-	//
-	// The value is the default etcd leader timeout (1s) + some wiggle room.
-	time.Sleep(time.Second + time.Millisecond*100)
-
-	// Perform the initial Get(), which should attempt to retrieve a KV entry from
-	// the etcd service. This should hang. Unfortunately, there's no easy way to do
-	// this without an arbitrary sleep hoping that the client actually gets to the
-	// underlying etcd.Get call. This can cause false positives (eg. false 'pass'
-	// results) in this test.
-	ctx, ctxC := context.WithCancel(context.Background())
-	errs := make(chan error, 1)
-	go func() {
-		_, err := watcher.Get(ctx)
-		errs <- err
-	}()
-	time.Sleep(time.Second)
-
-	// Now that the etcd.Get is hanging, cancel the context.
-	ctxC()
-	// And now unpartition the cluster, resuming reads.
-	cluster.Members[0].RecoverPartition(t, cluster.Members[1], cluster.Members[2])
-
-	// The etcd.Get() call should've returned with a context cancellation.
-	err := <-errs
-	switch {
-	case err == nil:
-		t.Errorf("watcher.Get() returned no error, wanted context error")
-	case errors.Is(err, ctx.Err()):
-		// Okay.
-	default:
-		t.Errorf("watcher.Get() returned %v, wanted context error", err)
-	}
-}
-
-// TestClientReconnect forces a 'reconnection' of an active watcher from a
-// running member to another member, by stopping the original member and
-// explicitly reconnecting the client to other available members.
-//
-// This doe not reflect a situation expected during Metropolis runtime, as we
-// do not expect splits between an etcd client and its connected member
-// (instead, all etcd clients only connect to their local member). However, it
-// is still an important safety test to perform, and it also exercies the
-// equivalent behaviour of an etcd client re-connecting for any other reason.
-func TestClientReconnect(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-	tc.setEndpoints(0)
-
-	k := "test-client-reconnect"
-	value := NewValue(tc.client, k, DecoderStringAt)
-	tc.put(t, k, "one")
-
-	watcher := value.Watch()
-	defer watcher.Close()
-	expect(t, watcher, "one")
-
-	q, cancel := wait(t, watcher)
-	defer cancel()
-
-	cluster.Members[0].Stop(t)
-	defer cluster.Members[0].Restart(t)
-	cluster.WaitLeader(t)
-
-	tc.setEndpoints(1, 2)
-	tc.put(t, k, "two")
-
-	if want, got := "two", <-q; want != got {
-		t.Fatalf("Watcher received incorrect data after client restart, wanted %q, got %q", want, got)
-	}
-}
-
-// TestClientPartition forces a temporary partition of the etcd member while a
-// watcher is running, updates the value from across the partition, and undoes
-// the partition.
-// The partition is expected to be entirely transparent to the watcher.
-func TestClientPartition(t *testing.T) {
-	tcOne := newTestClient(t)
-	defer tcOne.close()
-	tcOne.setEndpoints(0)
-
-	tcRest := newTestClient(t)
-	defer tcRest.close()
-	tcRest.setEndpoints(1, 2)
-
-	k := "test-client-partition"
-	valueOne := NewValue(tcOne.client, k, DecoderStringAt)
-	watcherOne := valueOne.Watch()
-	defer watcherOne.Close()
-	valueRest := NewValue(tcRest.client, k, DecoderStringAt)
-	watcherRest := valueRest.Watch()
-	defer watcherRest.Close()
-
-	tcRest.put(t, k, "a")
-	expect(t, watcherOne, "a")
-	expect(t, watcherRest, "a")
-
-	cluster.Members[0].InjectPartition(t, cluster.Members[1], cluster.Members[2])
-
-	tcRest.put(t, k, "b")
-	expect(t, watcherRest, "b")
-	expectTimeout(t, watcherOne)
-
-	cluster.Members[0].RecoverPartition(t, cluster.Members[1], cluster.Members[2])
-
-	expect(t, watcherOne, "b")
-	tcRest.put(t, k, "c")
-	expect(t, watcherOne, "c")
-	expect(t, watcherRest, "c")
-
-}
-
-// TestEarlyUse exercises the correct behaviour of the value watcher on a value
-// that is not yet set.
-func TestEarlyUse(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	k := "test-early-use"
-
-	value := NewValue(tc.client, k, DecoderStringAt)
-	watcher := value.Watch()
-	defer watcher.Close()
-
-	wg := setSetupWg(watcher)
-	wg.Add(1)
-	q, cancel := wait(t, watcher)
-	defer cancel()
-
-	wg.Done()
-
-	tc.put(t, k, "one")
-
-	if want, got := "one", <-q; want != got {
-		t.Fatalf("Expected %q, got %q", want, got)
-	}
-}
-
-// TestRemove exercises the basic functionality of handling deleted values.
-func TestRemove(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	k := "test-remove"
-	tc.put(t, k, "one")
-
-	value := NewValue(tc.client, k, DecoderStringAt)
-	watcher := value.Watch()
-	defer watcher.Close()
-
-	expect(t, watcher, "one")
-	tc.remove(t, k)
-	expect(t, watcher, "")
-}
-
-// TestRemoveRange exercises the behaviour of a Get on a ranged watcher when a
-// value is removed.
-func TestRemoveRange(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	ks := "test-remove-range/"
-	ke := "test-remove-range0"
-	value := NewValue(tc.client, ks, DecoderStringAt, Range(ke))
-	tc.put(t, ks+"a", "one")
-	tc.put(t, ks+"b", "two")
-	tc.put(t, ks+"c", "three")
-	tc.put(t, ks+"b", "four")
-	tc.remove(t, ks+"c")
-
-	w := value.Watch()
-	defer w.Close()
-
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	res := make(map[string]string)
-	stringAtGet(ctx, t, w, res)
-	stringAtGet(ctx, t, w, res)
-
-	for _, te := range []struct {
-		k, w string
-	}{
-		{ks + "a", "one"},
-		{ks + "b", "four"},
-		{ks + "c", ""},
-	} {
-		if want, got := te.w, res[te.k]; want != got {
-			t.Errorf("res[%q]: wanted %q, got %q", te.k, want, got)
-		}
-	}
-}
-
-// TestEmptyRace forces the watcher to retrieve an empty value from the K/V
-// store at first, and establishing the watch channel after a new value has
-// been stored in the same place.
-func TestEmptyRace(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	k := "test-remove-race"
-	tc.put(t, k, "one")
-	tc.remove(t, k)
-
-	value := NewValue(tc.client, k, DecoderStringAt)
-	watcher := value.Watch()
-	defer watcher.Close()
-
-	wg := setRaceWg(watcher)
-	wg.Add(1)
-	q, cancel := wait(t, watcher)
-	defer cancel()
-
-	tc.put(t, k, "two")
-	wg.Done()
-
-	if want, got := "two", <-q; want != got {
-		t.Fatalf("Watcher received incorrect data after client restart, wanted %q, got %q", want, got)
-	}
-}
-
-type errOrInt struct {
-	val int64
-	err error
-}
-
-// TestDecoder exercises the BytesDecoder functionality of the watcher, by
-// creating a value with a decoder that only accepts string-encoded integers
-// that are divisible by three. The test then proceeds to put a handful of
-// values into etcd, ensuring that undecodable values correctly return an error
-// on Get, but that the watcher continues to work after the error has been
-// returned.
-func TestDecoder(t *testing.T) {
-	decoderDivisibleByThree := func(_, value []byte) (int64, error) {
-		num, err := strconv.ParseInt(string(value), 10, 64)
-		if err != nil {
-			return 0, fmt.Errorf("not a valid number")
-		}
-		if (num % 3) != 0 {
-			return 0, fmt.Errorf("not divisible by 3")
-		}
-		return num, nil
-	}
-
-	tc := newTestClient(t)
-	defer tc.close()
-
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	k := "test-decoder"
-	value := NewValue(tc.client, k, decoderDivisibleByThree)
-	watcher := value.Watch()
-	defer watcher.Close()
-	tc.put(t, k, "3")
-	_, err := watcher.Get(ctx)
-	if err != nil {
-		t.Fatalf("Initial Get: %v", err)
-	}
-
-	// Stream updates into arbitrarily-bounded test channel.
-	queue := make(chan errOrInt, 100)
-	go func() {
-		for {
-			res, err := watcher.Get(ctx)
-			if err != nil && errors.Is(err, ctx.Err()) {
-				return
-			}
-			if err != nil {
-				queue <- errOrInt{
-					err: err,
-				}
-			} else {
-				queue <- errOrInt{
-					val: res,
-				}
-			}
-		}
-	}()
-
-	var wantList []*int64
-	wantError := func(val string) {
-		wantList = append(wantList, nil)
-		tc.put(t, k, val)
-	}
-	wantValue := func(val string, decoded int64) {
-		wantList = append(wantList, &decoded)
-		tc.put(t, k, val)
-	}
-
-	wantError("")
-	wantValue("9", 9)
-	wantError("foo")
-	wantValue("18", 18)
-	wantError("10")
-	wantValue("27", 27)
-	wantValue("36", 36)
-
-	for i, want := range wantList {
-		q := <-queue
-		if want == nil && q.err == nil {
-			t.Fatalf("Case %d: wanted error, got no error and value %d", i, q.val)
-		}
-		if want != nil && (*want) != q.val {
-			t.Fatalf("Case %d: wanted value %d, got error %v and value %d", i, *want, q.err, q.val)
-		}
-	}
-}
-
-// TestBacklog ensures that the watcher can handle a large backlog of changes
-// in etcd that the client didnt' keep up with, and that whatever final state
-// is available to the client when it actually gets to calling Get().
-func TestBacklog(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	k := "test-backlog"
-	value := NewValue(tc.client, k, DecoderStringAt)
-	watcher := value.Watch()
-	defer watcher.Close()
-
-	tc.put(t, k, "initial")
-	expect(t, watcher, "initial")
-
-	for i := 0; i < 1000; i++ {
-		tc.put(t, k, fmt.Sprintf("val-%d", i))
-	}
-
-	ctx, ctxC := context.WithTimeout(context.Background(), time.Second)
-	defer ctxC()
-	for {
-		valB, err := watcher.Get(ctx)
-		if err != nil {
-			t.Fatalf("Get() returned error before expected final value: %v", err)
-		}
-		if valB.Value == "val-999" {
-			break
-		}
-	}
-}
-
-// TestBacklogRange ensures that the ranged etcd watcher can handle a large
-// backlog of changes in etcd that the client didn't keep up with.
-func TestBacklogRange(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-
-	ks := "test-backlog-range/"
-	ke := "test-backlog-range0"
-	value := NewValue(tc.client, ks, DecoderStringAt, Range(ke))
-	w := value.Watch()
-	defer w.Close()
-
-	for i := 0; i < 100; i++ {
-		if i%2 == 0 {
-			tc.put(t, ks+"a", fmt.Sprintf("val-%d", i))
-		} else {
-			tc.put(t, ks+"b", fmt.Sprintf("val-%d", i))
-		}
-	}
-
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	res := make(map[string]string)
-	stringAtGet(ctx, t, w, res)
-	stringAtGet(ctx, t, w, res)
-
-	for _, te := range []struct {
-		k, w string
-	}{
-		{ks + "a", "val-98"},
-		{ks + "b", "val-99"},
-	} {
-		if want, got := te.w, res[te.k]; want != got {
-			t.Errorf("res[%q]: wanted %q, got %q", te.k, want, got)
-		}
-	}
-}
-
-// TestBacklogOnly exercises the BacklogOnly option for non-ranged watchers,
-// which effectively makes any Get operation non-blocking (but also showcases
-// that unless a Get without BacklogOnly is issues, no new data will appear by
-// itself in the watcher - which is an undocumented implementation detail of the
-// option).
-func TestBacklogOnly(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	k := "test-backlog-only"
-	tc.put(t, k, "initial")
-
-	value := NewValue(tc.client, k, DecoderStringAt)
-	watcher := value.Watch()
-	defer watcher.Close()
-
-	d, err := watcher.Get(ctx, event.BacklogOnly[StringAt]())
-	if err != nil {
-		t.Fatalf("First Get failed: %v", err)
-	}
-	if want, got := "initial", d.Value; want != got {
-		t.Fatalf("First Get: wanted value %q, got %q", want, got)
-	}
-
-	// As expected, next call to Get with BacklogOnly fails - there truly is no new
-	// updates to emit.
-	_, err = watcher.Get(ctx, event.BacklogOnly[StringAt]())
-	if want, got := event.ErrBacklogDone, err; !errors.Is(got, want) {
-		t.Fatalf("Second Get: wanted %v, got %v", want, got)
-	}
-
-	// Implementation detail: even though there is a new value ('second'),
-	// BacklogOnly will still return ErrBacklogDone.
-	tc.put(t, k, "second")
-	_, err = watcher.Get(ctx, event.BacklogOnly[StringAt]())
-	if want, got := event.ErrBacklogDone, err; !errors.Is(got, want) {
-		t.Fatalf("Third Get: wanted %v, got %v", want, got)
-	}
-
-	// ... However, a Get  without BacklogOnly will return the new value.
-	d, err = watcher.Get(ctx)
-	if err != nil {
-		t.Fatalf("Fourth Get failed: %v", err)
-	}
-	if want, got := "second", d.Value; want != got {
-		t.Fatalf("Fourth Get: wanted value %q, got %q", want, got)
-	}
-}
-
-// TestBacklogOnlyRange exercises the BacklogOnly option for ranged watchers,
-// showcasing how it expected to be used for keeping up with the external state
-// of a range by synchronizing to a local map.
-func TestBacklogOnlyRange(t *testing.T) {
-	tc := newTestClient(t)
-	defer tc.close()
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	ks := "test-backlog-only-range/"
-	ke := "test-backlog-only-range0"
-
-	for i := 0; i < 100; i++ {
-		if i%2 == 0 {
-			tc.put(t, ks+"a", fmt.Sprintf("val-%d", i))
-		} else {
-			tc.put(t, ks+"b", fmt.Sprintf("val-%d", i))
-		}
-	}
-
-	value := NewValue(tc.client, ks, DecoderStringAt, Range(ke))
-	w := value.Watch()
-	defer w.Close()
-
-	// Collect results into a map from key to value.
-	res := make(map[string]string)
-
-	// Run first Get - this is the barrier defining what's part of the backlog.
-	g, err := w.Get(ctx, event.BacklogOnly[StringAt]())
-	if err != nil {
-		t.Fatalf("Get: %v", err)
-	}
-	res[g.Key] = g.Value
-
-	// These won't be part of the backlog.
-	tc.put(t, ks+"a", "val-100")
-	tc.put(t, ks+"b", "val-101")
-
-	// Retrieve the rest of the backlog until ErrBacklogDone is returned.
-	nUpdates := 1
-	for {
-		g, err := w.Get(ctx, event.BacklogOnly[StringAt]())
-		if errors.Is(err, event.ErrBacklogDone) {
-			break
-		}
-		if err != nil {
-			t.Fatalf("Get: %v", err)
-		}
-		nUpdates += 1
-		res[g.Key] = g.Value
-	}
-
-	// The backlog should've been compacted to just two entries at their newest
-	// state.
-	if want, got := 2, nUpdates; want != got {
-		t.Fatalf("wanted backlog in %d updates, got it in %d", want, got)
-	}
-
-	for _, te := range []struct {
-		k, w string
-	}{
-		{ks + "a", "val-98"},
-		{ks + "b", "val-99"},
-	} {
-		if want, got := te.w, res[te.k]; want != got {
-			t.Errorf("res[%q]: wanted %q, got %q", te.k, want, got)
-		}
-	}
-}
diff --git a/metropolis/pkg/event/event.go b/metropolis/pkg/event/event.go
deleted file mode 100644
index a8b3526..0000000
--- a/metropolis/pkg/event/event.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package event defines and implements Event Values, a mechanism in which
-// multiple consumers can watch a value for updates in a reliable way.
-//
-// Values currently are kept in memory (see: MemoryValue), but a future
-// implementation might exist for other storage backends, eg. etcd.
-//
-// # Background and intended use
-//
-// The Event Value library is intended to be used within Metropolis'
-// supervisor-based runnables to communicate state changes to other runnables,
-// while permitting both sides to restart if needed. It grew out of multiple
-// codebases reimplementing an ad-hoc observer pattern, and from the
-// realization that implementing all possible edge cases of such patterns is
-// non-trivial and subject to programming errors. As such, it was turned into a
-// self-standing library.
-//
-// Why not just channels?
-//
-// Plain channels have multiple deficiencies for this usecase:
-//   - Strict FIFO behaviour: all values sent to a channel must be received, and
-//     historic and newest data must be treated in the same way. This means that
-//     a consumer of state changes must process all updates to the value as if
-//     they are the newest, and unable to skip rapid updates when a system is
-//     slowly settling due to a cascading state change.
-//   - Implementation overhead: implementing an observer
-//     registration/unregistration pattern is prone to programming bugs,
-//     especially for features like always first sending the current state to a
-//     new observer.
-//   - Strict buffer size: due to their FIFO nature and the possibility of
-//     consumers not receiving actively, channels would have to buffer all
-//     existing updates, requiring some arbitrary best-guess channel buffer
-//     sizing that would still not prevent blocking writes or data loss in a
-//     worst case scenario.
-//
-// Or, in other words: Go channels are a synchronization primitive, not a
-// ready-made solution to this problem. The Event Value implementation in fact
-// extensively uses Go channels within its implementation as a building block.
-//
-// Why not just condition variables (sync.Cond)?
-//
-// Go's condition variable implementation doesn't fully address our needs
-// either:
-//   - No context/canceling support: once a condition is being Wait()ed on,
-//     this cannot be interrupted. This is especially painful and unwieldy when
-//     dealing with context-heavy code, such as Metropolis.
-//   - Spartan API: expecting users to plainly use sync.Cond is risky, as the API
-//     is fairly low-level.
-//   - No solution for late consumers: late consumers (ones that missed the value
-//     being set by a producer) would still have to implement logic in order to
-//     find out such a value, as sync.Cond only supports what amounts to
-//     edge-level triggers as part of its Broadcast/Signal system.
-//
-// It would be possible to implement MemoryValue using a sync.Cond internally,
-// but such an implementation would likely be more complex than the current
-// implementation based on channels and mutexes, as it would have to work
-// around issues like lack of canceling, etc.
-package event
-
-import (
-	"context"
-	"errors"
-
-	"source.monogon.dev/metropolis/pkg/supervisor"
-)
-
-// A Value is an 'Event Value', some piece of data that can be updated ('Set')
-// by Producers and retrieved by Consumers.
-type Value[T any] interface {
-	// Set updates the Value to the given data. It is safe to call this from
-	// multiple goroutines, including concurrently.
-	//
-	// Any time Set is called, any consumers performing a Watch on this Value
-	// will be notified with the new data - even if the Set data is the same as
-	// the one that was already stored.
-	//
-	// A Value will initially have no data set. This 'no data' state is seen by
-	// consumers by the first .Get() call on the Watcher blocking until data is Set.
-	//
-	// All updates will be serialized in an arbitrary order - if multiple
-	// producers wish to perform concurrent actions to update the Value partially,
-	// this should be negotiated and serialized externally by the producers.
-	Set(val T)
-
-	// ValueWatch implements the Watch method. It is split out into another
-	// interface to allow some 'Event Values' to implement only the watch/read
-	// part, with the write side being implicit or defined by a more complex
-	// interface than a simple Set().
-	ValueWatch[T]
-}
-
-// ValueWatch is the read side of an 'Event Value', witch can by retrieved by
-// Consumers by performing a Watch operation on it.
-type ValueWatch[T any] interface {
-	// Watch retrieves a Watcher that keeps track on the version of the data
-	// contained within the Value that was last seen by a consumer. Once a
-	// Watcher is retrieved, it can be used to then get the actual data stored
-	// within the Value, and to reliably retrieve updates to it without having
-	// to poll for changes.
-	Watch() Watcher[T]
-}
-
-// A Watcher keeps track of the last version of data seen by a consumer for a
-// given Value. Each consumer should use an own Watcher instance, and it is not
-// safe to use this type concurrently. However, it is safe to move/copy it
-// across different goroutines, as long as no two goroutines access it
-// simultaneously.
-type Watcher[T any] interface {
-	// Get blocks until a Value's data is available:
-	//  - On first use of a Watcher, Get will return the data contained in the
-	//    value at the time of calling .Watch(), or block if no data has been
-	//    .Set() on it yet. If a value has been Set() since the initial
-	//    creation of the Watch() but before Get() is called for the first
-	//    time, the first Get() call will immediately return the new value.
-	//  - On subsequent uses of a Watcher, Get will block until the given Value
-	//    has been Set with new data. This does not necessarily mean that the
-	//    new data is different - consumers should always perform their own
-	//    checks on whether the update is relevant to them (ie., the data has
-	//    changed in a significant way), unless specified otherwise by a Value
-	//    publisher.
-	//
-	// Get() will always return the current newest data that has been Set() on
-	// the Value, and not a full log of historical events. This is geared
-	// towards event values where consumers only care about changes to data
-	// since last retrieval, not every value that has been Set along the way.
-	// Thus, consumers need not make sure that they actively .Get() on a
-	// watcher all the times.
-	//
-	// If the context is canceled before data is available to be returned, the
-	// context's error will be returned. However, the Watcher will still need to be
-	// Closed, as it is still fully functional after the context has been canceled.
-	//
-	// Concurrent requests to Get result in an error. The reasoning to return
-	// an error instead of attempting to serialize the requests is that any
-	// concurrent access from multiple goroutines would cause a desync in the
-	// next usage of the Watcher. For example:
-	//   1) w.Get() (in G0) and w.Get(G1) start. They both block waiting for an
-	//      initial value.
-	//   2) v.Set(0)
-	//   3) w.Get() in G0 returns 0,
-	//   4) v.Set(1)
-	//   4) w.Get() in G1 returns 1,
-	// This would cause G0 and G1 to become desynchronized between eachother
-	// (both have different value data) and subsequent updates will also
-	// continue skipping some updates.
-	// If multiple goroutines need to access the Value, they should each use
-	// their own Watcher.
-	Get(context.Context, ...GetOption[T]) (T, error)
-
-	// Close must be called if the Watcher is not going to be used anymore -
-	// otherwise, it will not be garbage collected.
-	Close() error
-}
-
-type GetOption[T any] struct {
-	Predicate   func(t T) bool
-	BacklogOnly bool
-}
-
-func Filter[T any](pred func(T) bool) GetOption[T] {
-	return GetOption[T]{
-		Predicate: pred,
-	}
-}
-
-// BacklogOnly will prevent Get from blocking on waiting for more updates from
-// etcd, by instead returning ErrBacklogDone whenever no more data is currently
-// locally available. This is different however, from establishing that there
-// are no more pending updates from the etcd cluster - the only way to ensure
-// the local client is up to date is by performing Get calls without this option
-// set.
-//
-// This mode of retrieval should only be used for the retrieval of the existing
-// data in the etcd cluster on the initial creation of the Watcher (by
-// repeatedly calling Get until ErrBacklogDone is returned), and shouldn't be set
-// for any subsequent call. Any use of this option after that initial fetch is
-// undefined behaviour that exposes the internals of the Get implementation, and
-// must not be relied on. However, in the future, this behaviour might be
-// formalized.
-//
-// This mode is particularly useful for ranged watchers. Non-ranged watchers can
-// still use this option to distinguish between blocking because of the
-// nonexistence of an object vs. blocking because of networking issues. However,
-// non-ranged retrieval semantics generally will rarely need to make this
-// distinction.
-func BacklogOnly[T any]() GetOption[T] {
-	return GetOption[T]{BacklogOnly: true}
-}
-
-var (
-	// ErrBacklogDone is returned by Get when BacklogOnly is set and there is no more
-	// event data stored in the Watcher client, ie. when the initial cluster state
-	// of the requested key has been retrieved.
-	ErrBacklogDone = errors.New("no more backlogged data")
-)
-
-// Pipe a Value's initial state and subsequent updates to an already existing
-// channel in a supervisor.Runnable. This is mostly useful when wanting to select
-// {} on many Values.
-//
-// The given channel will NOT be closed when the runnable exits. The process
-// receiving from the channel should be running in a group with the pipe
-// runnable, so that both restart if either does. This ensures that there is always
-// at least one value in the channel when the receiver starts.
-func Pipe[T any](value Value[T], c chan<- T, opts ...GetOption[T]) supervisor.Runnable {
-	return func(ctx context.Context) error {
-		supervisor.Signal(ctx, supervisor.SignalHealthy)
-		w := value.Watch()
-		defer w.Close()
-		for {
-			v, err := w.Get(ctx, opts...)
-			if err != nil {
-				return err
-			}
-			select {
-			case c <- v:
-			case <-ctx.Done():
-				return ctx.Err()
-			}
-		}
-	}
-}
diff --git a/metropolis/pkg/event/memory/BUILD.bazel b/metropolis/pkg/event/memory/BUILD.bazel
deleted file mode 100644
index da07dc3..0000000
--- a/metropolis/pkg/event/memory/BUILD.bazel
+++ /dev/null
@@ -1,19 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "memory",
-    srcs = ["memory.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/event/memory",
-    visibility = ["//visibility:public"],
-    deps = ["//metropolis/pkg/event"],
-)
-
-go_test(
-    name = "memory_test",
-    srcs = [
-        "example_test.go",
-        "memory_test.go",
-    ],
-    embed = [":memory"],
-    deps = ["//metropolis/pkg/event"],
-)
diff --git a/metropolis/pkg/event/memory/example_test.go b/metropolis/pkg/event/memory/example_test.go
deleted file mode 100644
index 1ae12c6..0000000
--- a/metropolis/pkg/event/memory/example_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package memory
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"time"
-)
-
-// NetworkStatus is example data that will be stored in a Value.
-type NetworkStatus struct {
-	ExternalAddress net.IP
-	DefaultGateway  net.IP
-}
-
-// NetworkService is a fake/example network service that is responsible for
-// communicating the newest information about a machine's network configuration
-// to consumers/watchers.
-type NetworkService struct {
-	Provider Value[NetworkStatus]
-}
-
-// Run pretends to execute the network service's main logic loop, in which it
-// pretends to have received an IP address over DHCP, and communicates that to
-// consumers/watchers.
-func (s *NetworkService) Run(ctx context.Context) {
-	s.Provider.Set(NetworkStatus{
-		ExternalAddress: nil,
-		DefaultGateway:  nil,
-	})
-
-	select {
-	case <-time.After(100 * time.Millisecond):
-	case <-ctx.Done():
-		return
-	}
-
-	fmt.Printf("NS: Got DHCP Lease\n")
-	s.Provider.Set(NetworkStatus{
-		ExternalAddress: net.ParseIP("203.0.113.24"),
-		DefaultGateway:  net.ParseIP("203.0.113.1"),
-	})
-
-	select {
-	case <-time.After(100 * time.Millisecond):
-	case <-ctx.Done():
-		return
-	}
-
-	fmt.Printf("NS: DHCP Address changed\n")
-	s.Provider.Set(NetworkStatus{
-		ExternalAddress: net.ParseIP("203.0.113.103"),
-		DefaultGateway:  net.ParseIP("203.0.113.1"),
-	})
-
-	time.Sleep(100 * time.Millisecond)
-}
-
-// ExampleValue_full demonstrates a typical usecase for Event Values, in which
-// a mock network service lets watchers know that the machine on which the code
-// is running has received a new network configuration.
-// It also shows the typical boilerplate required in order to wrap a Value (eg.
-// MemoryValue) within a typesafe wrapper.
-func ExampleValue_full() {
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	// Create a fake NetworkService.
-	var ns NetworkService
-
-	// Run an /etc/hosts updater. It will watch for updates from the NetworkService
-	// about the current IP address of the node.
-	go func() {
-		w := ns.Provider.Watch()
-		for {
-			status, err := w.Get(ctx)
-			if err != nil {
-				break
-			}
-			if status.ExternalAddress == nil {
-				continue
-			}
-			// Pretend to write /etc/hosts with the newest ExternalAddress.
-			// In production code, you would also check for whether ExternalAddress has
-			// changed from the last written value, if writing to /etc/hosts is expensive.
-			fmt.Printf("/etc/hosts: foo.example.com is now %s\n", status.ExternalAddress.String())
-		}
-	}()
-
-	// Run fake network service.
-	ns.Run(ctx)
-
-	// Output:
-	// NS: Got DHCP Lease
-	// /etc/hosts: foo.example.com is now 203.0.113.24
-	// NS: DHCP Address changed
-	// /etc/hosts: foo.example.com is now 203.0.113.103
-}
diff --git a/metropolis/pkg/event/memory/memory.go b/metropolis/pkg/event/memory/memory.go
deleted file mode 100644
index 7402d64..0000000
--- a/metropolis/pkg/event/memory/memory.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package memory
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-
-	"source.monogon.dev/metropolis/pkg/event"
-)
-
-var (
-	// Type assert that *Value implements Value. We do this artificially, as
-	// there currently is no code path that needs this to be strictly true. However,
-	// users of this library might want to rely on the Value type instead of
-	// particular Value implementations.
-	_ event.Value[int] = &Value[int]{}
-)
-
-// Value is a 'memory value', which implements a event.Value stored in memory.
-// It is safe to construct an empty object of this type. However, this must not
-// be copied.
-type Value[T any] struct {
-	// mu guards the inner, innerSet and watchers fields.
-	mu sync.RWMutex
-	// inner is the latest data Set on the Value. It is used to provide the
-	// newest version of the Set data to new watchers.
-	inner T
-	// innerSet is true when inner has been Set at least once. It is used to
-	// differentiate between a nil and unset value.
-	innerSet bool
-	// watchers is the list of watchers that should be updated when new data is
-	// Set. It will grow on every .Watch() and shrink any time a watcher is
-	// determined to have been closed.
-	watchers []*watcher[T]
-
-	// Sync, if set to true, blocks all .Set() calls on the Value until all
-	// Watchers derived from it actively .Get() the new value. This can be used
-	// to ensure Watchers always receive a full log of all Set() calls.
-	//
-	// This must not be changed after the first .Set/.Watch call.
-	//
-	// This is an experimental API and subject to change. It might be migrated
-	// to per-Watcher settings defined within the main event.Value/Watcher
-	// interfaces.
-	Sync bool
-}
-
-// Set updates the Value to the given data. It is safe to call this from
-// multiple goroutines, including concurrently.
-//
-// For more information about guarantees, see event.Value.Set.
-func (m *Value[T]) Set(val T) {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-
-	// Update the data that is provided on first Get() to watchers.
-	m.inner = val
-	m.innerSet = true
-
-	// Go through all watchers, updating them on the new value and filtering out
-	// all closed watchers.
-	newWatchers := m.watchers[:0]
-	for _, w := range m.watchers {
-		if w.closed() {
-			continue
-		}
-		w.update(m.Sync, val)
-		newWatchers = append(newWatchers, w)
-	}
-	if cap(newWatchers) > len(newWatchers)*3 {
-		reallocated := make([]*watcher[T], 0, len(newWatchers)*2)
-		newWatchers = append(reallocated, newWatchers...)
-	}
-	m.watchers = newWatchers
-}
-
-// watcher implements the event.Watcher interface for watchers returned by
-// Value.
-type watcher[T any] struct {
-	// bufferedC is a buffered channel of size 1 for submitting values to the
-	// watcher.
-	bufferedC chan T
-	// unbufferedC is an unbuffered channel, which is used when Sync is enabled.
-	unbufferedC chan T
-
-	// getSem is a channel-based semaphore (which is of size 1, and thus in
-	// fact a mutex) that is used to ensure that only a single .Get() call is
-	// active. It is implemented as a channel to permit concurrent .Get() calls
-	// to error out instead of blocking.
-	getSem chan struct{}
-	// close is a channel that is closed when this watcher is itself Closed.
-	close chan struct{}
-}
-
-// Watch retrieves a Watcher that keeps track on the version of the data
-// contained within the Value that was last seen by a consumer.
-//
-// For more information about guarantees, see event.Value.Watch.
-func (m *Value[T]) Watch() event.Watcher[T] {
-	waiter := &watcher[T]{
-		bufferedC:   make(chan T, 1),
-		unbufferedC: make(chan T),
-		close:       make(chan struct{}),
-		getSem:      make(chan struct{}, 1),
-	}
-
-	m.mu.Lock()
-	// If the watchers slice is at capacity, drop closed watchers, and
-	// reallocate the slice at 2x length if it is not between 1.5x and 3x.
-	if len(m.watchers) == cap(m.watchers) {
-		newWatchers := m.watchers[:0]
-		for _, w := range m.watchers {
-			if !w.closed() {
-				newWatchers = append(newWatchers, w)
-			}
-		}
-		if cap(newWatchers)*2 < len(newWatchers)*3 || cap(newWatchers) > len(newWatchers)*3 {
-			reallocated := make([]*watcher[T], 0, len(newWatchers)*2)
-			newWatchers = append(reallocated, newWatchers...)
-		}
-		m.watchers = newWatchers
-	}
-	// Append this watcher to the Value.
-	m.watchers = append(m.watchers, waiter)
-	// If the Value already has some value set, put it in the buffered channel.
-	if m.innerSet {
-		waiter.bufferedC <- m.inner
-	}
-	m.mu.Unlock()
-
-	return waiter
-}
-
-// closed returns whether this watcher has been closed.
-func (m *watcher[T]) closed() bool {
-	select {
-	case _, ok := <-m.close:
-		if !ok {
-			return true
-		}
-	default:
-	}
-	return false
-}
-
-// update is the high level update-this-watcher function called by Value.
-func (m *watcher[T]) update(sync bool, val T) {
-	// If synchronous delivery was requested, block until a watcher .Gets it,
-	// or is closed.
-	if sync {
-		select {
-		case m.unbufferedC <- val:
-		case <-m.close:
-		}
-		return
-	}
-
-	// Otherwise, deliver asynchronously. If there is already a value in the
-	// buffered channel that was not retrieved, drop it.
-	select {
-	case <-m.bufferedC:
-	default:
-	}
-	// The channel is now empty, so sending to it cannot block.
-	m.bufferedC <- val
-}
-
-func (m *watcher[T]) Close() error {
-	close(m.close)
-	return nil
-}
-
-// Get blocks until a Value's data is available. See event.Watcher.Get for
-// guarantees and more information.
-func (m *watcher[T]) Get(ctx context.Context, opts ...event.GetOption[T]) (T, error) {
-	// Make sure we're the only active .Get call.
-	var empty T
-	select {
-	case m.getSem <- struct{}{}:
-	default:
-		return empty, fmt.Errorf("cannot Get() concurrently on a single waiter")
-	}
-	defer func() {
-		<-m.getSem
-	}()
-
-	var predicate func(t T) bool
-	for _, opt := range opts {
-		if opt.Predicate != nil {
-			predicate = opt.Predicate
-		}
-		if opt.BacklogOnly {
-			return empty, errors.New("BacklogOnly is not implemented for memory watchers")
-		}
-	}
-
-	for {
-		var val T
-		// For Sync values, ensure the initial value in the buffered
-		// channel is delivered first.
-		select {
-		case val = <-m.bufferedC:
-		default:
-			select {
-			case <-ctx.Done():
-				return empty, ctx.Err()
-			case val = <-m.bufferedC:
-			case val = <-m.unbufferedC:
-			}
-		}
-		if predicate != nil && !predicate(val) {
-			continue
-		}
-		return val, nil
-	}
-}
diff --git a/metropolis/pkg/event/memory/memory_test.go b/metropolis/pkg/event/memory/memory_test.go
deleted file mode 100644
index eec0a37..0000000
--- a/metropolis/pkg/event/memory/memory_test.go
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package memory
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-	"sync/atomic"
-	"testing"
-	"time"
-
-	"source.monogon.dev/metropolis/pkg/event"
-)
-
-// TestAsync exercises the high-level behaviour of a Value, in which a
-// watcher is able to catch up to the newest Set value.
-func TestAsync(t *testing.T) {
-	p := Value[int]{}
-	p.Set(0)
-
-	ctx := context.Background()
-
-	// The 0 from Set() should be available via .Get().
-	watcher := p.Watch()
-	val, err := watcher.Get(ctx)
-	if err != nil {
-		t.Fatalf("Get: %v", err)
-	}
-	if want, got := 0, val; want != got {
-		t.Fatalf("Value: got %d, wanted %d", got, want)
-	}
-
-	// Send a large amount of updates that the watcher does not actively .Get().
-	for i := 1; i <= 100; i++ {
-		p.Set(i)
-	}
-
-	// The watcher should still end up with the newest .Set() value on the next
-	// .Get() call.
-	val, err = watcher.Get(ctx)
-	if err != nil {
-		t.Fatalf("Get: %v", err)
-	}
-	if want, got := 100, val; want != got {
-		t.Fatalf("Value: got %d, wanted %d", got, want)
-	}
-}
-
-// TestSyncBlocks exercises the Value's 'Sync' field, which makes all
-// Set() calls block until all respective watchers .Get() the updated data.
-// This particular test ensures that .Set() calls to a Watcher result in a
-// prefect log of updates being transmitted to a watcher.
-func TestSync(t *testing.T) {
-	p := Value[int]{
-		Sync: true,
-	}
-	values := make(chan int, 100)
-	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		ctx := context.Background()
-		watcher := p.Watch()
-		wg.Done()
-		for {
-			value, err := watcher.Get(ctx)
-			if err != nil {
-				panic(err)
-			}
-			values <- value
-		}
-	}()
-
-	p.Set(0)
-	wg.Wait()
-
-	want := []int{1, 2, 3, 4}
-	for _, w := range want {
-		p.Set(w)
-	}
-
-	timeout := time.After(time.Second)
-	for i, w := range append([]int{0}, want...) {
-		select {
-		case <-timeout:
-			t.Fatalf("timed out on value %d (%d)", i, w)
-		case val := <-values:
-			if w != val {
-				t.Errorf("value %d was %d, wanted %d", i, val, w)
-			}
-		}
-	}
-}
-
-// TestSyncBlocks exercises the Value's 'Sync' field, which makes all
-// Set() calls block until all respective watchers .Get() the updated data.
-// This particular test ensures that .Set() calls actually block when a watcher
-// is unattended.
-func TestSyncBlocks(t *testing.T) {
-	p := Value[int]{
-		Sync: true,
-	}
-	ctx := context.Background()
-
-	// Shouldn't block, as there's no declared watchers.
-	p.Set(0)
-
-	watcher := p.Watch()
-
-	// Should retrieve the zero, more requests will pend.
-	value, err := watcher.Get(ctx)
-	if err != nil {
-		t.Fatalf("Get: %v", err)
-	}
-	if want, got := 0, value; want != got {
-		t.Fatalf("Got initial value %d, wanted %d", got, want)
-	}
-
-	// .Set() Should block, as watcher is unattended.
-	//
-	// Whether something blocks in Go is untestable in a robust way (see: halting
-	// problem). We work around this this by introducing a 'stage' int64, which is
-	// put on the 'c' channel after the needs-to-block function returns. We then
-	// perform an action that should unblock this function right after updating
-	// 'stage' to a different value.
-	// Then, we observe what was put on the channel: If it's the initial value, it
-	// means the function didn't block when expected. Otherwise, it means the
-	// function unblocked when expected.
-	stage := int64(0)
-	c := make(chan int64, 1)
-	go func() {
-		p.Set(1)
-		c <- atomic.LoadInt64(&stage)
-	}()
-
-	// Getting should unblock the provider. Mark via 'stage' variable that
-	// unblocking now is expected.
-	atomic.StoreInt64(&stage, int64(1))
-	// Potential race: .Set() unblocks here due to some bug, before .Get() is
-	// called, and we record a false positive.
-	value, err = watcher.Get(ctx)
-	if err != nil {
-		t.Fatalf("Get: %v", err)
-	}
-
-	res := <-c
-	if res != int64(1) {
-		t.Fatalf("Set() returned before Get()")
-	}
-
-	if want, got := 1, value; want != got {
-		t.Fatalf("Wanted value %d, got %d", want, got)
-	}
-
-	// Closing the watcher and setting should not block anymore.
-	if err := watcher.Close(); err != nil {
-		t.Fatalf("Close: %v", err)
-	}
-	// Last step, if this blocks we will get a deadlock error and the test will panic.
-	p.Set(2)
-}
-
-// TestMultipleGets verifies that calling .Get() on a single watcher from two
-// goroutines is prevented by returning an error in exactly one of them.
-func TestMultipleGets(t *testing.T) {
-	p := Value[int]{}
-	ctx := context.Background()
-
-	w := p.Watch()
-
-	tryError := func(errs chan error) {
-		_, err := w.Get(ctx)
-		errs <- err
-	}
-	errs := make(chan error, 2)
-	go tryError(errs)
-	go tryError(errs)
-
-	for err := range errs {
-		if err == nil {
-			t.Fatalf("A Get call succeeded, while it should have blocked or returned an error")
-		} else {
-			// Found the error, test succeeded.
-			break
-		}
-	}
-}
-
-// TestConcurrency attempts to stress the Value/Watcher
-// implementation to design limits (a hundred simultaneous watchers), ensuring
-// that the watchers all settle to the final set value.
-func TestConcurrency(t *testing.T) {
-	ctx := context.Background()
-
-	p := Value[int]{}
-	p.Set(0)
-
-	// Number of watchers to create.
-	watcherN := 100
-	// Expected final value to be Set().
-	final := 100
-	// Result channel per watcher.
-	resC := make([]chan error, watcherN)
-
-	// Spawn watcherN watchers.
-	for i := 0; i < watcherN; i++ {
-		resC[i] = make(chan error, 1)
-		go func(id int) {
-			// done is a helper function that will put an error on the
-			// respective watcher's resC.
-			done := func(err error) {
-				resC[id] <- err
-				close(resC[id])
-			}
-
-			watcher := p.Watch()
-			// prev is used to ensure the values received are monotonic.
-			prev := -1
-			for {
-				val, err := watcher.Get(ctx)
-				if err != nil {
-					done(err)
-					return
-				}
-
-				// Ensure monotonicity of received data.
-				if val <= prev {
-					done(fmt.Errorf("received out of order data: %d after %d", val, prev))
-				}
-				prev = val
-
-				// Quit when the final value is received.
-				if val == final {
-					done(nil)
-					return
-				}
-
-				// Sleep a bit, depending on the watcher. This makes each
-				// watcher behave slightly differently, and attempts to
-				// exercise races dependent on sleep time between subsequent
-				// Get calls.
-				time.Sleep(time.Millisecond * time.Duration(id))
-			}
-		}(i)
-	}
-
-	// Set 1..final on the value.
-	for i := 1; i <= final; i++ {
-		p.Set(i)
-	}
-
-	// Ensure all watchers exit with no error.
-	for i, c := range resC {
-		err := <-c
-		if err != nil {
-			t.Errorf("Watcher %d returned %v", i, err)
-		}
-	}
-}
-
-// TestCanceling exercises whether a context canceling in a .Get() gracefully
-// aborts that particular Get call, but also allows subsequent use of the same
-// watcher.
-func TestCanceling(t *testing.T) {
-	p := Value[int]{
-		Sync: true,
-	}
-
-	ctx, ctxC := context.WithCancel(context.Background())
-
-	watcher := p.Watch()
-
-	// errs will contain the error returned by Get.
-	errs := make(chan error, 1)
-	go func() {
-		// This Get will block, as no initial data has been Set on the value.
-		_, err := watcher.Get(ctx)
-		errs <- err
-	}()
-
-	// Cancel the context, and expect that context error to propagate to the .Get().
-	ctxC()
-	if want, got := ctx.Err(), <-errs; !errors.Is(got, want) {
-		t.Fatalf("Get should've returned %v, got %v", want, got)
-	}
-
-	// Do another .Get() on the same watcher with a new context. Even though the
-	// call was aborted via a context cancel, the watcher should continue working.
-	ctx = context.Background()
-	go func() {
-		_, err := watcher.Get(ctx)
-		errs <- err
-	}()
-
-	// Unblock the .Get now.
-	p.Set(1)
-	if want, got := error(nil), <-errs; !errors.Is(got, want) {
-		t.Fatalf("Get should've returned %v, got %v", want, got)
-	}
-}
-
-// TestSetAfterWatch ensures that if a value is updated between a Watch and the
-// initial Get, only the newest Set value is returns.
-func TestSetAfterWatch(t *testing.T) {
-	ctx := context.Background()
-
-	p := Value[int]{}
-	p.Set(0)
-
-	watcher := p.Watch()
-	p.Set(1)
-
-	data, err := watcher.Get(ctx)
-	if err != nil {
-		t.Fatalf("Get: %v", err)
-	}
-	if want, got := 1, data; want != got {
-		t.Errorf("Get should've returned %v, got %v", want, got)
-	}
-}
-
-// TestWatchersList ensures that the list of watchers is managed correctly,
-// i.e. there is no memory leak and closed watchers are removed while
-// keeping all non-closed watchers.
-func TestWatchersList(t *testing.T) {
-	ctx := context.Background()
-	p := Value[int]{}
-
-	var watchers []event.Watcher[int]
-	for i := 0; i < 100; i++ {
-		watchers = append(watchers, p.Watch())
-	}
-	for i := 0; i < 10000; i++ {
-		watchers[10].Close()
-		watchers[10] = p.Watch()
-	}
-
-	if want, got := 1000, cap(p.watchers); want <= got {
-		t.Fatalf("Got capacity %d, wanted less than %d", got, want)
-	}
-
-	p.Set(1)
-	if want, got := 100, len(p.watchers); want != got {
-		t.Fatalf("Got %d watchers, wanted %d", got, want)
-	}
-
-	for _, watcher := range watchers {
-		data, err := watcher.Get(ctx)
-		if err != nil {
-			t.Fatalf("Get: %v", err)
-		}
-		if want, got := 1, data; want != got {
-			t.Errorf("Get should've returned %v, got %v", want, got)
-		}
-	}
-}
diff --git a/metropolis/pkg/fat32/BUILD.bazel b/metropolis/pkg/fat32/BUILD.bazel
deleted file mode 100644
index b39b40b..0000000
--- a/metropolis/pkg/fat32/BUILD.bazel
+++ /dev/null
@@ -1,37 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
-
-go_library(
-    name = "fat32",
-    srcs = [
-        "dos83.go",
-        "fat32.go",
-        "structs.go",
-        "utils.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/fat32",
-    visibility = ["//visibility:public"],
-)
-
-go_test(
-    name = "fat32_test",
-    srcs = [
-        "fsck_test.go",
-        "linux_test.go",
-        "structs_test.go",
-    ],
-    data = ["@com_github_dosfstools_dosfstools//:fsck"],
-    embed = [":fat32"],
-    deps = [
-        "@com_github_stretchr_testify//assert",
-        "@com_github_stretchr_testify//require",
-        "@io_bazel_rules_go//go/runfiles:go_default_library",
-        "@org_golang_x_mod//semver",
-        "@org_golang_x_sys//unix",
-    ],
-)
-
-ktest(
-    cmdline = "ramdisk_size=266240",
-    tester = ":fat32_test",
-)
diff --git a/metropolis/pkg/fat32/dos83.go b/metropolis/pkg/fat32/dos83.go
deleted file mode 100644
index 650df96..0000000
--- a/metropolis/pkg/fat32/dos83.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package fat32
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"regexp"
-	"strings"
-)
-
-// By default, DOS names would be encoded as what Microsoft calls the OEM
-// code page. This is however dependant on the code page settings of the
-// OS reading the file name as it's not mentioned in FAT32 metadata.
-// To get maximum compatibility and make it easy to read in hex editors
-// this only encodes ASCII characters and not any specific code page.
-// This can still result in garbled data when using a non-latin code page,
-// but this is unavoidable.
-// This is legal as there is no specific requirements for generating these
-// DOS names and any semi-modern system should use the unicode filenames
-// anyways.
-
-var invalidDOSNameChar = regexp.MustCompile("^[^A-Z0-9!#$%&'()@^_\x60{}~-]$")
-
-// validDOSName matches names which are valid and unique DOS 8.3 file names as
-// well as valid ASCII
-var validDOSName = regexp.MustCompile(`^^([A-Z0-9!#$%&'()@^_\x60{}~-]{0,8})(\.[A-Z0-9!#$%&'()-@^_\x60{}~-]{1,3})?$`)
-
-func makeUniqueDOSNames(inodes []*Inode) error {
-	taken := make(map[[11]byte]bool)
-	var lossyNameInodes []*Inode
-	// Make two passes to ensure that names can always be passed through even
-	// if they would conflict with a generated name.
-	for _, i := range inodes {
-		for j := range i.dosName {
-			i.dosName[j] = ' '
-		}
-		nameUpper := strings.ToUpper(i.Name)
-		dosParts := validDOSName.FindStringSubmatch(nameUpper)
-		if dosParts != nil {
-			// Name is pass-through
-			copy(i.dosName[:8], dosParts[1])
-			if len(dosParts[2]) > 0 {
-				// Skip the dot, it is implicit
-				copy(i.dosName[8:], dosParts[2][1:])
-			}
-			if taken[i.dosName] {
-				// Mapping is unique, complain about the actual file name, not
-				// the 8.3 one
-				return fmt.Errorf("name %q occurs more than once in the same directory", i.Name)
-			}
-			taken[i.dosName] = true
-			continue
-		}
-		lossyNameInodes = append(lossyNameInodes, i)
-	}
-	// Willfully ignore the recommended short name generation algorithm as it
-	// requires tons of bookkeeping and doesn't result in stable names so
-	// cannot be relied on anyway.
-	// A FAT32 directory is limited to 2^16 entries (in practice less than half
-	// of that because of long file name entries), so 4 hex characters
-	// guarantee uniqueness, regardless of the rest of name.
-	var nameIdx int
-	for _, i := range lossyNameInodes {
-		nameUpper := strings.ToUpper(i.Name)
-		dotParts := strings.Split(nameUpper, ".")
-		for j := range dotParts {
-			// Remove all invalid chars
-			dotParts[j] = invalidDOSNameChar.ReplaceAllString(dotParts[j], "")
-		}
-		var fileName string
-		lastDotPart := dotParts[len(dotParts)-1]
-		if len(dotParts) > 1 && len(dotParts[0]) > 0 && len(lastDotPart) > 0 {
-			// We have a valid 8.3 extension
-			copy(i.dosName[8:], lastDotPart)
-			fileName = strings.Join(dotParts[:len(dotParts)-1], "")
-		} else {
-			fileName = strings.Join(dotParts[:], "")
-		}
-		copy(i.dosName[:4], fileName)
-
-		for {
-			copy(i.dosName[4:], fmt.Sprintf("%04X", nameIdx))
-			nameIdx++
-			if nameIdx >= math.MaxUint16 {
-				return errors.New("invariant violated: unable to find unique name with 16 bit counter in 16 bit space")
-			}
-			if !taken[i.dosName] {
-				break
-			}
-		}
-	}
-	return nil
-}
diff --git a/metropolis/pkg/fat32/fat32.go b/metropolis/pkg/fat32/fat32.go
deleted file mode 100644
index 7a45aa4..0000000
--- a/metropolis/pkg/fat32/fat32.go
+++ /dev/null
@@ -1,533 +0,0 @@
-// Package fat32 implements a writer for the FAT32 filesystem.
-package fat32
-
-import (
-	"crypto/rand"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"io/fs"
-	"math"
-	"math/bits"
-	"strings"
-	"time"
-	"unicode/utf16"
-)
-
-// This package contains multiple references to the FAT32 specification, called
-// Microsoft Extensible Firmware Initiative FAT32 File System Specification
-// version 1.03 (just called the spec from now on). You can get it at
-// https://download.microsoft.com/download/0/8/4/\
-// 084c452b-b772-4fe5-89bb-a0cbf082286a/fatgen103.doc
-
-type Options struct {
-	// Size of a logical block on the block device. Needs to be a power of two
-	// equal or bigger than 512. If left at zero, defaults to 512.
-	BlockSize uint16
-
-	// Number of blocks the filesystem should span. If zero, it will be exactly
-	// as large as it needs to be.
-	BlockCount uint32
-
-	// Human-readable filesystem label. Maximum 10 bytes (gets cut off), should
-	// be uppercase alphanumeric.
-	Label string
-
-	// Filesystem identifier. If unset (i.e. left at zero) a random value will
-	// be assigned by WriteFS.
-	ID uint32
-}
-
-// SizedReader is an io.Reader with a known size
-type SizedReader interface {
-	io.Reader
-	Size() int64
-}
-
-// Attribute is a bitset of flags set on an inode.
-// See also the spec page 24
-type Attribute uint8
-
-const (
-	// AttrReadOnly marks a file as read-only
-	AttrReadOnly Attribute = 0x01
-	// AttrHidden indicates that directory listings should not show this file.
-	AttrHidden Attribute = 0x02
-	// AttrSystem indicates that this is an operating system file.
-	AttrSystem Attribute = 0x04
-	// AttrDirectory indicates that this is a directory and not a file.
-	AttrDirectory Attribute = 0x10
-	// AttrArchive canonically indicates that a file has been created/modified
-	// since the last backup. Its use in practice is inconsistent.
-	AttrArchive Attribute = 0x20
-)
-
-// Inode is file or directory on the FAT32 filesystem. Note that the concept
-// of an inode doesn't really exist on FAT32, its directories are just special
-// files.
-type Inode struct {
-	// Name of the file or directory (not including its path)
-	Name string
-	// Time the file or directory was last modified
-	ModTime time.Time
-	// Time the file or directory was created
-	CreateTime time.Time
-	// Attributes
-	Attrs Attribute
-	// Children of this directory (only valid when Attrs has AttrDirectory set)
-	Children []*Inode
-	// Content of this file
-	// Only valid when Attrs doesn't have AttrDirectory set.
-	Content SizedReader
-
-	// Filled out on placement and write-out
-	startCluster int
-	parent       *Inode
-	dosName      [11]byte
-}
-
-// Number of LFN entries + normal entry (all 32 bytes)
-func (i Inode) metaSize() (int64, error) {
-	fileNameUTF16 := utf16.Encode([]rune(i.Name))
-	// VFAT file names are null-terminated
-	fileNameUTF16 = append(fileNameUTF16, 0x00)
-	if len(fileNameUTF16) > 255 {
-		return 0, errors.New("file name too long, maximum is 255 UTF-16 code points")
-	}
-
-	// ⌈len(fileNameUTF16)/codepointsPerEntry⌉
-	numEntries := (len(fileNameUTF16) + codepointsPerEntry - 1) / codepointsPerEntry
-	return (int64(numEntries) + 1) * 32, nil
-}
-
-func lfnChecksum(dosName [11]byte) uint8 {
-	var sum uint8
-	for _, b := range dosName {
-		sum = ((sum & 1) << 7) + (sum >> 1) + b
-	}
-	return sum
-}
-
-// writeMeta writes information about this inode into the contents of the parent
-// inode.
-func (i Inode) writeMeta(w io.Writer) error {
-	fileNameUTF16 := utf16.Encode([]rune(i.Name))
-	// VFAT file names are null-terminated
-	fileNameUTF16 = append(fileNameUTF16, 0x00)
-	if len(fileNameUTF16) > 255 {
-		return errors.New("file name too long, maximum is 255 UTF-16 code points")
-	}
-
-	// ⌈len(fileNameUTF16)/codepointsPerEntry⌉
-	numEntries := (len(fileNameUTF16) + codepointsPerEntry - 1) / codepointsPerEntry
-	// Fill up to space in given number of entries with fill code point 0xffff
-	fillCodePoints := (numEntries * codepointsPerEntry) - len(fileNameUTF16)
-	for j := 0; j < fillCodePoints; j++ {
-		fileNameUTF16 = append(fileNameUTF16, 0xffff)
-	}
-
-	// Write entries in reverse order
-	for j := numEntries; j > 0; j-- {
-		// Index of the code point being processed
-		cpIdx := (j - 1) * codepointsPerEntry
-		var entry lfnEntry
-		entry.Checksum = lfnChecksum(i.dosName)
-		// Downcast is safe as i <= numEntries <= ⌈255/codepointsPerEntry⌉
-		entry.SequenceNumber = uint8(j)
-		if j == numEntries {
-			entry.SequenceNumber |= lastSequenceNumberFlag
-		}
-		entry.Attributes = 0x0F
-		copy(entry.NamePart1[:], fileNameUTF16[cpIdx:])
-		cpIdx += len(entry.NamePart1)
-		copy(entry.NamePart2[:], fileNameUTF16[cpIdx:])
-		cpIdx += len(entry.NamePart2)
-		copy(entry.NamePart3[:], fileNameUTF16[cpIdx:])
-		cpIdx += len(entry.NamePart3)
-
-		if err := binary.Write(w, binary.LittleEndian, entry); err != nil {
-			return err
-		}
-	}
-	selfSize, err := i.dataSize()
-	if err != nil {
-		return err
-	}
-	if selfSize >= 4*1024*1024*1024 {
-		return errors.New("single file size exceeds 4GiB which is prohibited in FAT32")
-	}
-	if i.Attrs&AttrDirectory != 0 {
-		selfSize = 0 // Directories don't have an explicit size
-	}
-	date, t, _ := timeToMsDosTime(i.ModTime)
-	if err := binary.Write(w, binary.LittleEndian, &dirEntry{
-		DOSName:           i.dosName,
-		Attributes:        uint8(i.Attrs),
-		FirstClusterHigh:  uint16(i.startCluster >> 16),
-		LastWrittenToTime: t,
-		LastWrittenToDate: date,
-		FirstClusterLow:   uint16(i.startCluster & 0xffff),
-		FileSize:          uint32(selfSize),
-	}); err != nil {
-		return err
-	}
-	return nil
-}
-
-// writeData writes the contents of this inode (including possible metadata
-// of its children, but not its children's data)
-func (i Inode) writeData(w io.Writer, volumeLabel [11]byte) error {
-	if i.Attrs&AttrDirectory != 0 {
-		if i.parent == nil {
-			if err := binary.Write(w, binary.LittleEndian, &dirEntry{
-				DOSName:    volumeLabel,
-				Attributes: 0x08, // Volume ID, internal use only
-			}); err != nil {
-				return err
-			}
-		} else {
-			date, t, _ := timeToMsDosTime(i.ModTime)
-			cdate, ctime, ctens := timeToMsDosTime(i.CreateTime)
-			if err := binary.Write(w, binary.LittleEndian, &dirEntry{
-				DOSName:           [11]byte{'.', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '},
-				CreationDate:      cdate,
-				CreationTime:      ctime,
-				CreationTenMilli:  ctens,
-				LastWrittenToTime: t,
-				LastWrittenToDate: date,
-				Attributes:        uint8(i.Attrs),
-				FirstClusterHigh:  uint16(i.startCluster >> 16),
-				FirstClusterLow:   uint16(i.startCluster & 0xffff),
-			}); err != nil {
-				return err
-			}
-			startCluster := i.parent.startCluster
-			if i.parent.parent == nil {
-				// Special case: When the dotdot directory points to the root
-				// directory, the start cluster is defined to be zero even if
-				// it isn't.
-				startCluster = 0
-			}
-			// Time is intentionally taken from this directory, not the parent
-			if err := binary.Write(w, binary.LittleEndian, &dirEntry{
-				DOSName:           [11]byte{'.', '.', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '},
-				LastWrittenToTime: t,
-				LastWrittenToDate: date,
-				Attributes:        uint8(AttrDirectory),
-				FirstClusterHigh:  uint16(startCluster >> 16),
-				FirstClusterLow:   uint16(startCluster & 0xffff),
-			}); err != nil {
-				return err
-			}
-		}
-		err := makeUniqueDOSNames(i.Children)
-		if err != nil {
-			return err
-		}
-		for _, c := range i.Children {
-			if err := c.writeMeta(w); err != nil {
-				return err
-			}
-		}
-	} else {
-		if _, err := io.CopyN(w, i.Content, i.Content.Size()); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (i Inode) dataSize() (int64, error) {
-	if i.Attrs&AttrDirectory != 0 {
-		var size int64
-		if i.parent != nil {
-			// Dot and dotdot directories
-			size += 2 * 32
-		} else {
-			// Volume ID
-			size += 1 * 32
-		}
-		for _, c := range i.Children {
-			cs, err := c.metaSize()
-			if err != nil {
-				return 0, err
-			}
-			size += cs
-		}
-		if size > 2*1024*1024 {
-			return 0, errors.New("directory contains > 2MiB of metadata which is prohibited in FAT32")
-		}
-		return size, nil
-	} else {
-		return i.Content.Size(), nil
-	}
-}
-
-func (i *Inode) PlaceFile(path string, reader SizedReader) error {
-	pathParts := strings.Split(path, "/")
-	inodeRef := i
-	for j, part := range pathParts {
-		var childExists bool
-		for _, child := range inodeRef.Children {
-			if strings.EqualFold(child.Name, part) {
-				inodeRef = child
-				childExists = true
-				break
-			}
-		}
-		if j == len(pathParts)-1 { // Is last path part (i.e. file name)
-			if childExists {
-				return &fs.PathError{Path: path, Err: fs.ErrExist, Op: "create"}
-			}
-			newInode := &Inode{
-				Name:    part,
-				Content: reader,
-			}
-			inodeRef.Children = append(inodeRef.Children, newInode)
-			return nil
-		} else if !childExists {
-			newInode := &Inode{
-				Name:  part,
-				Attrs: AttrDirectory,
-			}
-			inodeRef.Children = append(inodeRef.Children, newInode)
-			inodeRef = newInode
-		}
-	}
-	panic("unreachable")
-}
-
-type planningState struct {
-	// List of inodes in filesystem layout order
-	orderedInodes []*Inode
-	// File Allocation Table
-	fat []uint32
-	// Size of a single cluster in the FAT in bytes
-	clusterSize int64
-}
-
-// Allocates clusters capable of holding at least b bytes and returns the
-// starting cluster index
-func (p *planningState) allocBytes(b int64) int {
-	// Zero-byte data entries are located at the cluster zero by definition
-	// No actual allocation is performed
-	if b == 0 {
-		return 0
-	}
-	// Calculate the number of clusters to be allocated
-	n := (b + p.clusterSize - 1) / p.clusterSize
-	allocStartCluster := len(p.fat)
-	for i := int64(0); i < n-1; i++ {
-		p.fat = append(p.fat, uint32(len(p.fat)+1))
-	}
-	p.fat = append(p.fat, fatEOF)
-	return allocStartCluster
-}
-
-func (i *Inode) placeRecursively(p *planningState) error {
-	selfDataSize, err := i.dataSize()
-	if err != nil {
-		return fmt.Errorf("%s: %w", i.Name, err)
-	}
-	i.startCluster = p.allocBytes(selfDataSize)
-	p.orderedInodes = append(p.orderedInodes, i)
-	for _, c := range i.Children {
-		c.parent = i
-		err = c.placeRecursively(p)
-		if err != nil {
-			return fmt.Errorf("%s/%w", i.Name, err)
-		}
-	}
-	return nil
-}
-
-// WriteFS writes a filesystem described by a root inode and its children to a
-// given io.Writer.
-func WriteFS(w io.Writer, rootInode Inode, opts Options) error {
-	if opts.BlockSize == 0 {
-		opts.BlockSize = 512
-	}
-	if bits.OnesCount16(opts.BlockSize) != 1 {
-		return fmt.Errorf("option BlockSize is not a power of two")
-	}
-	if opts.BlockSize < 512 {
-		return fmt.Errorf("option BlockSize must be at least 512 bytes")
-	}
-	if opts.ID == 0 {
-		var buf [4]byte
-		if _, err := rand.Read(buf[:]); err != nil {
-			return fmt.Errorf("failed to assign random FAT ID: %v", err)
-		}
-		opts.ID = binary.BigEndian.Uint32(buf[:])
-	}
-	if rootInode.Attrs&AttrDirectory == 0 {
-		return errors.New("root inode must be a directory (i.e. have AttrDirectory set)")
-	}
-	wb := newBlockWriter(w)
-	bs := bootSector{
-		// Assembled x86_32 machine code corresponding to
-		// jmp $
-		// nop
-		// i.e. an infinite loop doing nothing. Nothing created in the last 35
-		// years should boot this anyway.
-		// TODO(q3k): write a stub
-		JmpInstruction: [3]byte{0xEB, 0xFE, 0x90},
-		// Identification
-		OEMName: [8]byte{'M', 'O', 'N', 'O', 'G', 'O', 'N'},
-		ID:      opts.ID,
-		// Block geometry
-		BlockSize:   opts.BlockSize,
-		TotalBlocks: opts.BlockCount,
-		// BootSector block + FSInfo Block, backup copy at blocks 6 and 7
-		ReservedBlocks: 8,
-		// FSInfo block is always in block 1, right after this block
-		FSInfoBlock: 1,
-		// Start block of the backup of the boot block and FSInfo block
-		// De facto this must be 6 as it is only used when the primary
-		// boot block is damaged at which point this field can no longer be
-		// read.
-		BackupStartBlock: 6,
-		// A lot of implementations only work with 2, so use that
-		NumFATs:          2,
-		BlocksPerCluster: 1,
-		// Flags and signatures
-		MediaCode:     0xf8,
-		BootSignature: 0x29,
-		Label:         [11]byte{' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '},
-		Type:          [8]byte{'F', 'A', 'T', '3', '2', ' ', ' ', ' '},
-		Signature:     [2]byte{0x55, 0xaa},
-	}
-
-	copy(bs.Label[:], opts.Label)
-
-	fs := fsinfo{
-		// Signatures
-		LeadSignature:     [4]byte{0x52, 0x52, 0x61, 0x41},
-		StructSignature:   [4]byte{0x72, 0x72, 0x41, 0x61},
-		TrailingSignature: [2]byte{0x55, 0xAA},
-
-		// This is the unset value which is always legal
-		NextFreeCluster: 0xFFFFFFFF,
-	}
-
-	p := planningState{
-		clusterSize: int64(bs.BlocksPerCluster) * int64(bs.BlockSize),
-	}
-	if opts.BlockCount != 0 {
-		// Preallocate FAT if we know how big it needs to be
-		p.fat = make([]uint32, 0, opts.BlockCount/uint32(bs.BlocksPerCluster))
-	} else {
-		// Preallocate minimum size FAT
-		// See the spec page 15 for the origin of this calculation.
-		p.fat = make([]uint32, 0, 65525+2)
-	}
-	// First two clusters are special
-	p.fat = append(p.fat, 0x0fffff00|uint32(bs.MediaCode), 0x0fffffff)
-	err := rootInode.placeRecursively(&p)
-	if err != nil {
-		return err
-	}
-
-	allocClusters := len(p.fat)
-	if allocClusters >= fatMask&math.MaxUint32 {
-		return fmt.Errorf("filesystem contains more than 2^28 FAT entries, this is unsupported. Note that this package currently always creates minimal clusters")
-	}
-
-	// Fill out FAT to minimum size for FAT32
-	for len(p.fat) < 65525+2 {
-		p.fat = append(p.fat, fatFree)
-	}
-
-	bs.RootClusterNumber = uint32(rootInode.startCluster)
-
-	bs.BlocksPerFAT = uint32(binary.Size(p.fat)+int(opts.BlockSize)-1) / uint32(opts.BlockSize)
-	occupiedBlocks := uint32(bs.ReservedBlocks) + (uint32(len(p.fat)-2) * uint32(bs.BlocksPerCluster)) + bs.BlocksPerFAT*uint32(bs.NumFATs)
-	if bs.TotalBlocks == 0 {
-		bs.TotalBlocks = occupiedBlocks
-	} else if bs.TotalBlocks < occupiedBlocks {
-		return fmt.Errorf("content (minimum %d blocks) would exceed number of blocks specified (%d blocks)", occupiedBlocks, bs.TotalBlocks)
-	} else { // Fixed-size file system with enough space
-		blocksToDistribute := bs.TotalBlocks - uint32(bs.ReservedBlocks)
-		// Number of data blocks which can be described by one metadata/FAT
-		// block. Always an integer because 4 (bytes per uint32) is a divisor of
-		// all powers of two equal or bigger than 8 and FAT32 requires a minimum
-		// of 512.
-		dataBlocksPerFATBlock := (uint32(bs.BlocksPerCluster) * uint32(bs.BlockSize)) / (uint32(binary.Size(p.fat[0])))
-		// Split blocksToDistribute between metadata and data so that exactly as
-		// much metadata (FAT) exists for describing the amount of data blocks
-		// while respecting alignment.
-		divisor := dataBlocksPerFATBlock + uint32(bs.NumFATs)
-		// 2*blocksPerCluster compensates for the first two "magic" FAT entries
-		// which do not have corresponding data.
-		bs.BlocksPerFAT = (bs.TotalBlocks + 2*uint32(bs.BlocksPerCluster) + (divisor - 1)) / divisor
-		dataBlocks := blocksToDistribute - (uint32(bs.NumFATs) * bs.BlocksPerFAT)
-		// Align to full clusters
-		dataBlocks -= dataBlocks % uint32(bs.BlocksPerCluster)
-		// Magic +2 as the first two entries do not describe data
-		for len(p.fat) < (int(dataBlocks)/int(bs.BlocksPerCluster))+2 {
-			p.fat = append(p.fat, fatFree)
-		}
-	}
-	fs.FreeCount = uint32(len(p.fat) - allocClusters)
-	if fs.FreeCount > 1 {
-		fs.NextFreeCluster = uint32(allocClusters) + 1
-	}
-
-	// Write superblock
-	if err := binary.Write(wb, binary.LittleEndian, bs); err != nil {
-		return err
-	}
-	if err := wb.FinishBlock(int64(opts.BlockSize), true); err != nil {
-		return err
-	}
-	if err := binary.Write(wb, binary.LittleEndian, fs); err != nil {
-		return err
-	}
-	if err := wb.FinishBlock(int64(opts.BlockSize), true); err != nil {
-		return err
-	}
-
-	block := make([]byte, opts.BlockSize)
-	for i := 0; i < 4; i++ {
-		if _, err := wb.Write(block); err != nil {
-			return err
-		}
-	}
-	// Backup of superblock at block 6
-	if err := binary.Write(wb, binary.LittleEndian, bs); err != nil {
-		return err
-	}
-	if err := wb.FinishBlock(int64(opts.BlockSize), true); err != nil {
-		return err
-	}
-	if err := binary.Write(wb, binary.LittleEndian, fs); err != nil {
-		return err
-	}
-	if err := wb.FinishBlock(int64(opts.BlockSize), true); err != nil {
-		return err
-	}
-
-	for i := uint8(0); i < bs.NumFATs; i++ {
-		if err := binary.Write(wb, binary.LittleEndian, p.fat); err != nil {
-			return err
-		}
-		if err := wb.FinishBlock(int64(opts.BlockSize), true); err != nil {
-			return err
-		}
-	}
-
-	for _, i := range p.orderedInodes {
-		if err := i.writeData(wb, bs.Label); err != nil {
-			return fmt.Errorf("failed to write inode %q: %v", i.Name, err)
-		}
-		if err := wb.FinishBlock(int64(opts.BlockSize)*int64(bs.BlocksPerCluster), false); err != nil {
-			return err
-		}
-	}
-	// Creatively use block writer to write out all empty data at the end
-	if err := wb.FinishBlock(int64(opts.BlockSize)*int64(bs.TotalBlocks), false); err != nil {
-		return err
-	}
-	return nil
-}
diff --git a/metropolis/pkg/fat32/fsck_test.go b/metropolis/pkg/fat32/fsck_test.go
deleted file mode 100644
index 27de542..0000000
--- a/metropolis/pkg/fat32/fsck_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package fat32
-
-import (
-	"fmt"
-	"os"
-	"os/exec"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/bazelbuild/rules_go/go/runfiles"
-)
-
-func testWithFsck(t *testing.T, rootInode Inode, opts Options) {
-	t.Helper()
-	fsckPath, err := runfiles.Rlocation("com_github_dosfstools_dosfstools/fsck")
-	if err != nil {
-		t.Fatalf("unable to get path to fsck: %v", err)
-	}
-	testFile, err := os.CreateTemp("", "fat32-fsck-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.Remove(testFile.Name())
-	if err := WriteFS(testFile, rootInode, opts); err != nil {
-		t.Fatalf("failed to write test FS: %v", err)
-	}
-	// Run fsck non-interactively (-n), disallow spaces in short file names (-S)
-	// as well as perform deep verification (-V)
-	// If the file system is OK (i.e. fsck does not want to fix it) it returns
-	// 0, otherwise 1.
-	fsckCmd := exec.Command(fsckPath, "-n", "-S", "-V", testFile.Name())
-	result, err := fsckCmd.CombinedOutput()
-	if err != nil {
-		t.Errorf("fsck failed: %v", string(result))
-	}
-}
-
-func TestBasicFsck(t *testing.T) {
-	if os.Getenv("IN_KTEST") == "true" {
-		t.Skip("In ktest")
-	}
-	var largeString strings.Builder
-	for i := 0; i < 16384; i++ {
-		fmt.Fprintf(&largeString, "part%d", i)
-	}
-	// Test both common block sizes (512 and 4096 bytes) as well as the largest
-	// supported one (32K)
-	for _, blockSize := range []uint16{512, 4096, 32768} {
-		for _, fixed := range []string{"", "Fixed"} {
-			t.Run(fmt.Sprintf("BlockSize%d%v", blockSize, fixed), func(t *testing.T) {
-				rootInode := Inode{
-					Attrs:      AttrDirectory,
-					ModTime:    time.Date(2022, 03, 04, 5, 6, 7, 8, time.UTC),
-					CreateTime: time.Date(2022, 03, 04, 5, 6, 7, 8, time.UTC),
-				}
-				files := []struct {
-					name    string
-					path    string
-					content string
-				}{
-					{"FileInRoot", "test1.txt", "test1 content"},
-					{"LongFileInRoot", "verylongtest1.txt", "test1 content long"},
-					{"LongPath", "test1/test2/test3/test4/longdirname.ext/hello", "long path test content"},
-					{"LargeFile", "test1/largefile.txt", largeString.String()},
-				}
-				for _, c := range files {
-					err := rootInode.PlaceFile(c.path, strings.NewReader(c.content))
-					if err != nil {
-						t.Errorf("failed to place file: %v", err)
-					}
-				}
-				opts := Options{ID: 1234, Label: "TEST", BlockSize: blockSize}
-				if fixed == "Fixed" {
-					// Use a block count that is slightly higher than the minimum
-					opts.BlockCount = 67000
-				}
-				testWithFsck(t, rootInode, opts)
-			})
-		}
-	}
-}
-
-func TestLotsOfFilesFsck(t *testing.T) {
-	if os.Getenv("IN_KTEST") == "true" {
-		t.Skip("In ktest")
-	}
-	rootInode := Inode{
-		Attrs:   AttrDirectory,
-		ModTime: time.Date(2022, 03, 04, 5, 6, 7, 8, time.UTC),
-	}
-	for i := 0; i < (32*1024)-2; i++ {
-		rootInode.Children = append(rootInode.Children, &Inode{
-			Name:    fmt.Sprintf("test%d", i),
-			Content: strings.NewReader("random test content"),
-			// Add some random attributes
-			Attrs: AttrHidden | AttrSystem,
-			// And a random ModTime
-			ModTime: time.Date(2022, 03, 04, 5, 6, 7, 8, time.UTC),
-		})
-	}
-	testWithFsck(t, rootInode, Options{ID: 1234, Label: "TEST"})
-}
diff --git a/metropolis/pkg/fat32/linux_test.go b/metropolis/pkg/fat32/linux_test.go
deleted file mode 100644
index ca62b76..0000000
--- a/metropolis/pkg/fat32/linux_test.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package fat32
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"math/rand"
-	"os"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"golang.org/x/mod/semver"
-	"golang.org/x/sys/unix"
-)
-
-func TestKernelInterop(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-
-	// ONCHANGE(//third_party/linux): Drop this once we move to a Kernel version
-	// newer than 5.19 which will have FAT btime support.
-	kernelVersion, err := os.ReadFile("/proc/sys/kernel/osrelease")
-	if err != nil {
-		t.Fatalf("unable to determine kernel version: %v", err)
-	}
-	haveBtime := semver.Compare("v"+string(kernelVersion), "v5.19.0") >= 0
-
-	type testCase struct {
-		name     string
-		setup    func(root *Inode) error
-		validate func(t *testing.T) error
-	}
-
-	// Random timestamp in UTC, divisible by 10ms
-	testTimestamp1 := time.Date(2022, 03, 04, 5, 6, 7, 10, time.UTC)
-	// Random timestamp in UTC, divisible by 2s
-	testTimestamp2 := time.Date(2022, 03, 04, 5, 6, 8, 0, time.UTC)
-	// Random timestamp in UTC, divisible by 10ms
-	testTimestamp3 := time.Date(2052, 03, 02, 5, 6, 7, 10, time.UTC)
-	// Random timestamp in UTC, divisible by 2s
-	testTimestamp4 := time.Date(2052, 10, 04, 5, 3, 4, 0, time.UTC)
-
-	testContent1 := "testcontent1"
-
-	tests := []testCase{
-		{
-			name: "SimpleFolder",
-			setup: func(root *Inode) error {
-				root.Children = []*Inode{{
-					Name:       "testdir",
-					Attrs:      AttrDirectory,
-					CreateTime: testTimestamp1,
-					ModTime:    testTimestamp2,
-				}}
-				return nil
-			},
-			validate: func(t *testing.T) error {
-				var stat unix.Statx_t
-				if err := unix.Statx(0, "/dut/testdir", 0, unix.STATX_TYPE|unix.STATX_MTIME|unix.STATX_BTIME, &stat); err != nil {
-					availableFiles, err := os.ReadDir("/dut")
-					var availableFileNames []string
-					for _, f := range availableFiles {
-						availableFileNames = append(availableFileNames, f.Name())
-					}
-					if err != nil {
-						t.Fatalf("Failed to list filesystem root directory: %v", err)
-					}
-					t.Fatalf("Failed to stat output: %v (available: %v)", err, strings.Join(availableFileNames, ", "))
-				}
-				if stat.Mode&unix.S_IFDIR == 0 {
-					t.Errorf("testdir is expected to be a directory, but has mode %v", stat.Mode)
-				}
-				btime := time.Unix(stat.Btime.Sec, int64(stat.Btime.Nsec))
-				if !btime.Equal(testTimestamp1) && haveBtime {
-					t.Errorf("testdir btime expected %v, got %v", testTimestamp1, btime)
-				}
-				mtime := time.Unix(stat.Mtime.Sec, int64(stat.Mtime.Nsec))
-				if !mtime.Equal(testTimestamp2) {
-					t.Errorf("testdir mtime expected %v, got %v", testTimestamp2, mtime)
-				}
-				return nil
-			},
-		},
-		{
-			name: "SimpleFile",
-			setup: func(root *Inode) error {
-				root.Children = []*Inode{{
-					Name:       "testfile",
-					CreateTime: testTimestamp3,
-					ModTime:    testTimestamp4,
-					Content:    strings.NewReader(testContent1),
-				}}
-				return nil
-			},
-			validate: func(t *testing.T) error {
-				var stat unix.Statx_t
-				if err := unix.Statx(0, "/dut/testfile", 0, unix.STATX_TYPE|unix.STATX_MTIME|unix.STATX_BTIME, &stat); err != nil {
-					t.Fatalf("failed to stat output: %v", err)
-				}
-				if stat.Mode&unix.S_IFREG == 0 {
-					t.Errorf("testfile is expected to be a file, but has mode %v", stat.Mode)
-				}
-				btime := time.Unix(stat.Btime.Sec, int64(stat.Btime.Nsec))
-				if !btime.Equal(testTimestamp3) && haveBtime {
-					t.Errorf("testfile ctime expected %v, got %v", testTimestamp3, btime)
-				}
-				mtime := time.Unix(stat.Mtime.Sec, int64(stat.Mtime.Nsec))
-				if !mtime.Equal(testTimestamp4) {
-					t.Errorf("testfile mtime expected %v, got %v", testTimestamp3, mtime)
-				}
-				contents, err := os.ReadFile("/dut/testfile")
-				if err != nil {
-					t.Fatalf("failed to read back test file: %v", err)
-				}
-				if string(contents) != testContent1 {
-					t.Errorf("testfile contains %x, got %x", contents, []byte(testContent1))
-				}
-				return nil
-			},
-		},
-		{
-			name: "FolderHierarchy",
-			setup: func(i *Inode) error {
-				i.Children = []*Inode{{
-					Name:       "l1",
-					Attrs:      AttrDirectory,
-					CreateTime: testTimestamp1,
-					ModTime:    testTimestamp2,
-					Children: []*Inode{{
-						Name:       "l2",
-						Attrs:      AttrDirectory,
-						CreateTime: testTimestamp1,
-						ModTime:    testTimestamp2,
-					}},
-				}}
-				return nil
-			},
-			validate: func(t *testing.T) error {
-				dirInfo, err := os.ReadDir("/dut/l1")
-				if err != nil {
-					t.Fatalf("Failed to read top-level directory: %v", err)
-				}
-				require.Len(t, dirInfo, 1, "more subdirs than expected")
-				require.Equal(t, "l2", dirInfo[0].Name(), "unexpected subdir")
-				require.True(t, dirInfo[0].IsDir(), "l1 not a directory")
-				subdirInfo, err := os.ReadDir("/dut/l1/l2")
-				assert.NoError(t, err, "cannot read empty subdir")
-				require.Len(t, subdirInfo, 0, "unexpected subdirs in empty directory")
-				return nil
-			},
-		},
-		{
-			name: "LargeFile",
-			setup: func(i *Inode) error {
-				content := make([]byte, 6500)
-				io.ReadFull(rand.New(rand.NewSource(1)), content)
-				i.Children = []*Inode{{
-					Name:    "test.bin",
-					Content: bytes.NewReader(content),
-				}}
-				return nil
-			},
-			validate: func(t *testing.T) error {
-				var stat unix.Stat_t
-				err := unix.Stat("/dut/test.bin", &stat)
-				assert.NoError(t, err, "failed to stat file")
-				require.EqualValues(t, 6500, stat.Size, "wrong size")
-				file, err := os.Open("/dut/test.bin")
-				assert.NoError(t, err, "failed to open test file")
-				defer file.Close()
-				r := io.LimitReader(rand.New(rand.NewSource(1)), 6500) // Random but deterministic data
-				expected, _ := io.ReadAll(r)
-				actual, err := io.ReadAll(file)
-				assert.NoError(t, err, "failed to read test file")
-				assert.Equal(t, expected, actual, "content not identical")
-				return nil
-			},
-		},
-		{
-			name: "Unicode",
-			setup: func(i *Inode) error {
-				i.Children = []*Inode{{
-					Name:    "✨😂", // Really exercise that UTF-16 conversion
-					Content: strings.NewReader("😂"),
-				}}
-				return nil
-			},
-			validate: func(t *testing.T) error {
-				file, err := os.Open("/dut/✨😂")
-				if err != nil {
-					availableFiles, err := os.ReadDir("/dut")
-					var availableFileNames []string
-					for _, f := range availableFiles {
-						availableFileNames = append(availableFileNames, f.Name())
-					}
-					if err != nil {
-						t.Fatalf("Failed to list filesystem root directory: %v", err)
-					}
-					t.Fatalf("Failed to open unicode file: %v (available files: %v)", err, strings.Join(availableFileNames, ", "))
-				}
-				defer file.Close()
-				contents, err := io.ReadAll(file)
-				if err != nil {
-					t.Errorf("Wrong content: expected %x, got %x", []byte("😂"), contents)
-				}
-				return nil
-			},
-		},
-		{
-			name: "MultipleMetaClusters",
-			setup: func(root *Inode) error {
-				// Only test up to 2048 files as Linux gets VERY slow if going
-				// up to the maximum of approximately 32K
-				for i := 0; i < 2048; i++ {
-					root.Children = append(root.Children, &Inode{
-						Name:    fmt.Sprintf("verylongtestfilename%d", i),
-						Content: strings.NewReader("random test content"),
-					})
-				}
-				return nil
-			},
-			validate: func(t *testing.T) error {
-				files, err := os.ReadDir("/dut")
-				if err != nil {
-					t.Errorf("failed to list directory: %v", err)
-				}
-				if len(files) != 2048 {
-					t.Errorf("wrong number of files: expected %d, got %d", 2048, len(files))
-				}
-				return nil
-			},
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			file, err := os.OpenFile("/dev/ram0", os.O_WRONLY|os.O_TRUNC, 0644)
-			if err != nil {
-				t.Fatalf("failed to create test image: %v", err)
-			}
-			size, err := unix.IoctlGetInt(int(file.Fd()), unix.BLKGETSIZE64)
-			if err != nil {
-				t.Fatalf("failed to get ramdisk size: %v", err)
-			}
-			blockSize, err := unix.IoctlGetInt(int(file.Fd()), unix.BLKBSZGET)
-			if err != nil {
-				t.Fatalf("failed to get ramdisk block size: %v", err)
-			}
-			defer file.Close()
-			rootInode := Inode{
-				Attrs: AttrDirectory,
-			}
-			if err := test.setup(&rootInode); err != nil {
-				t.Fatalf("setup failed: %v", err)
-			}
-			if err := WriteFS(file, rootInode, Options{
-				ID:         1234,
-				Label:      "KTEST",
-				BlockSize:  uint16(blockSize),
-				BlockCount: uint32(size / blockSize),
-			}); err != nil {
-				t.Fatalf("failed to write fileystem: %v", err)
-			}
-			_ = file.Close()
-			if err := os.MkdirAll("/dut", 0755); err != nil {
-				t.Error(err)
-			}
-			// TODO(lorenz): Set CONFIG_FAT_DEFAULT_UTF8 for Monogon Kernel
-			if err := unix.Mount("/dev/ram0", "/dut", "vfat", unix.MS_NOEXEC|unix.MS_NODEV, "utf8=1"); err != nil {
-				t.Fatal(err)
-			}
-			defer unix.Unmount("/dut", 0)
-			test.validate(t)
-		})
-
-	}
-}
diff --git a/metropolis/pkg/fat32/structs.go b/metropolis/pkg/fat32/structs.go
deleted file mode 100644
index 396361d..0000000
--- a/metropolis/pkg/fat32/structs.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package fat32
-
-const (
-	// FAT32 entries are only 28 bits
-	fatMask = 0x0fffffff
-	// Free entries are 0
-	fatFree = 0x0
-	// Entry at the end of a cluster chain
-	fatEOF = 0x0ffffff8
-)
-
-// FAT32 Boot Sector and BIOS Parameter Block. This structure is 512 bytes long,
-// even if the logical block size is longer. The rest should be filled up with
-// zeroes.
-type bootSector struct {
-	// Jump instruction to boot code.
-	JmpInstruction [3]byte
-	// Creator name. "MSWIN4.1" recommended for compatibility.
-	OEMName [8]byte
-	// Count of bytes per block (i.e. logical block size)
-	// Must be one of 512, 1024, 2048 or 4096
-	BlockSize uint16
-	// Number of blocks per allocation unit (cluster).
-	// Must be a power of 2 that is greater than 0.
-	BlocksPerCluster uint8
-	// Number of reserved blocks in the reserved region of the volume starting
-	// at the first block of the volume. This field must not be 0.
-	ReservedBlocks uint16
-	// The count of FAT data structures on the volume. This field should always
-	// contain the value of 2 for any FAT volume of any type.
-	NumFATs uint8
-	_       [4]byte
-	// Legacy value for media determination, must be 0xf8.
-	MediaCode uint8
-	_         [2]byte
-	// Number of sectors per track for 0x13 interrupts.
-	SectorsPerTrack uint16
-	// Number of heads for 0x13 interrupts.
-	NumHeads uint16
-	// Count of hidden blocks preceding the partition that contains this FAT
-	// volume.
-	HiddenBlocks uint32
-	// Total count of blocks on the volume.
-	TotalBlocks uint32
-	// Count of blocks per FAT.
-	BlocksPerFAT uint32
-	// Flags for FAT32
-	Flags uint16
-	_     [2]byte
-	// Cluster number of the first cluster of the root directory. Usually 2.
-	RootClusterNumber uint32
-	// Block number of the FSINFO structure in the reserved area.
-	FSInfoBlock uint16
-	// Block number of the copy of the boot record in the reserved area.
-	BackupStartBlock uint16
-	_                [12]byte
-	// Drive number for 0x13 interrupts.
-	DriveNumber   uint8
-	_             [1]byte
-	BootSignature uint8
-	// ID of this filesystem
-	ID uint32
-	// Human-readable label of this filesystem, padded with spaces (0x20)
-	Label [11]byte
-	// Always set to ASCII "FAT32    "
-	Type [8]byte
-	_    [420]byte
-	// Always 0x55, 0xAA
-	Signature [2]byte
-}
-
-// Special block (usually at block 1) containing additional metadata,
-// specifically the number of free clusters and the next free cluster.
-// Always 512 bytes, rest of the block should be padded with zeroes.
-type fsinfo struct {
-	// Validates that this is an FSINFO block. Always 0x52, 0x52, 0x61, 0x41
-	LeadSignature [4]byte
-	_             [480]byte
-	// Another signature. Always 0x72, 0x72, 0x41, 0x61
-	StructSignature [4]byte
-	// Last known number of free clusters on the volume.
-	FreeCount uint32
-	// Next free cluster hint. All 1's is interpreted as undefined.
-	NextFreeCluster uint32
-	_               [14]byte
-	// One more signature. Always 0x55, 0xAA.
-	TrailingSignature [2]byte
-}
-
-// Directory entry
-type dirEntry struct {
-	// DOS 8.3 file name.
-	DOSName [11]byte
-	// Attribtes of the file or directory, 0x0f reserved to mark entry as a
-	// LFN entry (see lfnEntry below)
-	Attributes        uint8
-	_                 byte
-	CreationTenMilli  uint8 // Actually 10ms units, 0-199 range
-	CreationTime      uint16
-	CreationDate      uint16
-	_                 [2]byte
-	FirstClusterHigh  uint16
-	LastWrittenToTime uint16
-	LastWrittenToDate uint16
-	FirstClusterLow   uint16
-	FileSize          uint32
-}
-
-const (
-	// lastSequenceNumberFlag is logically-ORed with the sequence number of the
-	// last Long File Name entry to mark it as such.
-	lastSequenceNumberFlag = 0x40
-	// codepointsPerEntry is the number of UTF-16 codepoints that fit into a
-	// single Long File Name entry.
-	codepointsPerEntry = 5 + 6 + 2
-)
-
-// VFAT long file name prepended entry
-type lfnEntry struct {
-	SequenceNumber uint8
-	// First 5 UTF-16 code units
-	NamePart1 [5]uint16
-	// Attributes (must be 0x0f)
-	Attributes uint8
-	_          byte
-	// Checksum of the 8.3 name.
-	Checksum uint8
-	// Next 6 UTF-16 code units
-	NamePart2 [6]uint16
-	_         [2]byte
-	// Next 2 UTF-16 code units
-	NamePart3 [2]uint16
-}
diff --git a/metropolis/pkg/fat32/structs_test.go b/metropolis/pkg/fat32/structs_test.go
deleted file mode 100644
index 77a7df0..0000000
--- a/metropolis/pkg/fat32/structs_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package fat32
-
-import (
-	"encoding/binary"
-	"reflect"
-	"testing"
-)
-
-func TestStructureSizes(t *testing.T) {
-	cases := []struct {
-		StructInstance interface{}
-		ExpectedSize   int
-	}{
-		{bootSector{}, 512},
-		{fsinfo{}, 512},
-		{dirEntry{}, 32},
-		{lfnEntry{}, 32},
-	}
-	for _, c := range cases {
-		t.Run(reflect.TypeOf(c.StructInstance).String(), func(t *testing.T) {
-			actualSize := binary.Size(c.StructInstance)
-			if actualSize != c.ExpectedSize {
-				t.Errorf("Expected %d bytes, got %d", c.ExpectedSize, actualSize)
-			}
-		})
-	}
-}
diff --git a/metropolis/pkg/fat32/utils.go b/metropolis/pkg/fat32/utils.go
deleted file mode 100644
index 833665c..0000000
--- a/metropolis/pkg/fat32/utils.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package fat32
-
-import (
-	"fmt"
-	"io"
-	"time"
-)
-
-// Wraps a writer and provides support for writing padding up to a specified
-// alignment.
-// TODO(lorenz): Implement WriterTo when w implements it to allow for copy
-// offload
-type blockWriter struct {
-	w io.Writer
-	n int64
-}
-
-func newBlockWriter(w io.Writer) *blockWriter {
-	return &blockWriter{w: w}
-}
-
-func (b *blockWriter) Write(p []byte) (n int, err error) {
-	n, err = b.w.Write(p)
-	b.n += int64(n)
-	return
-}
-
-func (b *blockWriter) FinishBlock(alignment int64, mustZero bool) (err error) {
-	requiredBytes := (alignment - (b.n % alignment)) % alignment
-	if requiredBytes == 0 {
-		return nil
-	}
-	// Do not actually write out zeroes if not necessary
-	if s, ok := b.w.(io.Seeker); ok && !mustZero {
-		if _, err := s.Seek(requiredBytes-1, io.SeekCurrent); err != nil {
-			return fmt.Errorf("failed to seek to create hole for empty block: %w", err)
-		}
-		if _, err := b.w.Write([]byte{0x00}); err != nil {
-			return fmt.Errorf("failed to write last byte to create hole: %w", err)
-		}
-		b.n += requiredBytes
-		return
-	}
-	emptyBuf := make([]byte, 1*1024*1024)
-	for requiredBytes > 0 {
-		curBlockBytes := requiredBytes
-		if curBlockBytes > int64(len(emptyBuf)) {
-			curBlockBytes = int64(len(emptyBuf))
-		}
-		_, err = b.Write(emptyBuf[:curBlockBytes])
-		if err != nil {
-			return
-		}
-		requiredBytes -= curBlockBytes
-	}
-	return
-}
-
-// timeToMsDosTime converts a time.Time to an MS-DOS date and time.
-// The resolution is 2s with fTime and 10ms if fTenMils is also used.
-// See: http://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx
-func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16, fTenMils uint8) {
-	t = t.In(time.UTC)
-	if t.Year() < 1980 {
-		t = time.Date(1980, 1, 1, 0, 0, 0, 0, time.UTC)
-	}
-	if t.Year() > 2107 {
-		t = time.Date(2107, 12, 31, 23, 59, 59, 0, time.UTC)
-	}
-	fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
-	fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
-	fTenMils = uint8(t.Nanosecond()/1e7 + (t.Second()%2)*100)
-	return
-}
diff --git a/metropolis/pkg/fileargs/BUILD.bazel b/metropolis/pkg/fileargs/BUILD.bazel
deleted file mode 100644
index 0b7b2fc..0000000
--- a/metropolis/pkg/fileargs/BUILD.bazel
+++ /dev/null
@@ -1,9 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "fileargs",
-    srcs = ["fileargs.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/fileargs",
-    visibility = ["//metropolis:__subpackages__"],
-    deps = ["@org_golang_x_sys//unix"],
-)
diff --git a/metropolis/pkg/fileargs/fileargs.go b/metropolis/pkg/fileargs/fileargs.go
deleted file mode 100644
index 7b14550..0000000
--- a/metropolis/pkg/fileargs/fileargs.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileargs
-
-import (
-	"crypto/rand"
-	"encoding/hex"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-
-	"golang.org/x/sys/unix"
-)
-
-// DefaultSize is the default size limit for FileArgs
-const DefaultSize = 4 * 1024 * 1024
-
-// TempDirectory is the directory where FileArgs will mount the actual files
-// to. Defaults to os.TempDir() but can be globally overridden by the
-// application before any FileArgs are used.
-var TempDirectory = os.TempDir()
-
-type FileArgs struct {
-	path      string
-	lastError error
-}
-
-// New initializes a new set of file-based arguments. Remember to call Close()
-// if you're done using it, otherwise this leaks memory and mounts.
-func New() (*FileArgs, error) {
-	return NewWithSize(DefaultSize)
-}
-
-// NewWithSize is the same as new, but with a custom size limit. Please be aware
-// that this data cannot be swapped out and using a size limit that's too high
-// can deadlock your kernel.
-func NewWithSize(size uint64) (*FileArgs, error) {
-	randomNameRaw := make([]byte, 128/8)
-	if _, err := io.ReadFull(rand.Reader, randomNameRaw); err != nil {
-		return nil, err
-	}
-	tmpPath := filepath.Join(TempDirectory, hex.EncodeToString(randomNameRaw))
-	if err := os.MkdirAll(tmpPath, 0700); err != nil {
-		return nil, err
-	}
-	// This uses ramfs instead of tmpfs because we never want to swap this for
-	// security reasons
-	if err := unix.Mount("none", tmpPath, "ramfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, fmt.Sprintf("size=%v", size)); err != nil {
-		return nil, err
-	}
-	return &FileArgs{
-		path: tmpPath,
-	}, nil
-}
-
-// ArgPath returns the path of the temporary file for this argument. It names
-// the temporary file according to name.
-func (f *FileArgs) ArgPath(name string, content []byte) string {
-	if f.lastError != nil {
-		return ""
-	}
-
-	path := filepath.Join(f.path, name)
-
-	if err := os.WriteFile(path, content, 0600); err != nil {
-		f.lastError = err
-		return ""
-	}
-
-	return path
-}
-
-// FileOpt returns a full option with the temporary file name already filled
-// in. Example:
-//
-// option := FileOpt("--testopt", "test.txt", []byte("hello"))
-// option == "--testopt=/tmp/daf8ed.../test.txt"
-func (f *FileArgs) FileOpt(optName, fileName string, content []byte) string {
-	return fmt.Sprintf("%v=%v", optName, f.ArgPath(fileName, content))
-}
-
-func (f *FileArgs) Error() error {
-	return f.lastError
-}
-
-func (f *FileArgs) Close() error {
-	if err := unix.Unmount(f.path, 0); err != nil {
-		return err
-	}
-	return os.Remove(f.path)
-}
diff --git a/metropolis/pkg/freeport/BUILD.bazel b/metropolis/pkg/freeport/BUILD.bazel
deleted file mode 100644
index ee965ee..0000000
--- a/metropolis/pkg/freeport/BUILD.bazel
+++ /dev/null
@@ -1,8 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "freeport",
-    srcs = ["freeport.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/freeport",
-    visibility = ["//visibility:public"],
-)
diff --git a/metropolis/pkg/freeport/freeport.go b/metropolis/pkg/freeport/freeport.go
deleted file mode 100644
index da52311..0000000
--- a/metropolis/pkg/freeport/freeport.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package freeport
-
-import (
-	"io"
-	"net"
-)
-
-// AllocateTCPPort allocates a TCP port on the looopback address, and starts a
-// temporary listener on it. That listener is returned to the caller alongside with
-// the allocated port number. The listener must be closed right before the port is
-// used by the caller. This naturally still leaves a race condition window where
-// that port number might be snatched up by some other process, but there doesn't
-// seem to be a better way to do this.
-func AllocateTCPPort() (uint16, io.Closer, error) {
-	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
-	if err != nil {
-		return 0, nil, err
-	}
-
-	l, err := net.ListenTCP("tcp", addr)
-	if err != nil {
-		return 0, nil, err
-	}
-	return uint16(l.Addr().(*net.TCPAddr).Port), l, nil
-}
-
-// MustConsume takes the result of AllocateTCPPort, closes the listener and returns
-// the allocated port. If anything goes wrong (port could not be allocated or
-// closed) it will panic.
-func MustConsume(port uint16, lis io.Closer, err error) int {
-	if err != nil {
-		panic(err)
-	}
-	if err := lis.Close(); err != nil {
-		panic(err)
-	}
-	return int(port)
-}
diff --git a/metropolis/pkg/fsquota/BUILD.bazel b/metropolis/pkg/fsquota/BUILD.bazel
deleted file mode 100644
index 5fa7037..0000000
--- a/metropolis/pkg/fsquota/BUILD.bazel
+++ /dev/null
@@ -1,33 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
-
-go_library(
-    name = "fsquota",
-    srcs = ["fsquota.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/fsquota",
-    visibility = ["//metropolis:__subpackages__"],
-    deps = [
-        "//metropolis/pkg/fsquota/fsxattrs",
-        "//metropolis/pkg/fsquota/quotactl",
-        "@org_golang_x_sys//unix",
-    ],
-)
-
-go_test(
-    name = "fsquota_test",
-    srcs = ["fsquota_test.go"],
-    embed = [":fsquota"],
-    pure = "on",
-    deps = [
-        "@com_github_stretchr_testify//require",
-        "@org_golang_x_sys//unix",
-    ],
-)
-
-ktest(
-    cmdline = "ramdisk_size=51200",
-    files_cc = {
-        "@xfsprogs//:mkfs": "/mkfs.xfs",
-    },
-    tester = ":fsquota_test",
-)
diff --git a/metropolis/pkg/fsquota/fsquota.go b/metropolis/pkg/fsquota/fsquota.go
deleted file mode 100644
index af87d9f..0000000
--- a/metropolis/pkg/fsquota/fsquota.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package fsquota provides a simplified interface to interact with Linux's
-// filesystem qouta API.  It only supports setting quotas on directories, not
-// groups or users.  Quotas need to be already enabled on the filesystem to be
-// able to use them using this package.  See the quotactl package if you intend
-// to use this on a filesystem where quotas need to be enabled manually.
-package fsquota
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"os"
-
-	"golang.org/x/sys/unix"
-
-	"source.monogon.dev/metropolis/pkg/fsquota/fsxattrs"
-	"source.monogon.dev/metropolis/pkg/fsquota/quotactl"
-)
-
-// SetQuota sets the quota of bytes and/or inodes in a given path. To not set a
-// limit, set the corresponding argument to zero. Setting both arguments to
-// zero removes the quota entirely.  This function can only be called on an
-// empty directory. It can't be used to create a quota below a directory which
-// already has a quota since Linux doesn't offer hierarchical quotas.
-func SetQuota(path string, maxBytes uint64, maxInodes uint64) error {
-	dir, err := os.Open(path)
-	if err != nil {
-		return err
-	}
-	defer dir.Close()
-	var valid uint32
-	if maxBytes > 0 {
-		valid |= quotactl.FlagBLimitsValid
-	}
-	if maxInodes > 0 {
-		valid |= quotactl.FlagILimitsValid
-	}
-
-	attrs, err := fsxattrs.Get(dir)
-	if err != nil {
-		return err
-	}
-
-	var lastID = attrs.ProjectID
-	if lastID == 0 {
-		// No project/quota exists for this directory, assign a new project
-		// quota.
-		// TODO(lorenz): This is racy, but the kernel does not support
-		// atomically assigning quotas. So this needs to be added to the
-		// kernels setquota interface. Due to the short time window and
-		// infrequent calls this should not be an immediate issue.
-		for {
-			quota, err := quotactl.GetNextQuota(dir, quotactl.QuotaTypeProject, lastID)
-			if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ESRCH) {
-				// We have enumerated all quotas, nothing exists here
-				break
-			} else if err != nil {
-				return fmt.Errorf("failed to call GetNextQuota: %w", err)
-			}
-			if quota.ID > lastID+1 {
-				// Take the first ID in the quota ID gap
-				lastID++
-				break
-			}
-			lastID++
-		}
-	}
-
-	// If both limits are zero, this is a delete operation, process it as such
-	if maxBytes == 0 && maxInodes == 0 {
-		valid = quotactl.FlagBLimitsValid | quotactl.FlagILimitsValid
-		attrs.ProjectID = 0
-		attrs.Flags &= ^fsxattrs.FlagProjectInherit
-	} else {
-		attrs.ProjectID = lastID
-		attrs.Flags |= fsxattrs.FlagProjectInherit
-	}
-
-	if err := fsxattrs.Set(dir, attrs); err != nil {
-		return err
-	}
-
-	// Always round up to the nearest block size
-	bytesLimitBlocks := uint64(math.Ceil(float64(maxBytes) / float64(1024)))
-
-	return quotactl.SetQuota(dir, quotactl.QuotaTypeProject, lastID, &quotactl.Quota{
-		BHardLimit: bytesLimitBlocks,
-		BSoftLimit: bytesLimitBlocks,
-		IHardLimit: maxInodes,
-		ISoftLimit: maxInodes,
-		Valid:      valid,
-	})
-}
-
-type Quota struct {
-	Bytes      uint64
-	BytesUsed  uint64
-	Inodes     uint64
-	InodesUsed uint64
-}
-
-// GetQuota returns the current active quota and its utilization at the given
-// path
-func GetQuota(path string) (*Quota, error) {
-	dir, err := os.Open(path)
-	if err != nil {
-		return nil, err
-	}
-	defer dir.Close()
-	attrs, err := fsxattrs.Get(dir)
-	if err != nil {
-		return nil, err
-	}
-	if attrs.ProjectID == 0 {
-		return nil, os.ErrNotExist
-	}
-	quota, err := quotactl.GetQuota(dir, quotactl.QuotaTypeProject, attrs.ProjectID)
-	if err != nil {
-		return nil, err
-	}
-	return &Quota{
-		Bytes:      quota.BHardLimit * 1024,
-		BytesUsed:  quota.CurSpace,
-		Inodes:     quota.IHardLimit,
-		InodesUsed: quota.CurInodes,
-	}, nil
-}
diff --git a/metropolis/pkg/fsquota/fsquota_test.go b/metropolis/pkg/fsquota/fsquota_test.go
deleted file mode 100644
index 4044b60..0000000
--- a/metropolis/pkg/fsquota/fsquota_test.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsquota
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"os"
-	"os/exec"
-	"syscall"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-	"golang.org/x/sys/unix"
-)
-
-// withinTolerance is a helper for asserting that a value is within a certain
-// percentage of the expected value. The tolerance is specified as a float
-// between 0 (exact match) and 1 (between 0 and twice the expected value).
-func withinTolerance(t *testing.T, expected uint64, actual uint64, tolerance float64, name string) {
-	t.Helper()
-	delta := uint64(math.Round(float64(expected) * tolerance))
-	lowerBound := expected - delta
-	upperBound := expected + delta
-	if actual < lowerBound {
-		t.Errorf("Value %v (%v) is too low, expected between %v and %v", name, actual, lowerBound, upperBound)
-	}
-	if actual > upperBound {
-		t.Errorf("Value %v (%v) is too high, expected between %v and %v", name, actual, lowerBound, upperBound)
-	}
-}
-
-func TestBasic(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-	mkfsCmd := exec.Command("/mkfs.xfs", "-qf", "/dev/ram0")
-	if _, err := mkfsCmd.Output(); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Mkdir("/test", 0755); err != nil {
-		t.Error(err)
-	}
-
-	if err := unix.Mount("/dev/ram0", "/test", "xfs", unix.MS_NOEXEC|unix.MS_NODEV, "prjquota"); err != nil {
-		t.Fatal(err)
-	}
-	defer unix.Unmount("/test", 0)
-	defer os.RemoveAll("/test")
-	t.Run("SetQuota", func(t *testing.T) {
-		defer func() {
-			os.RemoveAll("/test/set")
-		}()
-		if err := os.Mkdir("/test/set", 0755); err != nil {
-			t.Fatal(err)
-		}
-		if err := SetQuota("/test/set", 1024*1024, 100); err != nil {
-			t.Fatal(err)
-		}
-	})
-	t.Run("SetQuotaAndExhaust", func(t *testing.T) {
-		defer func() {
-			os.RemoveAll("/test/sizequota")
-		}()
-		if err := os.Mkdir("/test/sizequota", 0755); err != nil {
-			t.Fatal(err)
-		}
-		const bytesQuota = 1024 * 1024 // 1MiB
-		if err := SetQuota("/test/sizequota", bytesQuota, 0); err != nil {
-			t.Fatal(err)
-		}
-		testfile, err := os.Create("/test/sizequota/testfile")
-		if err != nil {
-			t.Fatal(err)
-		}
-		testdata := make([]byte, 1024)
-		var bytesWritten int
-		for {
-			n, err := testfile.Write(testdata)
-			if err != nil {
-				var pathErr *os.PathError
-				if errors.As(err, &pathErr) && errors.Is(pathErr.Err, syscall.ENOSPC) {
-					// Running out of space is the only acceptable error to continue execution
-					break
-				}
-				t.Fatal(err)
-			}
-			bytesWritten += n
-		}
-		if bytesWritten > bytesQuota {
-			t.Errorf("Wrote %v bytes, quota is only %v bytes", bytesWritten, bytesQuota)
-		}
-	})
-	t.Run("GetQuotaReadbackAndUtilization", func(t *testing.T) {
-		defer func() {
-			os.RemoveAll("/test/readback")
-		}()
-		if err := os.Mkdir("/test/readback", 0755); err != nil {
-			t.Fatal(err)
-		}
-		const bytesQuota = 1024 * 1024 // 1MiB
-		const inodesQuota = 100
-		if err := SetQuota("/test/readback", bytesQuota, inodesQuota); err != nil {
-			t.Fatal(err)
-		}
-		sizeFileData := make([]byte, 512*1024)
-		if err := os.WriteFile("/test/readback/512kfile", sizeFileData, 0644); err != nil {
-			t.Fatal(err)
-		}
-
-		quotaUtil, err := GetQuota("/test/readback")
-		if err != nil {
-			t.Fatal(err)
-		}
-		require.Equal(t, uint64(bytesQuota), quotaUtil.Bytes, "bytes quota readback incorrect")
-		require.Equal(t, uint64(inodesQuota), quotaUtil.Inodes, "inodes quota readback incorrect")
-
-		// Give 10% tolerance for quota used values to account for metadata
-		// overhead and internal structures that are also in there. If it's out
-		// by more than that it's an issue anyways.
-		withinTolerance(t, uint64(len(sizeFileData)), quotaUtil.BytesUsed, 0.1, "BytesUsed")
-
-		// Write 50 inodes for a total of 51 (with the 512K file)
-		for i := 0; i < 50; i++ {
-			if err := os.WriteFile(fmt.Sprintf("/test/readback/ifile%v", i), []byte("test"), 0644); err != nil {
-				t.Fatal(err)
-			}
-		}
-
-		quotaUtil, err = GetQuota("/test/readback")
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		withinTolerance(t, 51, quotaUtil.InodesUsed, 0.1, "InodesUsed")
-	})
-}
diff --git a/metropolis/pkg/fsquota/fsxattrs/BUILD.bazel b/metropolis/pkg/fsquota/fsxattrs/BUILD.bazel
deleted file mode 100644
index 2d23e99..0000000
--- a/metropolis/pkg/fsquota/fsxattrs/BUILD.bazel
+++ /dev/null
@@ -1,9 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "fsxattrs",
-    srcs = ["fsxattrs.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/fsquota/fsxattrs",
-    visibility = ["//metropolis:__subpackages__"],
-    deps = ["@org_golang_x_sys//unix"],
-)
diff --git a/metropolis/pkg/fsquota/fsxattrs/fsxattrs.go b/metropolis/pkg/fsquota/fsxattrs/fsxattrs.go
deleted file mode 100644
index 135b886..0000000
--- a/metropolis/pkg/fsquota/fsxattrs/fsxattrs.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsxattrs
-
-import (
-	"fmt"
-	"os"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-type FSXAttrFlag uint32
-
-// Defined in uapi/linux/fs.h
-const (
-	FlagRealtime        FSXAttrFlag = 0x00000001
-	FlagPreallocated    FSXAttrFlag = 0x00000002
-	FlagImmutable       FSXAttrFlag = 0x00000008
-	FlagAppend          FSXAttrFlag = 0x00000010
-	FlagSync            FSXAttrFlag = 0x00000020
-	FlagNoATime         FSXAttrFlag = 0x00000040
-	FlagNoDump          FSXAttrFlag = 0x00000080
-	FlagRealtimeInherit FSXAttrFlag = 0x00000100
-	FlagProjectInherit  FSXAttrFlag = 0x00000200
-	FlagNoSymlinks      FSXAttrFlag = 0x00000400
-	FlagExtentSize      FSXAttrFlag = 0x00000800
-	FlagNoDefragment    FSXAttrFlag = 0x00002000
-	FlagFilestream      FSXAttrFlag = 0x00004000
-	FlagDAX             FSXAttrFlag = 0x00008000
-	FlagCOWExtentSize   FSXAttrFlag = 0x00010000
-	FlagHasAttribute    FSXAttrFlag = 0x80000000
-)
-
-// FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR are defined in uapi/linux/fs.h
-// and normally would be imported from x/sys/unix. Since they don't exist
-// there define them here for now.
-const (
-	FS_IOC_FSGETXATTR = 0x801c581f
-	FS_IOC_FSSETXATTR = 0x401c5820
-)
-
-type FSXAttrs struct {
-	Flags         FSXAttrFlag
-	ExtentSize    uint32
-	ExtentCount   uint32
-	ProjectID     uint32
-	CoWExtentSize uint32
-	_pad          [8]byte
-}
-
-func Get(file *os.File) (*FSXAttrs, error) {
-	var attrs FSXAttrs
-	_, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), FS_IOC_FSGETXATTR, uintptr(unsafe.Pointer(&attrs)))
-	if errno != 0 {
-		return nil, fmt.Errorf("failed to execute getFSXAttrs: %v", errno)
-	}
-	return &attrs, nil
-}
-
-func Set(file *os.File, attrs *FSXAttrs) error {
-	_, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), FS_IOC_FSSETXATTR, uintptr(unsafe.Pointer(attrs)))
-	if errno != 0 {
-		return fmt.Errorf("failed to execute setFSXAttrs: %v", errno)
-	}
-	return nil
-}
diff --git a/metropolis/pkg/fsquota/quotactl/BUILD.bazel b/metropolis/pkg/fsquota/quotactl/BUILD.bazel
deleted file mode 100644
index 108b771..0000000
--- a/metropolis/pkg/fsquota/quotactl/BUILD.bazel
+++ /dev/null
@@ -1,9 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "quotactl",
-    srcs = ["quotactl.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/fsquota/quotactl",
-    visibility = ["//metropolis:__subpackages__"],
-    deps = ["@org_golang_x_sys//unix"],
-)
diff --git a/metropolis/pkg/fsquota/quotactl/quotactl.go b/metropolis/pkg/fsquota/quotactl/quotactl.go
deleted file mode 100644
index c7939ed..0000000
--- a/metropolis/pkg/fsquota/quotactl/quotactl.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package quotactl implements a low-level wrapper around the modern portion of
-// Linux's quotactl() syscall. See the fsquota package for a nicer interface to
-// the most common part of this API.
-package quotactl
-
-import (
-	"fmt"
-	"os"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-type QuotaType uint
-
-const (
-	QuotaTypeUser QuotaType = iota
-	QuotaTypeGroup
-	QuotaTypeProject
-)
-
-const (
-	Q_SYNC uint = (0x800001 + iota) << 8
-	Q_QUOTAON
-	Q_QUOTAOFF
-	Q_GETFMT
-	Q_GETINFO
-	Q_SETINFO
-	Q_GETQUOTA
-	Q_SETQUOTA
-	Q_GETNEXTQUOTA
-)
-
-const (
-	FlagBLimitsValid = 1 << iota
-	FlagSpaceValid
-	FlagILimitsValid
-	FlagInodesValid
-	FlagBTimeValid
-	FlagITimeValid
-)
-
-type DQInfo struct {
-	Bgrace uint64
-	Igrace uint64
-	Flags  uint32
-	Valid  uint32
-}
-
-type Quota struct {
-	BHardLimit uint64 // Both Byte limits are prescaled by 1024 (so are in KiB), but CurSpace is in B
-	BSoftLimit uint64
-	CurSpace   uint64
-	IHardLimit uint64
-	ISoftLimit uint64
-	CurInodes  uint64
-	BTime      uint64
-	ITime      uint64
-	Valid      uint32
-}
-
-type NextDQBlk struct {
-	HardLimitBytes  uint64
-	SoftLimitBytes  uint64
-	CurrentBytes    uint64
-	HardLimitInodes uint64
-	SoftLimitInodes uint64
-	CurrentInodes   uint64
-	BTime           uint64
-	ITime           uint64
-	Valid           uint32
-	ID              uint32
-}
-
-type QuotaFormat uint32
-
-// Collected from quota_format_type structs
-const (
-	// QuotaFormatNone is a special case where all quota information is
-	// stored inside filesystem metadata and thus requires no quotaFilePath.
-	QuotaFormatNone   QuotaFormat = 0
-	QuotaFormatVFSOld QuotaFormat = 1
-	QuotaFormatVFSV0  QuotaFormat = 2
-	QuotaFormatOCFS2  QuotaFormat = 3
-	QuotaFormatVFSV1  QuotaFormat = 4
-)
-
-// QuotaOn turns quota accounting and enforcement on
-func QuotaOn(fd *os.File, qtype QuotaType, quotaFormat QuotaFormat, quotaFilePath string) error {
-	pathArg, err := unix.BytePtrFromString(quotaFilePath)
-	if err != nil {
-		return err
-	}
-	_, _, errNo := unix.Syscall6(unix.SYS_QUOTACTL_FD, fd.Fd(), uintptr(Q_QUOTAON|uint(qtype)), uintptr(quotaFormat), uintptr(unsafe.Pointer(pathArg)), 0, 0)
-	if errNo != unix.Errno(0) {
-		return errNo
-	}
-	return nil
-}
-
-// QuotaOff turns quotas off
-func QuotaOff(fd *os.File, qtype QuotaType) error {
-	_, _, err := unix.Syscall6(unix.SYS_QUOTACTL_FD, fd.Fd(), uintptr(Q_QUOTAOFF|uint(qtype)), 0, 0, 0, 0)
-	if err != unix.Errno(0) {
-		return err
-	}
-	return nil
-}
-
-// GetFmt gets the quota format used on given filesystem
-func GetFmt(fd *os.File, qtype QuotaType) (QuotaFormat, error) {
-	var fmt uint32
-	_, _, err := unix.Syscall6(unix.SYS_QUOTACTL_FD, fd.Fd(), uintptr(Q_GETFMT|uint(qtype)), 0, uintptr(unsafe.Pointer(&fmt)), 0, 0)
-	if err != unix.Errno(0) {
-		return 0, err
-	}
-	return QuotaFormat(fmt), nil
-}
-
-// GetInfo gets information about quota files
-func GetInfo(fd *os.File, qtype QuotaType) (*DQInfo, error) {
-	var info DQInfo
-	_, _, err := unix.Syscall6(unix.SYS_QUOTACTL_FD, fd.Fd(), uintptr(Q_GETINFO|uint(qtype)), 0, uintptr(unsafe.Pointer(&info)), 0, 0)
-	if err != unix.Errno(0) {
-		return nil, err
-	}
-	return &info, nil
-}
-
-// SetInfo sets information about quota files
-func SetInfo(fd *os.File, qtype QuotaType, info *DQInfo) error {
-	_, _, err := unix.Syscall6(unix.SYS_QUOTACTL_FD, fd.Fd(), uintptr(Q_SETINFO|uint(qtype)), 0, uintptr(unsafe.Pointer(info)), 0, 0)
-	if err != unix.Errno(0) {
-		return err
-	}
-	return nil
-}
-
-// GetQuota gets user quota structure
-func GetQuota(fd *os.File, qtype QuotaType, id uint32) (*Quota, error) {
-	var info Quota
-	_, _, err := unix.Syscall6(unix.SYS_QUOTACTL_FD, fd.Fd(), uintptr(Q_GETQUOTA|uint(qtype)), uintptr(id), uintptr(unsafe.Pointer(&info)), 0, 0)
-	if err != unix.Errno(0) {
-		return nil, err
-	}
-	return &info, nil
-}
-
-// GetNextQuota gets disk limits and usage > ID
-func GetNextQuota(fd *os.File, qtype QuotaType, id uint32) (*NextDQBlk, error) {
-	var info NextDQBlk
-	_, _, err := unix.Syscall6(unix.SYS_QUOTACTL_FD, fd.Fd(), uintptr(Q_GETNEXTQUOTA|uint(qtype)), uintptr(id), uintptr(unsafe.Pointer(&info)), 0, 0)
-	if err != unix.Errno(0) {
-		return nil, err
-	}
-	return &info, nil
-}
-
-// SetQuota sets the given quota
-func SetQuota(fd *os.File, qtype QuotaType, id uint32, quota *Quota) error {
-	_, _, err := unix.Syscall6(unix.SYS_QUOTACTL_FD, fd.Fd(), uintptr(Q_SETQUOTA|uint(qtype)), uintptr(id), uintptr(unsafe.Pointer(quota)), 0, 0)
-	if err != unix.Errno(0) {
-		return fmt.Errorf("failed to set quota: %w", err)
-	}
-	return nil
-}
-
-// Sync syncs disk copy of filesystems quotas. If device is empty it syncs all
-// filesystems.
-func Sync(fd *os.File) error {
-	if fd != nil {
-		_, _, err := unix.Syscall6(unix.SYS_QUOTACTL_FD, fd.Fd(), uintptr(Q_SYNC), 0, 0, 0, 0)
-		if err != unix.Errno(0) {
-			return err
-		}
-	} else {
-		_, _, err := unix.Syscall6(unix.SYS_QUOTACTL, uintptr(Q_SYNC), 0, 0, 0, 0, 0)
-		if err != unix.Errno(0) {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/metropolis/pkg/gpt/BUILD.bazel b/metropolis/pkg/gpt/BUILD.bazel
deleted file mode 100644
index 2bb0b4a..0000000
--- a/metropolis/pkg/gpt/BUILD.bazel
+++ /dev/null
@@ -1,37 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
-
-go_library(
-    name = "gpt",
-    srcs = [
-        "gpt.go",
-        "mbr.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/gpt",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//metropolis/pkg/blockdev",
-        "//metropolis/pkg/msguid",
-        "@com_github_google_uuid//:uuid",
-    ],
-)
-
-go_test(
-    name = "gpt_test",
-    srcs = [
-        "gpt_test.go",
-        "linux_test.go",
-        "mbr_test.go",
-        "structs_test.go",
-    ],
-    embed = [":gpt"],
-    deps = [
-        "//metropolis/pkg/blockdev",
-        "@com_github_google_uuid//:uuid",
-    ],
-)
-
-ktest(
-    cmdline = "ramdisk_size=4096",
-    tester = ":gpt_test",
-)
diff --git a/metropolis/pkg/gpt/gpt.go b/metropolis/pkg/gpt/gpt.go
deleted file mode 100644
index 58c0527..0000000
--- a/metropolis/pkg/gpt/gpt.go
+++ /dev/null
@@ -1,733 +0,0 @@
-// Package gpt implements reading and writing GUID Partition Tables as specified
-// in the UEFI Specification. It only implements up to 128 partitions per table
-// (same as most other implementations) as more would require a dynamic table
-// size, significantly complicating the code for little gain.
-package gpt
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"hash/crc32"
-	"sort"
-	"strings"
-	"unicode/utf16"
-
-	"github.com/google/uuid"
-
-	"source.monogon.dev/metropolis/pkg/blockdev"
-	"source.monogon.dev/metropolis/pkg/msguid"
-)
-
-var gptSignature = [8]byte{'E', 'F', 'I', ' ', 'P', 'A', 'R', 'T'}
-var gptRevision uint32 = 0x00010000 // First 2 bytes major, second 2 bytes minor
-
-// See UEFI Specification 2.9 Table 5-5
-type header struct {
-	Signature   [8]byte
-	Revision    uint32
-	HeaderSize  uint32
-	HeaderCRC32 uint32
-	_           [4]byte
-
-	HeaderBlock          uint64
-	AlternateHeaderBlock uint64
-	FirstUsableBlock     uint64
-	LastUsableBlock      uint64
-
-	ID [16]byte
-
-	PartitionEntriesStartBlock uint64
-	PartitionEntryCount        uint32
-	PartitionEntrySize         uint32
-	PartitionEntriesCRC32      uint32
-}
-
-// See UEFI Specification 2.9 Table 5-6
-type partition struct {
-	Type       [16]byte
-	ID         [16]byte
-	FirstBlock uint64
-	LastBlock  uint64
-	Attributes uint64
-	Name       [36]uint16
-}
-
-var (
-	PartitionTypeEFISystem = uuid.MustParse("C12A7328-F81F-11D2-BA4B-00A0C93EC93B")
-)
-
-// Attribute is a bitfield of attributes set on a partition. Bits 0 to 47 are
-// reserved for UEFI specification use and all current assignments are in the
-// following const block. Bits 48 to 64 are available for per-Type use by
-// the organization controlling the partition Type.
-type Attribute uint64
-
-const (
-	// AttrRequiredPartition indicates that this partition is required for the
-	// platform to function. Mostly used by vendors to mark things like recovery
-	// partitions.
-	AttrRequiredPartition = 1 << 0
-	// AttrNoBlockIOProto indicates that EFI firmware must not provide an EFI
-	// block device (EFI_BLOCK_IO_PROTOCOL) for this partition.
-	AttrNoBlockIOProto = 1 << 1
-	// AttrLegacyBIOSBootable indicates to special-purpose software outside of
-	// UEFI that this partition can be booted using a traditional PC BIOS.
-	// Don't use this unless you know that you need it specifically.
-	AttrLegacyBIOSBootable = 1 << 2
-)
-
-// PerTypeAttrs returns the top 24 bits which are reserved for custom per-Type
-// attributes. The top 8 bits of the returned uint32 are always 0.
-func (a Attribute) PerTypeAttrs() uint32 {
-	return uint32(a >> 48)
-}
-
-// SetPerTypeAttrs sets the top 24 bits which are reserved for custom per-Type
-// attributes. It does not touch the lower attributes which are specified by the
-// UEFI specification. The top 8 bits of v are silently discarded.
-func (a *Attribute) SetPerTypeAttrs(v uint32) {
-	*a &= 0x000000FF_FFFFFFFF
-	*a |= Attribute(v) << 48
-}
-
-type Partition struct {
-	// Name of the partition, will be truncated if it expands to more than 36
-	// UTF-16 code points. Not all systems can display non-BMP code points.
-	Name string
-	// Type is the type of Table partition, can either be one of the predefined
-	// constants by the UEFI specification or a custom type identifier.
-	// Note that the all-zero UUID denotes an empty partition slot, so this
-	// MUST be set to something, otherwise it is not treated as a partition.
-	Type uuid.UUID
-	// ID is a unique identifier for this specific partition. It should be
-	// changed when cloning the partition.
-	ID uuid.UUID
-	// The first logical block of the partition (inclusive)
-	FirstBlock uint64
-	// The last logical block of the partition (inclusive)
-	LastBlock uint64
-	// Bitset of attributes of this partition.
-	Attributes Attribute
-
-	*blockdev.Section
-}
-
-// SizeBlocks returns the size of the partition in blocks
-func (p *Partition) SizeBlocks() uint64 {
-	return 1 + p.LastBlock - p.FirstBlock
-}
-
-// IsUnused checks if the partition is unused, i.e. it is nil or its type is
-// the null UUID.
-func (p *Partition) IsUnused() bool {
-	if p == nil {
-		return true
-	}
-	return p.Type == uuid.Nil
-}
-
-// New returns an empty table on the given block device.
-// It does not read any existing GPT on the disk (use Read for that), nor does
-// it write anything until Write is called.
-func New(b blockdev.BlockDev) (*Table, error) {
-	return &Table{
-		b: b,
-	}, nil
-}
-
-type Table struct {
-	// ID is the unique identifier of this specific disk / GPT.
-	// If this is left uninitialized/all-zeroes a new random ID is automatically
-	// generated when writing.
-	ID uuid.UUID
-
-	// Data put at the start of the very first block. Gets loaded and executed
-	// by a legacy BIOS bootloader. This can be used to make GPT-partitioned
-	// disks bootable by legacy systems or display a nice error message.
-	// Maximum length is 440 bytes, if that is exceeded Write returns an error.
-	// Should be left empty if the device is not bootable and/or compatibility
-	// with BIOS booting is not required. Only useful on x86 systems.
-	BootCode []byte
-
-	// Partitions contains the list of partitions in this table. This is
-	// artificially limited to 128 partitions. Holes in the partition list are
-	// represented as nil values. Call IsUnused before checking any other
-	// properties of the partition.
-	Partitions []*Partition
-
-	b blockdev.BlockDev
-}
-
-type addOptions struct {
-	preferEnd        bool
-	keepEmptyEntries bool
-	alignment        int64
-}
-
-// AddOption is a bitset controlling various
-type AddOption func(*addOptions)
-
-// WithPreferEnd tries to put the partition as close to the end as possible
-// instead of as close to the start.
-func WithPreferEnd() AddOption {
-	return func(options *addOptions) {
-		options.preferEnd = true
-	}
-}
-
-// WithKeepEmptyEntries does not fill up empty entries which are followed by
-// filled ones. It always appends the partition after the last used entry.
-// Without this flag, the partition is placed in the first empty entry.
-func WithKeepEmptyEntries() AddOption {
-	return func(options *addOptions) {
-		options.keepEmptyEntries = true
-	}
-}
-
-// WithAlignment allows aligning the partition start block to a non-default
-// value. By default, these are aligned to 1MiB.
-// Only use this flag if you are certain you need it, it can cause quite severe
-// performance degradation under certain conditions.
-func WithAlignment(alignmenet int64) AddOption {
-	return func(options *addOptions) {
-		options.alignment = alignmenet
-	}
-}
-
-// AddPartition takes a pointer to a partition and adds it, placing it into
-// the first (or last using WithPreferEnd) continuous free space which fits it.
-// It writes the placement information (FirstBlock, LastBlock) back to p.
-// By default, AddPartition aligns FirstBlock to 1MiB boundaries, but this can
-// be overridden using WithAlignment.
-func (gpt *Table) AddPartition(p *Partition, size int64, options ...AddOption) error {
-	blockSize := gpt.b.BlockSize()
-	var opts addOptions
-	// Align to 1MiB or the block size, whichever is bigger
-	opts.alignment = 1 * 1024 * 1024
-	if blockSize > opts.alignment {
-		opts.alignment = blockSize
-	}
-	for _, o := range options {
-		o(&opts)
-	}
-	if opts.alignment%blockSize != 0 {
-		return fmt.Errorf("requested alignment (%d bytes) is not an integer multiple of the block size (%d), unable to align", opts.alignment, blockSize)
-	}
-	if p.ID == uuid.Nil {
-		p.ID = uuid.New()
-	}
-
-	fs, _, err := gpt.GetFreeSpaces()
-	if err != nil {
-		return fmt.Errorf("unable to determine free space: %v", err)
-	}
-	if opts.preferEnd {
-		// Reverse fs slice to start iteration at the end
-		for i, j := 0, len(fs)-1; i < j; i, j = i+1, j-1 {
-			fs[i], fs[j] = fs[j], fs[i]
-		}
-	}
-	// Number of blocks the partition should occupy, rounded up.
-	blocks := (size + blockSize - 1) / blockSize
-	if size == -1 {
-		var largestFreeSpace int64
-		for _, freeInt := range fs {
-			intSz := freeInt[1] - freeInt[0]
-			if intSz > largestFreeSpace {
-				largestFreeSpace = intSz
-			}
-		}
-		blocks = largestFreeSpace
-	}
-	var maxFreeBlocks int64
-	for _, freeInt := range fs {
-		start := freeInt[0]
-		end := freeInt[1]
-		freeBlocks := end - start
-		// Align start properly
-		alignTo := opts.alignment / blockSize
-		// Go doesn't implement the euclidean modulus, thus this construction
-		// is necessary.
-		paddingBlocks := ((alignTo - start) % alignTo) % alignTo
-		freeBlocks -= paddingBlocks
-		start += paddingBlocks
-		if maxFreeBlocks < freeBlocks {
-			maxFreeBlocks = freeBlocks
-		}
-		if freeBlocks >= blocks {
-			if !opts.preferEnd {
-				p.FirstBlock = uint64(start)
-				p.LastBlock = uint64(start + blocks - 1)
-			} else {
-				// Realign FirstBlock. This will always succeed as
-				// there is enough space to align to the start.
-				moveLeft := (end - blocks - 1) % (opts.alignment / blockSize)
-				p.FirstBlock = uint64(end - (blocks + 1 + moveLeft))
-				p.LastBlock = uint64(end - (2 + moveLeft))
-			}
-			newPartPos := -1
-			if !opts.keepEmptyEntries {
-				for i, part := range gpt.Partitions {
-					if part.IsUnused() {
-						newPartPos = i
-						break
-					}
-				}
-			}
-			if newPartPos == -1 {
-				gpt.Partitions = append(gpt.Partitions, p)
-			} else {
-				gpt.Partitions[newPartPos] = p
-			}
-			p.Section = blockdev.NewSection(gpt.b, int64(p.FirstBlock), int64(p.LastBlock)+1)
-			return nil
-		}
-	}
-
-	return fmt.Errorf("no space for partition of %d blocks, largest continuous free space after alignment is %d blocks", blocks, maxFreeBlocks)
-}
-
-// FirstUsableBlock returns the first usable (i.e. a partition can start there)
-// block.
-func (gpt *Table) FirstUsableBlock() int64 {
-	blockSize := gpt.b.BlockSize()
-	partitionEntryBlocks := (16384 + blockSize - 1) / blockSize
-	return 2 + partitionEntryBlocks
-}
-
-// LastUsableBlock returns the last usable (i.e. a partition can end there)
-// block. This block is inclusive.
-func (gpt *Table) LastUsableBlock() int64 {
-	blockSize := gpt.b.BlockSize()
-	partitionEntryBlocks := (16384 + blockSize - 1) / blockSize
-	return gpt.b.BlockCount() - (2 + partitionEntryBlocks)
-}
-
-// GetFreeSpaces returns a slice of tuples, each containing a half-closed
-// interval of logical blocks not occupied by the GPT itself or any partition.
-// The returned intervals are always in ascending order as well as
-// non-overlapping. It also returns if it detected any overlaps between
-// partitions or partitions and the GPT. It returns an error if and only if any
-// partition has its FirstBlock before the LastBlock or exceeds the amount of
-// blocks on the block device.
-//
-// Note that the most common use cases for this function are covered by
-// AddPartition, you're encouraged to use it instead.
-func (gpt *Table) GetFreeSpaces() ([][2]int64, bool, error) {
-	// This implements an efficient algorithm for finding free intervals given
-	// a set of potentially overlapping occupying intervals. It uses O(n*log n)
-	// time for n being the amount of intervals, i.e. partitions. It uses O(n)
-	// additional memory. This makes it de facto infinitely scalable in the
-	// context of partition tables as the size of the block device is not part
-	// of its cyclomatic complexity and O(n*log n) is tiny for even very big
-	// partition tables.
-
-	blockCount := gpt.b.BlockCount()
-
-	// startBlocks contains the start blocks (inclusive) of all occupied
-	// intervals.
-	var startBlocks []int64
-	// endBlocks contains the end blocks (exclusive!) of all occupied intervals.
-	// The interval at index i is given by [startBlock[i], endBlock[i]).
-	var endBlocks []int64
-
-	// Reserve the primary GPT interval including the protective MBR.
-	startBlocks = append(startBlocks, 0)
-	endBlocks = append(endBlocks, gpt.FirstUsableBlock())
-
-	// Reserve the alternate GPT interval (needs +1 for exclusive interval)
-	startBlocks = append(startBlocks, gpt.LastUsableBlock()+1)
-	endBlocks = append(endBlocks, blockCount)
-
-	for i, part := range gpt.Partitions {
-		if part.IsUnused() {
-			continue
-		}
-		// Bail if partition does not contain a valid interval. These are open
-		// intervals, thus part.FirstBlock == part.LastBlock denotes a valid
-		// partition with a size of one block.
-		if part.FirstBlock > part.LastBlock {
-			return nil, false, fmt.Errorf("partition %d has a LastBlock smaller than its FirstBlock, its interval is [%d, %d]", i, part.FirstBlock, part.LastBlock)
-		}
-		if part.FirstBlock >= uint64(blockCount) || part.LastBlock >= uint64(blockCount) {
-			return nil, false, fmt.Errorf("partition %d exceeds the block count of the block device", i)
-		}
-		startBlocks = append(startBlocks, int64(part.FirstBlock))
-		// Algorithm needs open-closed intervals, thus add +1 to the end.
-		endBlocks = append(endBlocks, int64(part.LastBlock)+1)
-	}
-	// Sort both sets of blocks independently in ascending order. Note that it
-	// is now no longer possible to extract the original intervals. Integers
-	// have no identity thus it doesn't matter if the sort is stable or not.
-	sort.Slice(startBlocks, func(i, j int) bool { return startBlocks[i] < startBlocks[j] })
-	sort.Slice(endBlocks, func(i, j int) bool { return endBlocks[i] < endBlocks[j] })
-
-	var freeSpaces [][2]int64
-
-	// currentIntervals contains the number of intervals which contain the
-	// position currently being iterated over. If currentIntervals is ever
-	// bigger than 1, there is overlap within the given intervals.
-	currentIntervals := 0
-	var hasOverlap bool
-
-	// Iterate for as long as there are interval boundaries to be processed.
-	for len(startBlocks) != 0 || len(endBlocks) != 0 {
-		// Short-circuit boundary processing. If an interval ends at x and the
-		// next one starts at x (this is using half-open intervals), it would
-		// otherwise perform useless processing as well as create an empty free
-		// interval which would then need to be filtered back out.
-		if len(startBlocks) != 0 && len(endBlocks) != 0 && startBlocks[0] == endBlocks[0] {
-			startBlocks = startBlocks[1:]
-			endBlocks = endBlocks[1:]
-			continue
-		}
-		// Pick the lowest boundary from either startBlocks or endBlocks,
-		// preferring endBlocks if they are equal. Don't try to pick from empty
-		// slices.
-		if (len(startBlocks) != 0 && len(endBlocks) != 0 && startBlocks[0] < endBlocks[0]) || len(endBlocks) == 0 {
-			// If currentIntervals == 0 a free space region ends here.
-			// Since this algorithm creates the free space interval at the end
-			// of an occupied interval, for the first interval there is no free
-			// space entry. But in this case it's fine to just ignore it as the
-			// first interval always starts at 0 because of the GPT.
-			if currentIntervals == 0 && len(freeSpaces) != 0 {
-				freeSpaces[len(freeSpaces)-1][1] = startBlocks[0]
-			}
-			// This is the start of an interval, increase the number of active
-			// intervals.
-			currentIntervals++
-			hasOverlap = hasOverlap || currentIntervals > 1
-			// Drop processed startBlock from slice.
-			startBlocks = startBlocks[1:]
-		} else {
-			// This is the end of an interval, decrease the number of active
-			// intervals.
-			currentIntervals--
-			// If currentIntervals == 0 a free space region starts here.
-			// Same as with the startBlocks, ignore a potential free block after
-			// the final range as the GPT occupies the last blocks anyway.
-			if currentIntervals == 0 && len(startBlocks) != 0 {
-				freeSpaces = append(freeSpaces, [2]int64{endBlocks[0], 0})
-			}
-			endBlocks = endBlocks[1:]
-		}
-	}
-	return freeSpaces, hasOverlap, nil
-}
-
-// Overhead returns the number of blocks the GPT partitioning itself consumes,
-// i.e. aren't usable for user data.
-func Overhead(blockSize int64) int64 {
-	// 3 blocks + 2x 16384 bytes (partition entry space)
-	partitionEntryBlocks := (16384 + blockSize - 1) / blockSize
-	return 3 + (2 * partitionEntryBlocks)
-}
-
-// Write writes the two GPTs, first the alternate, then the primary to the
-// block device. If gpt.ID or any of the partition IDs are the all-zero UUID,
-// new random ones are generated and written back. If the output is supposed
-// to be reproducible, generate the UUIDs beforehand.
-func (gpt *Table) Write() error {
-	blockSize := gpt.b.BlockSize()
-	blockCount := gpt.b.BlockCount()
-	if blockSize < 512 {
-		return errors.New("block size is smaller than 512 bytes, this is unsupported")
-	}
-	// Layout looks as follows:
-	// Block 0: Protective MBR
-	// Block 1: GPT Header
-	// Block 2-(16384 bytes): GPT partition entries
-	// Block (16384 bytes)-n: GPT partition entries alternate copy
-	// Block n: GPT Header alternate copy
-	partitionEntryCount := 128
-	if len(gpt.Partitions) > partitionEntryCount {
-		return errors.New("bigger-than default GPTs (>128 partitions) are unimplemented")
-	}
-
-	partitionEntryBlocks := (16384 + blockSize - 1) / blockSize
-	if blockCount < 3+(2*partitionEntryBlocks) {
-		return errors.New("not enough blocks to write GPT")
-	}
-
-	if gpt.ID == uuid.Nil {
-		gpt.ID = uuid.New()
-	}
-
-	partSize := binary.Size(partition{})
-	var partitionEntriesData bytes.Buffer
-	for i := 0; i < partitionEntryCount; i++ {
-		if len(gpt.Partitions) <= i || gpt.Partitions[i] == nil {
-			// Write an empty entry
-			partitionEntriesData.Write(make([]byte, partSize))
-			continue
-		}
-		p := gpt.Partitions[i]
-		if p.ID == uuid.Nil {
-			p.ID = uuid.New()
-		}
-		rawP := partition{
-			Type:       msguid.From(p.Type),
-			ID:         msguid.From(p.ID),
-			FirstBlock: p.FirstBlock,
-			LastBlock:  p.LastBlock,
-			Attributes: uint64(p.Attributes),
-		}
-		nameUTF16 := utf16.Encode([]rune(p.Name))
-		// copy will automatically truncate if target is too short
-		copy(rawP.Name[:], nameUTF16)
-		binary.Write(&partitionEntriesData, binary.LittleEndian, rawP)
-	}
-
-	hdr := header{
-		Signature:  gptSignature,
-		Revision:   gptRevision,
-		HeaderSize: uint32(binary.Size(&header{})),
-		ID:         msguid.From(gpt.ID),
-
-		PartitionEntryCount: uint32(partitionEntryCount),
-		PartitionEntrySize:  uint32(partSize),
-
-		FirstUsableBlock: uint64(2 + partitionEntryBlocks),
-		LastUsableBlock:  uint64(blockCount - (2 + partitionEntryBlocks)),
-	}
-	hdr.PartitionEntriesCRC32 = crc32.ChecksumIEEE(partitionEntriesData.Bytes())
-
-	hdrChecksum := crc32.NewIEEE()
-
-	// Write alternate header first, as otherwise resizes are unsafe. If the
-	// alternate is currently not at the end of the block device, it cannot
-	// be found. Thus if the write operation is aborted abnormally, the
-	// primary GPT is corrupted and the alternate cannot be found because it
-	// is not at its canonical location. Rewriting the alternate first avoids
-	// this problem.
-
-	// Alternate header
-	hdr.HeaderBlock = uint64(blockCount - 1)
-	hdr.AlternateHeaderBlock = 1
-	hdr.PartitionEntriesStartBlock = uint64(blockCount - (1 + partitionEntryBlocks))
-
-	hdrChecksum.Reset()
-	hdr.HeaderCRC32 = 0
-	binary.Write(hdrChecksum, binary.LittleEndian, &hdr)
-	hdr.HeaderCRC32 = hdrChecksum.Sum32()
-
-	for partitionEntriesData.Len()%int(blockSize) != 0 {
-		partitionEntriesData.WriteByte(0x00)
-	}
-	if _, err := gpt.b.WriteAt(partitionEntriesData.Bytes(), int64(hdr.PartitionEntriesStartBlock)*blockSize); err != nil {
-		return fmt.Errorf("failed to write alternate partition entries: %w", err)
-	}
-
-	var hdrRaw bytes.Buffer
-	if err := binary.Write(&hdrRaw, binary.LittleEndian, &hdr); err != nil {
-		return fmt.Errorf("failed to encode alternate header: %w", err)
-	}
-	for hdrRaw.Len()%int(blockSize) != 0 {
-		hdrRaw.WriteByte(0x00)
-	}
-	if _, err := gpt.b.WriteAt(hdrRaw.Bytes(), (blockCount-1)*blockSize); err != nil {
-		return fmt.Errorf("failed to write alternate header: %v", err)
-	}
-
-	// Primary header
-	hdr.HeaderBlock = 1
-	hdr.AlternateHeaderBlock = uint64(blockCount - 1)
-	hdr.PartitionEntriesStartBlock = 2
-
-	hdrChecksum.Reset()
-	hdr.HeaderCRC32 = 0
-	binary.Write(hdrChecksum, binary.LittleEndian, &hdr)
-	hdr.HeaderCRC32 = hdrChecksum.Sum32()
-
-	hdrRaw.Reset()
-
-	if err := makeProtectiveMBR(&hdrRaw, blockCount, gpt.BootCode); err != nil {
-		return fmt.Errorf("failed creating protective MBR: %w", err)
-	}
-	for hdrRaw.Len()%int(blockSize) != 0 {
-		hdrRaw.WriteByte(0x00)
-	}
-	if err := binary.Write(&hdrRaw, binary.LittleEndian, &hdr); err != nil {
-		panic(err)
-	}
-	for hdrRaw.Len()%int(blockSize) != 0 {
-		hdrRaw.WriteByte(0x00)
-	}
-	hdrRaw.Write(partitionEntriesData.Bytes())
-	for hdrRaw.Len()%int(blockSize) != 0 {
-		hdrRaw.WriteByte(0x00)
-	}
-
-	if _, err := gpt.b.WriteAt(hdrRaw.Bytes(), 0); err != nil {
-		return fmt.Errorf("failed to write primary GPT: %w", err)
-	}
-	return nil
-}
-
-// Read reads a Table from a block device.
-func Read(r blockdev.BlockDev) (*Table, error) {
-	if Overhead(r.BlockSize()) > r.BlockCount() {
-		return nil, errors.New("disk cannot contain a GPT as the block count is too small to store one")
-	}
-	zeroBlock := make([]byte, r.BlockSize())
-	if _, err := r.ReadAt(zeroBlock, 0); err != nil {
-		return nil, fmt.Errorf("failed to read first block: %w", err)
-	}
-
-	var m mbr
-	if err := binary.Read(bytes.NewReader(zeroBlock[:512]), binary.LittleEndian, &m); err != nil {
-		panic(err) // Read is from memory and with enough data
-	}
-	// The UEFI standard says that the only acceptable MBR for a GPT-partitioned
-	// device is a pure protective MBR with one partition of type 0xEE covering
-	// the entire disk. But reality is sadly not so simple. People have come up
-	// with hacks like Hybrid MBR which is basically a way to expose partitions
-	// as both GPT partitions and MBR partitions. There are also GPTs without
-	// any MBR at all.
-	// Following the standard strictly when reading means that this library
-	// would fail to read valid GPT disks where such schemes are employed.
-	// On the other hand just looking at the GPT signature is also dangerous
-	// as not all tools clear the second block where the GPT resides when
-	// writing an MBR, which results in reading a wrong/obsolete GPT.
-	// As a pragmatic solution this library treats any disk as GPT-formatted if
-	// the first block does not contain an MBR signature or at least one MBR
-	// partition has type 0xEE (GPT). It does however not care in which slot
-	// this partition is or if it begins at the start of the disk.
-	//
-	// Note that the block signatures for MBR and FAT are shared. This is a
-	// historical artifact from DOS. It is not reliably possible to
-	// differentiate the two as either has boot code where the other has meta-
-	// data and both lack any checksums. Because the MBR partition table is at
-	// the very end of the FAT bootcode section the following code always
-	// assumes that it is dealing with an MBR. This is both more likely and
-	// the 0xEE marker is rarer and thus more specific than FATs 0x00, 0x80 and
-	// 0x02.
-	var bootCode []byte
-	hasDOSBootSig := m.Signature == mbrSignature
-	if hasDOSBootSig {
-		var isGPT bool
-		for _, p := range m.PartitionRecords {
-			if p.Type == 0xEE {
-				isGPT = true
-			}
-		}
-		// Note that there is a small but non-zero chance that isGPT is true
-		// for a raw FAT filesystem if the bootcode contains a "valid" MBR.
-		// The next error message mentions that possibility.
-		if !isGPT {
-			return nil, errors.New("block device contains an MBR table without a GPT marker or a raw FAT filesystem")
-		}
-		// Trim right zeroes away as they are padded back when writing. This
-		// makes BootCode empty when it is all-zeros, making it easier to work
-		// with while still round-tripping correctly.
-		bootCode = bytes.TrimRight(m.BootCode[:], "\x00")
-	}
-	// Read the primary GPT. If it is damaged and/or broken, read the alternate.
-	primaryGPT, err := readSingleGPT(r, 1)
-	if err != nil {
-		alternateGPT, err2 := readSingleGPT(r, r.BlockCount()-1)
-		if err2 != nil {
-			return nil, fmt.Errorf("failed to read both GPTs: primary GPT (%v), secondary GPT (%v)", err, err2)
-		}
-		alternateGPT.BootCode = bootCode
-		return alternateGPT, nil
-	}
-	primaryGPT.BootCode = bootCode
-	return primaryGPT, nil
-}
-
-func readSingleGPT(r blockdev.BlockDev, headerBlockPos int64) (*Table, error) {
-	hdrBlock := make([]byte, r.BlockSize())
-	if _, err := r.ReadAt(hdrBlock, r.BlockSize()*headerBlockPos); err != nil {
-		return nil, fmt.Errorf("failed to read GPT header block: %w", err)
-	}
-	hdrBlockReader := bytes.NewReader(hdrBlock)
-	var hdr header
-	if err := binary.Read(hdrBlockReader, binary.LittleEndian, &hdr); err != nil {
-		panic(err) // Read from memory with enough bytes, should not fail
-	}
-	if hdr.Signature != gptSignature {
-		return nil, errors.New("no GPT signature found")
-	}
-	if hdr.HeaderSize < uint32(binary.Size(hdr)) {
-		return nil, fmt.Errorf("GPT header size is too small, likely corrupted")
-	}
-	if int64(hdr.HeaderSize) > r.BlockSize() {
-		return nil, fmt.Errorf("GPT header size is bigger than block size, likely corrupted")
-	}
-	// Use reserved bytes to hash, but do not expose them to the user.
-	// If someone has a need to process them, they should extend this library
-	// with whatever an updated UEFI specification contains.
-	// It has been considered to store these in the user-exposed GPT struct to
-	// be able to round-trip them cleanly, but there is significant complexity
-	// and risk involved in doing so.
-	reservedBytes := hdrBlock[binary.Size(hdr):hdr.HeaderSize]
-	hdrExpectedCRC := hdr.HeaderCRC32
-	hdr.HeaderCRC32 = 0
-	hdrCRC := crc32.NewIEEE()
-	binary.Write(hdrCRC, binary.LittleEndian, &hdr)
-	hdrCRC.Write(reservedBytes)
-	if hdrCRC.Sum32() != hdrExpectedCRC {
-		return nil, fmt.Errorf("GPT header checksum mismatch, probably corrupted")
-	}
-	if hdr.HeaderBlock != uint64(headerBlockPos) {
-		return nil, errors.New("GPT header indicates wrong block")
-	}
-	if hdr.PartitionEntrySize < uint32(binary.Size(partition{})) {
-		return nil, errors.New("partition entry size too small")
-	}
-	if hdr.PartitionEntriesStartBlock > uint64(r.BlockCount()) {
-		return nil, errors.New("partition entry start block is out of range")
-	}
-	// Sanity-check total size of the partition entry area. Otherwise, this is a
-	// trivial DoS as it could cause allocation of gigabytes of memory.
-	// 4MiB is equivalent to around 45k partitions at the current size.
-	// I know of no operating system which would handle even a fraction of this.
-	if uint64(hdr.PartitionEntryCount)*uint64(hdr.PartitionEntrySize) > 4*1024*1024 {
-		return nil, errors.New("partition entry area bigger than 4MiB, refusing to read")
-	}
-	partitionEntryData := make([]byte, hdr.PartitionEntrySize*hdr.PartitionEntryCount)
-	if _, err := r.ReadAt(partitionEntryData, r.BlockSize()*int64(hdr.PartitionEntriesStartBlock)); err != nil {
-		return nil, fmt.Errorf("failed to read partition entries: %w", err)
-	}
-	if crc32.ChecksumIEEE(partitionEntryData) != hdr.PartitionEntriesCRC32 {
-		return nil, errors.New("GPT partition entry table checksum mismatch")
-	}
-	var g Table
-	g.ID = msguid.To(hdr.ID)
-	for i := uint32(0); i < hdr.PartitionEntryCount; i++ {
-		entryReader := bytes.NewReader(partitionEntryData[i*hdr.PartitionEntrySize : (i+1)*hdr.PartitionEntrySize])
-		var part partition
-		if err := binary.Read(entryReader, binary.LittleEndian, &part); err != nil {
-			panic(err) // Should not happen
-		}
-		// If the partition type is the all-zero UUID, this slot counts as
-		// unused.
-		if part.Type == uuid.Nil {
-			g.Partitions = append(g.Partitions, nil)
-			continue
-		}
-		g.Partitions = append(g.Partitions, &Partition{
-			ID:         msguid.To(part.ID),
-			Type:       msguid.To(part.Type),
-			Name:       strings.TrimRight(string(utf16.Decode(part.Name[:])), "\x00"),
-			FirstBlock: part.FirstBlock,
-			LastBlock:  part.LastBlock,
-			Attributes: Attribute(part.Attributes),
-		})
-	}
-	// Remove long list of nils at the end as it's inconvenient to work with
-	// (append doesn't work, debug prints are very long) and it round-trips
-	// correctly even without it as it gets zero-padded when writing anyway.
-	var maxValidPartition int
-	for i, p := range g.Partitions {
-		if !p.IsUnused() {
-			maxValidPartition = i
-		}
-	}
-	g.Partitions = g.Partitions[:maxValidPartition+1]
-	g.b = r
-	return &g, nil
-}
diff --git a/metropolis/pkg/gpt/gpt_test.go b/metropolis/pkg/gpt/gpt_test.go
deleted file mode 100644
index 3f25db9..0000000
--- a/metropolis/pkg/gpt/gpt_test.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package gpt
-
-import (
-	"bytes"
-	"crypto/sha256"
-	"io"
-	"os"
-	"testing"
-
-	"github.com/google/uuid"
-
-	"source.monogon.dev/metropolis/pkg/blockdev"
-)
-
-func TestFreeSpaces(t *testing.T) {
-	cases := []struct {
-		name            string
-		parts           []*Partition
-		expected        [][2]int64
-		expectedOverlap bool
-	}{
-		{"Empty", []*Partition{}, [][2]int64{{34, 2015}}, false},
-		{"OnePart", []*Partition{
-			{Type: PartitionTypeEFISystem, FirstBlock: 200, LastBlock: 1499},
-		}, [][2]int64{
-			{34, 200},
-			{1500, 2015},
-		}, false},
-		{"TwoOverlappingParts", []*Partition{
-			{Type: PartitionTypeEFISystem, FirstBlock: 200, LastBlock: 1499},
-			{Type: PartitionTypeEFISystem, FirstBlock: 1000, LastBlock: 1999},
-		}, [][2]int64{
-			{34, 200},
-			{2000, 2015},
-		}, true},
-		{"Full", []*Partition{
-			{Type: PartitionTypeEFISystem, FirstBlock: 34, LastBlock: 999},
-			{Type: PartitionTypeEFISystem, FirstBlock: 1000, LastBlock: 2014},
-		}, [][2]int64{}, false},
-		{"TwoSpacedParts", []*Partition{
-			{Type: PartitionTypeEFISystem, FirstBlock: 500, LastBlock: 899},
-			{Type: PartitionTypeEFISystem, FirstBlock: 1200, LastBlock: 1799},
-		}, [][2]int64{
-			{34, 500},
-			{900, 1200},
-			{1800, 2015},
-		}, false},
-	}
-
-	// Partitions are created manually as AddPartition calls FreeSpaces itself,
-	// which makes the test unreliable as well as making failures very hard to
-	// debug.
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			d := blockdev.MustNewMemory(512, 2048) // 1MiB
-			g, err := New(d)
-			if err != nil {
-				panic(err)
-			}
-			g.Partitions = c.parts
-			fs, overlap, err := g.GetFreeSpaces()
-			if err != nil {
-				t.Fatal(err)
-			}
-			if overlap != c.expectedOverlap {
-				t.Errorf("expected overlap %v, got %v", c.expectedOverlap, overlap)
-			}
-			if len(fs) != len(c.expected) {
-				t.Fatalf("expected %v, got %v", c.expected, fs)
-			}
-			for i := range fs {
-				if fs[i] != c.expected[i] {
-					t.Errorf("free space mismatch at pos %d: got [%d, %d), expected [%d, %d)", i, fs[i][0], fs[i][1], c.expected[i][0], c.expected[i][1])
-				}
-			}
-		})
-	}
-}
-
-func TestRoundTrip(t *testing.T) {
-	if os.Getenv("IN_KTEST") == "true" {
-		t.Skip("In ktest")
-	}
-	d := blockdev.MustNewMemory(512, 2048) // 1 MiB
-
-	g := Table{
-		ID:       uuid.NewSHA1(uuid.Nil, []byte("test")),
-		BootCode: []byte("just some test code"),
-		Partitions: []*Partition{
-			nil,
-			// This emoji is very complex and exercises UTF16 surrogate encoding
-			// and composing.
-			{Name: "Test 🏃‍♂️", FirstBlock: 10, LastBlock: 19, Type: PartitionTypeEFISystem, ID: uuid.NewSHA1(uuid.Nil, []byte("test1")), Attributes: AttrNoBlockIOProto},
-			nil,
-			{Name: "Test2", FirstBlock: 20, LastBlock: 49, Type: PartitionTypeEFISystem, ID: uuid.NewSHA1(uuid.Nil, []byte("test2")), Attributes: 0},
-		},
-		b: d,
-	}
-	if err := g.Write(); err != nil {
-		t.Fatalf("Error while writing Table: %v", err)
-	}
-
-	originalHash := sha256.New()
-	sr1 := io.NewSectionReader(d, 0, d.BlockSize()*d.BlockCount())
-	if _, err := io.CopyBuffer(originalHash, sr1, make([]byte, d.OptimalBlockSize())); err != nil {
-		panic(err)
-	}
-
-	g2, err := Read(d)
-	if err != nil {
-		t.Fatalf("Failed to read back GPT: %v", err)
-	}
-	if g2.ID != g.ID {
-		t.Errorf("Disk UUID changed when reading back: %v", err)
-	}
-	// Destroy primary GPT
-	d.Zero(1*d.BlockSize(), 5*d.BlockSize())
-	g3, err := Read(d)
-	if err != nil {
-		t.Fatalf("Failed to read back GPT with primary GPT destroyed: %v", err)
-	}
-	if g3.ID != g.ID {
-		t.Errorf("Disk UUID changed when reading back: %v", err)
-	}
-	if err := g3.Write(); err != nil {
-		t.Fatalf("Error while writing back GPT: %v", err)
-	}
-	rewrittenHash := sha256.New()
-	sr2 := io.NewSectionReader(d, 0, d.BlockSize()*d.BlockCount())
-	if _, err := io.CopyBuffer(rewrittenHash, sr2, make([]byte, d.OptimalBlockSize())); err != nil {
-		panic(err)
-	}
-	if !bytes.Equal(originalHash.Sum(nil), rewrittenHash.Sum(nil)) {
-		t.Errorf("Write/Read/Write test was not reproducible: %x != %x", originalHash.Sum(nil), rewrittenHash.Sum(nil))
-	}
-}
diff --git a/metropolis/pkg/gpt/linux_test.go b/metropolis/pkg/gpt/linux_test.go
deleted file mode 100644
index 7f7f6de..0000000
--- a/metropolis/pkg/gpt/linux_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package gpt
-
-import (
-	"os"
-	"testing"
-
-	"github.com/google/uuid"
-
-	"source.monogon.dev/metropolis/pkg/blockdev"
-)
-
-var testUUID = uuid.MustParse("85c0b60f-caf9-40dd-86fa-f8797e26238d")
-
-func TestKernelInterop(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-	ram0, err := blockdev.Open("/dev/ram0")
-	if err != nil {
-		panic(err)
-	}
-	g := Table{
-		ID:       uuid.NewSHA1(testUUID, []byte("test")),
-		BootCode: []byte("just some test code"),
-		Partitions: []*Partition{
-			nil,
-			// This emoji is very complex and exercises UTF16 surrogate encoding
-			// and composing.
-			{Name: "Test 🏃‍♂️", FirstBlock: 10, LastBlock: 19, Type: PartitionTypeEFISystem, ID: uuid.NewSHA1(testUUID, []byte("test1")), Attributes: AttrNoBlockIOProto},
-			nil,
-			{Name: "Test2", FirstBlock: 20, LastBlock: 49, Type: PartitionTypeEFISystem, ID: uuid.NewSHA1(testUUID, []byte("test2")), Attributes: 0},
-		},
-		b: ram0,
-	}
-	if err := g.Write(); err != nil {
-		t.Fatalf("Failed to write GPT: %v", err)
-	}
-	if err := ram0.RefreshPartitionTable(); err != nil {
-		t.Fatalf("Failed to refresh partition table: %v", err)
-	}
-	if _, err := os.Stat("/sys/block/ram0/ram0p2"); err != nil {
-		t.Errorf("Expected ram0p2 to exist, got %v", err)
-	}
-	if _, err := os.Stat("/sys/block/ram0/ram0p4"); err != nil {
-		t.Errorf("Expected ram0p4 to exist, got %v", err)
-	}
-	if _, err := os.Stat("/sys/block/ram0/ram0p1"); err == nil {
-		t.Error("Expected ram0p1 not to exist, but it exists")
-	}
-}
diff --git a/metropolis/pkg/gpt/mbr.go b/metropolis/pkg/gpt/mbr.go
deleted file mode 100644
index fa82359..0000000
--- a/metropolis/pkg/gpt/mbr.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package gpt
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-	"math"
-)
-
-// See UEFI Specification 2.9 Table 5-3
-type mbr struct {
-	BootCode         [440]byte
-	DiskSignature    [4]byte
-	_                [2]byte
-	PartitionRecords [4]mbrPartitionRecord
-	Signature        [2]byte
-}
-
-// See UEFI Specification 2.9 Table 5-4
-type mbrPartitionRecord struct {
-	BootIndicator byte
-	StartingCHS   [3]byte
-	Type          byte
-	EndingCHS     [3]byte
-	StartingBlock uint32
-	SizeInBlocks  uint32
-}
-
-var mbrSignature = [2]byte{0x55, 0xaa}
-
-func makeProtectiveMBR(w io.Writer, blockCount int64, bootCode []byte) error {
-	var representedBlockCount = uint32(math.MaxUint32)
-	if blockCount < math.MaxUint32 {
-		representedBlockCount = uint32(blockCount)
-	}
-	m := mbr{
-		DiskSignature: [4]byte{0, 0, 0, 0},
-		PartitionRecords: [4]mbrPartitionRecord{
-			{
-				StartingCHS:   toCHS(1),
-				Type:          0xEE, // Table/Protective MBR
-				StartingBlock: 1,
-				SizeInBlocks:  representedBlockCount - 1,
-				EndingCHS:     toCHS(blockCount + 1),
-			},
-			{},
-			{},
-			{},
-		},
-		Signature: mbrSignature,
-	}
-	if len(bootCode) > len(m.BootCode) {
-		return fmt.Errorf("BootCode is %d bytes, can only store %d", len(bootCode), len(m.BootCode))
-	}
-	copy(m.BootCode[:], bootCode)
-	if err := binary.Write(w, binary.LittleEndian, &m); err != nil {
-		return fmt.Errorf("failed to write MBR: %w", err)
-	}
-	return nil
-}
-
-// toCHS converts a LBA to a "logical" CHS, i.e. what a legacy BIOS 13h
-// interface would use. This has nothing to do with the actual CHS geometry
-// which depends on the disk and interface used.
-func toCHS(lba int64) (chs [3]byte) {
-	const maxCylinders = (1 << 10) - 1
-	const maxHeadsPerCylinder = (1 << 8) - 1
-	const maxSectorsPerTrack = (1 << 6) - 2 // Sector is 1-based
-	cylinder := lba / (maxHeadsPerCylinder * maxSectorsPerTrack)
-	head := (lba / maxSectorsPerTrack) % maxHeadsPerCylinder
-	sector := (lba % maxSectorsPerTrack) + 1
-	if cylinder > maxCylinders {
-		cylinder = maxCylinders
-		head = maxHeadsPerCylinder
-		sector = maxSectorsPerTrack + 1
-	}
-	chs[0] = uint8(head)
-	chs[1] = uint8(sector)
-	chs[1] |= uint8(cylinder>>2) & 0xc0
-	chs[2] = uint8(cylinder)
-	return
-}
diff --git a/metropolis/pkg/gpt/mbr_test.go b/metropolis/pkg/gpt/mbr_test.go
deleted file mode 100644
index 2ed4ed1..0000000
--- a/metropolis/pkg/gpt/mbr_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package gpt
-
-import "testing"
-
-func TestToCHS(t *testing.T) {
-	cases := []struct {
-		name        string
-		lba         int64
-		expectedCHS [3]byte
-	}{
-		{ // See UEFI Specification 2.9 Table 5-4 StartingCHS
-			name:        "One",
-			lba:         1,
-			expectedCHS: [3]byte{0x00, 0x02, 0x00},
-		},
-		{
-			name:        "TooBig",
-			lba:         (1023 * 255 * 63) + 1,
-			expectedCHS: [3]byte{0xff, 0xff, 0xff},
-		},
-	}
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			chs := toCHS(c.lba)
-			if chs != c.expectedCHS {
-				t.Errorf("expected %x, got %x", c.expectedCHS, chs)
-			}
-		})
-	}
-}
diff --git a/metropolis/pkg/gpt/structs_test.go b/metropolis/pkg/gpt/structs_test.go
deleted file mode 100644
index dc53635..0000000
--- a/metropolis/pkg/gpt/structs_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package gpt
-
-import (
-	"encoding/binary"
-	"reflect"
-	"testing"
-)
-
-func TestStructureSizes(t *testing.T) {
-	cases := []struct {
-		StructInstance interface{}
-		ExpectedSize   int
-	}{
-		{mbr{}, 512},
-		{mbrPartitionRecord{}, 16},
-		{header{}, 92},
-		{partition{}, 128},
-	}
-	for _, c := range cases {
-		t.Run(reflect.TypeOf(c.StructInstance).String(), func(t *testing.T) {
-			actualSize := binary.Size(c.StructInstance)
-			if actualSize != c.ExpectedSize {
-				t.Errorf("Expected %d bytes, got %d", c.ExpectedSize, actualSize)
-			}
-		})
-	}
-}
diff --git a/metropolis/pkg/jsonpatch/BUILD.bazel b/metropolis/pkg/jsonpatch/BUILD.bazel
deleted file mode 100644
index e6dd37e..0000000
--- a/metropolis/pkg/jsonpatch/BUILD.bazel
+++ /dev/null
@@ -1,14 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "jsonpatch",
-    srcs = ["jsonpatch.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/jsonpatch",
-    visibility = ["//metropolis:__subpackages__"],
-)
-
-go_test(
-    name = "jsonpatch_test",
-    srcs = ["jsonpatch_test.go"],
-    embed = [":jsonpatch"],
-)
diff --git a/metropolis/pkg/jsonpatch/jsonpatch.go b/metropolis/pkg/jsonpatch/jsonpatch.go
deleted file mode 100644
index fe0fbac..0000000
--- a/metropolis/pkg/jsonpatch/jsonpatch.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package jsonpatch contains data structures and encoders for JSON Patch (RFC
-// 6902) and JSON Pointers (RFC 6901)
-package jsonpatch
-
-import "strings"
-
-// JsonPatchOp describes a JSON Patch operation (RFC 6902 Section 4)
-type JsonPatchOp struct {
-	Operation string      `json:"op"`
-	Path      string      `json:"path"` // Technically a JSON Pointer, but called Path in the RFC
-	From      string      `json:"from,omitempty"`
-	Value     interface{} `json:"value,omitempty"`
-}
-
-// EncodeJSONRefToken encodes a JSON reference token as part of a JSON Pointer
-// (RFC 6901 Section 2)
-func EncodeJSONRefToken(token string) string {
-	x := strings.ReplaceAll(token, "~", "~0")
-	return strings.ReplaceAll(x, "/", "~1")
-}
-
-// PointerFromParts returns an encoded JSON Pointer from parts
-func PointerFromParts(pathParts []string) string {
-	var encodedParts []string
-	encodedParts = append(encodedParts, "")
-	for _, part := range pathParts {
-		encodedParts = append(encodedParts, EncodeJSONRefToken(part))
-	}
-	return strings.Join(encodedParts, "/")
-}
diff --git a/metropolis/pkg/jsonpatch/jsonpatch_test.go b/metropolis/pkg/jsonpatch/jsonpatch_test.go
deleted file mode 100644
index 33a56ba..0000000
--- a/metropolis/pkg/jsonpatch/jsonpatch_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jsonpatch
-
-import (
-	"testing"
-)
-
-func TestEncodeJSONRefToken(t *testing.T) {
-	tests := []struct {
-		name  string
-		token string
-		want  string
-	}{
-		{"Passes through normal characters", "asdf123", "asdf123"},
-		{"Encodes simple slashes", "a/b", "a~1b"},
-		{"Encodes tildes", "m~n", "m~0n"},
-		{"Encodes bot tildes and slashes", "a/m~n", "a~1m~0n"},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := EncodeJSONRefToken(tt.token); got != tt.want {
-				t.Errorf("EncodeJSONRefToken() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
-
-func TestPointerFromParts(t *testing.T) {
-	type args struct {
-		pathParts []string
-	}
-	tests := []struct {
-		name string
-		args args
-		want string
-	}{
-		{"Empty path", args{[]string{}}, ""},
-		{"Single level path", args{[]string{"foo"}}, "/foo"},
-		{"Multi-level path", args{[]string{"foo", "0"}}, "/foo/0"},
-		{"Path starting with empty key", args{[]string{""}}, "/"},
-		{"Path with part containing /", args{[]string{"a/b"}}, "/a~1b"},
-		{"Path with part containing spaces", args{[]string{" "}}, "/ "},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := PointerFromParts(tt.args.pathParts); got != tt.want {
-				t.Errorf("PointerFromParts() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
diff --git a/metropolis/pkg/kexec/BUILD.bazel b/metropolis/pkg/kexec/BUILD.bazel
deleted file mode 100644
index 61da706..0000000
--- a/metropolis/pkg/kexec/BUILD.bazel
+++ /dev/null
@@ -1,20 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "kexec",
-    srcs = ["kexec.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/kexec",
-    visibility = ["//visibility:public"],
-    deps = select({
-        "@io_bazel_rules_go//go/platform:amd64": [
-            "@org_golang_x_sys//unix",
-        ],
-        "@io_bazel_rules_go//go/platform:arm64": [
-            "@org_golang_x_sys//unix",
-        ],
-        "@io_bazel_rules_go//go/platform:riscv64": [
-            "@org_golang_x_sys//unix",
-        ],
-        "//conditions:default": [],
-    }),
-)
diff --git a/metropolis/pkg/kexec/kexec.go b/metropolis/pkg/kexec/kexec.go
deleted file mode 100644
index 7109903..0000000
--- a/metropolis/pkg/kexec/kexec.go
+++ /dev/null
@@ -1,75 +0,0 @@
-//go:build amd64 || arm64 || riscv64
-// +build amd64 arm64 riscv64
-
-// Package kexec allows executing subsequent kernels from Linux userspace.
-package kexec
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"runtime"
-	"strings"
-
-	"golang.org/x/sys/unix"
-)
-
-// FileLoad loads the given kernel as the new kernel with the given initramfs
-// and cmdline. It also performs auxiliary work like adding the ACPI RSDP
-// physical address to command line if using EFI. The kernel can be started by
-// calling unix.Reboot(unix.LINUX_REBOOT_CMD_KEXEC).
-// The underlying syscall is only available on x86_64, arm64 and riscv.
-// Parts of this function are taken from u-root's kexec package.
-func FileLoad(kernel, initramfs *os.File, cmdline string) error {
-	passedCmdline := cmdline
-	systab, err := os.Open("/sys/firmware/efi/systab")
-	if os.IsNotExist(err) {
-		// No EFI, nothing to do
-	} else if err != nil {
-		return fmt.Errorf("unable to open EFI systab: %w", err)
-	} else {
-		s := bufio.NewScanner(systab)
-		for s.Scan() {
-			if errors.Is(s.Err(), io.EOF) {
-				// We have no RSDP, no need to pass it
-				break
-			}
-			if s.Err() != nil {
-				return fmt.Errorf("failed to read EFI systab: %w", s.Err())
-			}
-			parts := strings.SplitN(s.Text(), "=", 2)
-			// There are two ACPI RDSP revisions, 1.0 and 2.0.
-			// Linux guarantees that the 2.0 always comes before the
-			// 1.0 so just matching and breaking is good enough.
-			if parts[0] == "ACPI20" || parts[0] == "ACPI" {
-				// Technically this could be passed through as parsing a hexa-
-				// decimal address and printing it back does nothing, but in
-				// case unexpected values show up this could cause very hard-
-				// to-debug crashes when the new kernel boots.
-				var acpiRsdp int64
-				if _, err := fmt.Sscanf(parts[1], "0x%x", &acpiRsdp); err != nil {
-					return fmt.Errorf("failed to parse EFI systab ACP RSDP address: %w", err)
-				}
-				passedCmdline += fmt.Sprintf(" acpi_rsdp=0x%x", acpiRsdp)
-				break
-			}
-		}
-	}
-
-	var flags int
-	var initramfsfd int
-	if initramfs != nil {
-		initramfsfd = int(initramfs.Fd())
-	} else {
-		flags |= unix.KEXEC_FILE_NO_INITRAMFS
-	}
-
-	if err := unix.KexecFileLoad(int(kernel.Fd()), initramfsfd, passedCmdline, flags); err != nil {
-		return fmt.Errorf("SYS_kexec_file_load(%d, %d, %s, %x) = %v", kernel.Fd(), initramfsfd, cmdline, flags, err)
-	}
-	runtime.KeepAlive(kernel)
-	runtime.KeepAlive(initramfs)
-	return nil
-}
diff --git a/metropolis/pkg/kmod/BUILD.bazel b/metropolis/pkg/kmod/BUILD.bazel
deleted file mode 100644
index 6264bf9..0000000
--- a/metropolis/pkg/kmod/BUILD.bazel
+++ /dev/null
@@ -1,49 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/node/build/fwprune:def.bzl", "fsspec_linux_firmware")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
-
-go_library(
-    name = "kmod",
-    srcs = [
-        "manager.go",
-        "meta.go",
-        "modinfo.go",
-        "radix.go",
-        "syscall.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/kmod",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//metropolis/pkg/kmod/spec",
-        "@org_golang_google_protobuf//proto",
-        "@org_golang_x_sys//unix",
-    ],
-)
-
-go_test(
-    name = "kmod_test",
-    srcs = [
-        "manager_test.go",
-        "radix_test.go",
-    ],
-    embed = [":kmod"],
-    deps = [
-        "//metropolis/pkg/kmod/spec",
-        "@com_github_google_go_cmp//cmp",
-        "@org_golang_google_protobuf//testing/protocmp",
-    ],
-)
-
-fsspec_linux_firmware(
-    name = "firmware",
-    firmware_files = ["@linux-firmware//:all_files"],
-    kernel = "//metropolis/test/ktest:linux-testing",
-    metadata = "@linux-firmware//:metadata",
-)
-
-ktest(
-    fsspecs = [
-        ":firmware",
-    ],
-    tester = ":kmod_test",
-)
diff --git a/metropolis/pkg/kmod/manager.go b/metropolis/pkg/kmod/manager.go
deleted file mode 100644
index 3b3f875..0000000
--- a/metropolis/pkg/kmod/manager.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package kmod
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-
-	"golang.org/x/sys/unix"
-	"google.golang.org/protobuf/proto"
-
-	kmodpb "source.monogon.dev/metropolis/pkg/kmod/spec"
-)
-
-// Manager contains all the logic and metadata required to efficiently load
-// Linux kernel modules. It has internal loading tracking, thus it's recommended
-// to only keep one Manager instance running per kernel. It can recreate its
-// internal state quite well, but there are edge cases where the kernel makes
-// it hard to do so (MODULE_STATE_UNFORMED) thus if possible that single
-// instance should be kept alive. It currently does not support unloading
-// modules, but that can be added to the existing design if deemed necessary.
-type Manager struct {
-	// Directory the modules are loaded from. The path in each module Meta
-	// message is relative to this.
-	modulesPath string
-	meta        *kmodpb.Meta
-	// Extra map to quickly find module indexes from names
-	moduleIndexes map[string]uint32
-
-	// mutex protects loadedModules, rest is read-only
-	// This cannot use a RWMutex as locks cannot be upgraded
-	mutex         sync.Mutex
-	loadedModules map[uint32]bool
-}
-
-// NewManager instantiates a kernel module loading manager. Please take a look
-// at the additional considerations on the Manager type itself.
-func NewManager(meta *kmodpb.Meta, modulesPath string) (*Manager, error) {
-	modIndexes := make(map[string]uint32)
-	for i, m := range meta.Modules {
-		modIndexes[m.Name] = uint32(i)
-	}
-	modulesFile, err := os.Open("/proc/modules")
-	if err != nil {
-		return nil, err
-	}
-	loadedModules := make(map[uint32]bool)
-	s := bufio.NewScanner(modulesFile)
-	for s.Scan() {
-		fields := strings.Fields(s.Text())
-		if len(fields) == 0 {
-			// Skip invalid lines
-			continue
-		}
-		modIdx, ok := modIndexes[fields[0]]
-		if !ok {
-			// Certain modules are only available as built-in and are thus not
-			// part of the module metadata. They do not need to be handled by
-			// this code, ignore them.
-			continue
-		}
-		loadedModules[modIdx] = true
-	}
-	return &Manager{
-		modulesPath:   modulesPath,
-		meta:          meta,
-		moduleIndexes: modIndexes,
-		loadedModules: loadedModules,
-	}, nil
-}
-
-// NewManagerFromPath instantiates a new kernel module loading manager from a
-// path containing a meta.pb file containing a kmod.Meta message as well as the
-// kernel modules within. Please take a look at the additional considerations on
-// the Manager type itself.
-func NewManagerFromPath(modulesPath string) (*Manager, error) {
-	moduleMetaRaw, err := os.ReadFile(filepath.Join(modulesPath, "meta.pb"))
-	if err != nil {
-		return nil, fmt.Errorf("error reading module metadata: %w", err)
-	}
-	var moduleMeta kmodpb.Meta
-	if err := proto.Unmarshal(moduleMetaRaw, &moduleMeta); err != nil {
-		return nil, fmt.Errorf("error decoding module metadata: %w", err)
-	}
-	return NewManager(&moduleMeta, modulesPath)
-}
-
-// ErrNotFound is returned when an attempt is made to load a module which does
-// not exist according to the loaded metadata.
-type ErrNotFound struct {
-	Name string
-}
-
-func (e *ErrNotFound) Error() string {
-	return fmt.Sprintf("module %q does not exist", e.Name)
-}
-
-// LoadModule loads the module with the given name. If the module is already
-// loaded or  built-in, it returns no error. If it failed loading the module or
-// the module does not exist, it returns an error.
-func (s *Manager) LoadModule(name string) error {
-	modIdx, ok := s.moduleIndexes[name]
-	if !ok {
-		return &ErrNotFound{Name: name}
-	}
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return s.loadModuleRecursive(modIdx)
-}
-
-// LoadModulesForDevice loads all modules whose device match expressions
-// (modaliases) match the given device modalias. It only returns an error if
-// a module which matched the device or one of its dependencies caused an error
-// when loading. A device modalias string which matches nothing is not an error.
-func (s *Manager) LoadModulesForDevice(devModalias string) error {
-	matches := make(map[uint32]bool)
-	lookupModulesRec(s.meta.ModuleDeviceMatches, devModalias, matches)
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	for m := range matches {
-		if err := s.loadModuleRecursive(m); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Caller is REQUIRED to hold s.mutex!
-func (s *Manager) loadModuleRecursive(modIdx uint32) error {
-	if s.loadedModules[modIdx] {
-		return nil
-	}
-	modMeta := s.meta.Modules[modIdx]
-	if modMeta.Path == "" {
-		// Module is built-in, dependency always satisfied
-		return nil
-	}
-	for _, dep := range modMeta.Depends {
-		if err := s.loadModuleRecursive(dep); err != nil {
-			// Pass though as is, recursion can otherwise cause
-			// extremely large errors
-			return err
-		}
-	}
-	module, err := os.Open(filepath.Join(s.modulesPath, modMeta.Path))
-	if err != nil {
-		return fmt.Errorf("error opening kernel module: %w", err)
-	}
-	defer module.Close()
-	err = LoadModule(module, "", 0)
-	if err != nil && errors.Is(err, unix.EEXIST) {
-		return fmt.Errorf("error loading kernel module %v: %w", modMeta.Name, err)
-	}
-	s.loadedModules[modIdx] = true
-	return nil
-}
diff --git a/metropolis/pkg/kmod/manager_test.go b/metropolis/pkg/kmod/manager_test.go
deleted file mode 100644
index 43c9428..0000000
--- a/metropolis/pkg/kmod/manager_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package kmod
-
-import (
-	"errors"
-	"os"
-	"testing"
-)
-
-func TestManagerIntegration(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-	mgr, err := NewManagerFromPath("/lib/modules")
-	if err != nil {
-		t.Fatal(err)
-	}
-	t.Run("LoadExampleModule", func(t *testing.T) {
-		if err := mgr.LoadModule("r8169"); err != nil {
-			t.Error(err)
-		}
-		if _, err := os.Stat("/sys/module/r8169"); err != nil {
-			t.Error("module load returned success, but module not in sysfs")
-		}
-	})
-	t.Run("LoadNonexistentModule", func(t *testing.T) {
-		err := mgr.LoadModule("definitelynomodule")
-		var notFoundErr *ErrNotFound
-		if !errors.As(err, &notFoundErr) {
-			t.Errorf("expected ErrNotFound, got %v", err)
-		}
-	})
-	t.Run("LoadModuleTwice", func(t *testing.T) {
-		if err := mgr.LoadModule("r8169"); err != nil {
-			t.Error(err)
-		}
-	})
-	// TODO(lorenz): Should test loading dependencies here, but we currently
-	// have none in the kernel config and I'm not about to build another kernel
-	// just for this.
-	t.Run("LoadDeviceModule", func(t *testing.T) {
-		if err := mgr.LoadModulesForDevice("pci:v00008086d00001591sv00001043sd000085F0bc02sc00i00"); err != nil {
-			t.Error(err)
-		}
-		if _, err := os.Stat("/sys/module/ice"); err != nil {
-			t.Error("module load returned success, but module not in sysfs")
-		}
-	})
-}
diff --git a/metropolis/pkg/kmod/meta.go b/metropolis/pkg/kmod/meta.go
deleted file mode 100644
index 2554c33..0000000
--- a/metropolis/pkg/kmod/meta.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package kmod
-
-import (
-	"fmt"
-	"log"
-
-	kmodpb "source.monogon.dev/metropolis/pkg/kmod/spec"
-)
-
-// MakeMetaFromModuleInfo is a more flexible alternative to MakeMeta. It only
-// relies on ModuleInfo structures, additional processing can be done outside of
-// this function. It does however require that for dynamically-loaded modules
-// the "path" key is set to the path of the .ko file relative to the module
-// root.
-func MakeMetaFromModuleInfo(modinfos []ModuleInfo) (*kmodpb.Meta, error) {
-	modIndices := make(map[string]uint32)
-	modInfoMap := make(map[string]ModuleInfo)
-	var meta kmodpb.Meta
-	meta.ModuleDeviceMatches = &kmodpb.RadixNode{
-		Type: kmodpb.RadixNode_ROOT,
-	}
-	var i uint32
-	for _, m := range modinfos {
-		meta.Modules = append(meta.Modules, &kmodpb.Module{
-			Name: m.Name(),
-			Path: m.Get("path"),
-		})
-		for _, p := range m.Aliases() {
-			if m.Get("path") == "" {
-				// Ignore built-in modaliases as they do not need to be loaded.
-				continue
-			}
-			if err := AddPattern(meta.ModuleDeviceMatches, p, i); err != nil {
-				return nil, fmt.Errorf("failed adding device match %q: %w", p, err)
-			}
-		}
-		modIndices[m.Name()] = i
-		modInfoMap[m.Name()] = m
-		i++
-	}
-	for _, m := range meta.Modules {
-		for _, dep := range modInfoMap[m.Name].GetDependencies() {
-			if _, ok := modIndices[dep]; !ok {
-				log.Printf("Unknown dependency %q for module %q", modInfoMap[m.Name].GetDependencies(), m.Name)
-			}
-			m.Depends = append(m.Depends, modIndices[dep])
-		}
-	}
-	return &meta, nil
-}
diff --git a/metropolis/pkg/kmod/modinfo.go b/metropolis/pkg/kmod/modinfo.go
deleted file mode 100644
index 5c01681..0000000
--- a/metropolis/pkg/kmod/modinfo.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package kmod
-
-import (
-	"bufio"
-	"bytes"
-	"debug/elf"
-	"errors"
-	"fmt"
-	"io"
-	"strings"
-)
-
-// ModuleInfo contains Linux kernel module metadata. It maps keys to a list
-// of values. For known keys accessor functions are provided.
-type ModuleInfo map[string][]string
-
-// Get returns the first value of the given key or an empty string if the key
-// does not exist.
-func (i ModuleInfo) Get(key string) string {
-	if len(i[key]) > 0 {
-		return i[key][0]
-	}
-	return ""
-}
-
-// Name returns the name of the module as defined in kbuild.
-func (i ModuleInfo) Name() string {
-	return i.Get("name")
-}
-
-// Authors returns the list of a authors of the module.
-func (i ModuleInfo) Authors() []string {
-	return i["author"]
-}
-
-// Description returns a human-readable description of the module or an empty
-// string if it is not available.
-func (i ModuleInfo) Description() string {
-	return i.Get("description")
-}
-
-// GetDependencies returns a list of module names which need to be loaded
-// before this one.
-func (i ModuleInfo) GetDependencies() []string {
-	if len(i["depends"]) == 1 && i["depends"][0] == "" {
-		return nil
-	}
-	return i["depends"]
-}
-
-type OptionalDependencies struct {
-	// Pre contains a list of module names to be optionally loaded before the
-	// module itself.
-	Pre []string
-	// Post contains a list of module names to be optionally loaded after the
-	// module itself.
-	Post []string
-}
-
-// GetOptionalDependencies returns a set of modules which are recommended to
-// be loaded before and after this module. These are optional, but enhance
-// the functionality of this module.
-func (i ModuleInfo) GetOptionalDependencies() OptionalDependencies {
-	var out OptionalDependencies
-	for _, s := range i["softdep"] {
-		tokens := strings.Fields(s)
-		const (
-			MODE_IDLE = 0
-			MODE_PRE  = 1
-			MODE_POST = 2
-		)
-		var state = MODE_IDLE
-		for _, token := range tokens {
-			switch token {
-			case "pre:":
-				state = MODE_PRE
-			case "post:":
-				state = MODE_POST
-			default:
-				switch state {
-				case MODE_PRE:
-					out.Pre = append(out.Pre, token)
-				case MODE_POST:
-					out.Post = append(out.Post, token)
-				default:
-				}
-			}
-		}
-	}
-	return out
-}
-
-// Aliases returns a list of match expressions for matching devices handled
-// by this module. Every returned string consists of a literal as well as '*'
-// wildcards matching one or more characters. These should be matched against
-// the kobject MODALIAS field or the modalias sysfs file.
-func (i ModuleInfo) Aliases() []string {
-	return i["alias"]
-}
-
-// Firmware returns a list of firmware file paths required by this module.
-// These paths are usually relative to the root of a linux-firmware install
-// unless the firmware is non-redistributable.
-func (i ModuleInfo) Firmware() []string {
-	return i["firmware"]
-}
-
-// Licenses returns the licenses use of this module is governed by.
-// For mainline modules, the list of valid license strings is
-// documented in the kernel's Documentation/process/license-rules.rst file
-// under the `MODULE_LICENSE` section.
-func (i ModuleInfo) Licenses() []string {
-	return i["license"]
-}
-
-// IsInTree returns true if the module was built in the Linux source tree and
-// not externally. This does not necessarily mean that the module is in the
-// mainline kernel.
-func (i ModuleInfo) IsInTree() bool {
-	return i.Get("intree") == "Y"
-}
-
-// vermagic and retpoline are intentionally not exposed here, if you need them
-// you should know how to get them out of the map yourself as AFAIK these
-// are not a stable interface and most programs should not process them.
-
-func nullByteSplit(data []byte, atEOF bool) (advance int, token []byte, err error) {
-	if atEOF && len(data) == 0 {
-		return 0, nil, nil
-	}
-	if i := bytes.IndexByte(data, 0x00); i >= 0 {
-		return i + 1, bytes.TrimLeft(data[0:i], "\x00"), nil
-	}
-	if atEOF {
-		return len(data), data, nil
-	}
-	return 0, nil, nil
-}
-
-// GetModuleInfo looks for a ".modinfo" section in the passed ELF Linux kernel
-// module and parses it into a ModuleInfo structure.
-func GetModuleInfo(e *elf.File) (ModuleInfo, error) {
-	for _, section := range e.Sections {
-		if section.Name == ".modinfo" {
-			out := make(ModuleInfo)
-			s := bufio.NewScanner(io.NewSectionReader(section, 0, int64(section.Size)))
-			s.Split(nullByteSplit)
-
-			for s.Scan() {
-				// Format is <key>=<value>
-				key, value, ok := bytes.Cut(s.Bytes(), []byte("="))
-				if !ok {
-					continue
-				}
-				keyStr := string(key)
-				out[keyStr] = append(out[keyStr], string(value))
-			}
-			return out, nil
-		}
-	}
-	return nil, errors.New("no .modinfo section found")
-}
-
-// GetBuiltinModulesInfo parses all modinfo structures for builtin modules from
-// a modinfo file (modules.builtin.modinfo).
-func GetBuiltinModulesInfo(f io.Reader) ([]ModuleInfo, error) {
-	var out []ModuleInfo
-	s := bufio.NewScanner(f)
-	s.Split(nullByteSplit)
-
-	currModule := make(ModuleInfo)
-	for s.Scan() {
-		if s.Err() != nil {
-			return nil, fmt.Errorf("failed scanning for next token: %w", s.Err())
-		}
-		// Format is <module>.<key>=<value>
-		modName, entry, ok := bytes.Cut(s.Bytes(), []byte{'.'})
-		if !ok {
-			continue
-		}
-		if string(modName) != currModule.Name() {
-			if currModule.Name() != "" {
-				out = append(out, currModule)
-			}
-			currModule = make(ModuleInfo)
-			currModule["name"] = []string{string(modName)}
-		}
-		key, value, ok := bytes.Cut(entry, []byte("="))
-		if !ok {
-			continue
-		}
-		keyStr := string(key)
-		currModule[keyStr] = append(currModule[keyStr], string(value))
-	}
-	if currModule.Name() != "" {
-		out = append(out, currModule)
-	}
-	seenModNames := make(map[string]bool)
-	for _, m := range out {
-		if seenModNames[m.Name()] {
-			return nil, fmt.Errorf("duplicate/out-of-order module metadata for module %q", m)
-		}
-		seenModNames[m.Name()] = true
-	}
-	return out, nil
-}
diff --git a/metropolis/pkg/kmod/radix.go b/metropolis/pkg/kmod/radix.go
deleted file mode 100644
index 7dfc68c..0000000
--- a/metropolis/pkg/kmod/radix.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package kmod
-
-import (
-	"errors"
-	"fmt"
-	"log"
-	"sort"
-	"strings"
-
-	kmodpb "source.monogon.dev/metropolis/pkg/kmod/spec"
-)
-
-// LookupModules looks up all matching modules for a given modalias device
-// identifier.
-func LookupModules(meta *kmodpb.Meta, modalias string) (mods []*kmodpb.Module) {
-	matches := make(map[uint32]bool)
-	lookupModulesRec(meta.ModuleDeviceMatches, modalias, matches)
-	for idx := range matches {
-		mods = append(mods, meta.Modules[idx])
-	}
-	sort.Slice(mods, func(i, j int) bool { return mods[i].Name < mods[j].Name })
-	return
-}
-
-func lookupModulesRec(n *kmodpb.RadixNode, needle string, matches map[uint32]bool) {
-	for _, c := range n.Children {
-		switch c.Type {
-		case kmodpb.RadixNode_LITERAL:
-			if len(needle) < len(c.Literal) {
-				continue
-			}
-			if c.Literal == needle[:len(c.Literal)] {
-				lookupModulesRec(c, needle[len(c.Literal):], matches)
-			}
-		case kmodpb.RadixNode_WILDCARD:
-			for i := 0; i <= len(needle); i++ {
-				lookupModulesRec(c, needle[i:], matches)
-			}
-		case kmodpb.RadixNode_SINGLE_WILDCARD:
-			if len(needle) < 1 {
-				continue
-			}
-			lookupModulesRec(c, needle[1:], matches)
-		case kmodpb.RadixNode_BYTE_RANGE:
-			if len(needle) < 1 {
-				continue
-			}
-			if needle[0] >= byte(c.StartByte) && needle[0] <= byte(c.EndByte) {
-				lookupModulesRec(c, needle[1:], matches)
-			}
-		}
-	}
-	if len(needle) == 0 {
-		for _, mi := range n.ModuleIndex {
-			matches[mi] = true
-		}
-	}
-}
-
-// AddPattern adds a new pattern associated with a moduleIndex to the radix tree
-// rooted at root.
-func AddPattern(root *kmodpb.RadixNode, pattern string, moduleIndex uint32) error {
-	pp, err := parsePattern(pattern)
-	if err != nil {
-		return fmt.Errorf("error parsing pattern %q: %w", pattern, err)
-	}
-	if len(pp) > 0 {
-		pp[len(pp)-1].ModuleIndex = []uint32{moduleIndex}
-	} else {
-		// This exists to handle empty patterns, which have little use in
-		// practice (but their behavior is well-defined). It exists primarily
-		// to not crash in that case as well as to appease the Fuzzer.
-		root.ModuleIndex = append(root.ModuleIndex, moduleIndex)
-	}
-	return addPatternRec(root, pp, nil)
-}
-
-// addPatternRec recursively adds a new pattern to the radix tree.
-// If currPartOverride is non-nil it is used instead of the first part in the
-// parts array.
-func addPatternRec(n *kmodpb.RadixNode, parts []*kmodpb.RadixNode, currPartOverride *kmodpb.RadixNode) error {
-	if len(parts) == 0 {
-		return nil
-	}
-	var currPart *kmodpb.RadixNode
-	if currPartOverride != nil {
-		currPart = currPartOverride
-	} else {
-		currPart = parts[0]
-	}
-	for _, c := range n.Children {
-		if c.Type != currPart.Type {
-			continue
-		}
-		switch c.Type {
-		case kmodpb.RadixNode_LITERAL:
-			if c.Literal[0] == currPart.Literal[0] {
-				var i int
-				for i < len(c.Literal) && i < len(currPart.Literal) && c.Literal[i] == currPart.Literal[i] {
-					i++
-				}
-				if i == len(c.Literal) && i == len(currPart.Literal) {
-					if len(parts) == 1 {
-						c.ModuleIndex = append(c.ModuleIndex, parts[0].ModuleIndex...)
-						return nil
-					}
-					return addPatternRec(c, parts[1:], nil)
-				}
-				if i == len(c.Literal) {
-					return addPatternRec(c, parts, &kmodpb.RadixNode{Type: kmodpb.RadixNode_LITERAL, Literal: currPart.Literal[i:], ModuleIndex: currPart.ModuleIndex})
-				}
-				// Split current node
-				splitOldPart := &kmodpb.RadixNode{
-					Type:        kmodpb.RadixNode_LITERAL,
-					Literal:     c.Literal[i:],
-					Children:    c.Children,
-					ModuleIndex: c.ModuleIndex,
-				}
-				var splitNewPart *kmodpb.RadixNode
-				// Current part is a strict subset of the node being traversed
-				if i == len(currPart.Literal) {
-					if len(parts) < 2 {
-						c.Children = []*kmodpb.RadixNode{splitOldPart}
-						c.Literal = currPart.Literal
-						c.ModuleIndex = currPart.ModuleIndex
-						return nil
-					}
-					splitNewPart = parts[1]
-					parts = parts[1:]
-				} else {
-					splitNewPart = &kmodpb.RadixNode{
-						Type:        kmodpb.RadixNode_LITERAL,
-						Literal:     currPart.Literal[i:],
-						ModuleIndex: currPart.ModuleIndex,
-					}
-				}
-				c.Children = []*kmodpb.RadixNode{
-					splitOldPart,
-					splitNewPart,
-				}
-				c.Literal = currPart.Literal[:i]
-				c.ModuleIndex = nil
-				return addPatternRec(splitNewPart, parts[1:], nil)
-			}
-
-		case kmodpb.RadixNode_BYTE_RANGE:
-			if c.StartByte == currPart.StartByte && c.EndByte == currPart.EndByte {
-				if len(parts) == 1 {
-					c.ModuleIndex = append(c.ModuleIndex, parts[0].ModuleIndex...)
-				}
-				return addPatternRec(c, parts[1:], nil)
-			}
-		case kmodpb.RadixNode_SINGLE_WILDCARD, kmodpb.RadixNode_WILDCARD:
-			if len(parts) == 1 {
-				c.ModuleIndex = append(c.ModuleIndex, parts[0].ModuleIndex...)
-			}
-			return addPatternRec(c, parts[1:], nil)
-		}
-	}
-	// No child or common prefix found, append node
-	n.Children = append(n.Children, currPart)
-	return addPatternRec(currPart, parts[1:], nil)
-}
-
-// PrintTree prints the tree from the given root node to standard out.
-// The output is not stable and should only be used for debugging/diagnostics.
-// It will log and exit the process if it encounters invalid nodes.
-func PrintTree(r *kmodpb.RadixNode) {
-	printTree(r, 0, false)
-}
-
-func printTree(r *kmodpb.RadixNode, indent int, noIndent bool) {
-	if !noIndent {
-		for i := 0; i < indent; i++ {
-			fmt.Print("  ")
-		}
-	}
-	if len(r.ModuleIndex) > 0 {
-		fmt.Printf("%v ", r.ModuleIndex)
-	}
-	switch r.Type {
-	case kmodpb.RadixNode_LITERAL:
-		fmt.Printf("%q: ", r.Literal)
-	case kmodpb.RadixNode_SINGLE_WILDCARD:
-		fmt.Printf("?: ")
-	case kmodpb.RadixNode_WILDCARD:
-		fmt.Printf("*: ")
-	case kmodpb.RadixNode_BYTE_RANGE:
-		fmt.Printf("[%c-%c]: ", rune(r.StartByte), rune(r.EndByte))
-	default:
-		log.Fatalf("Unknown tree type %T\n", r)
-	}
-	if len(r.Children) == 1 {
-		printTree(r.Children[0], indent, true)
-		return
-	}
-	fmt.Println("")
-	for _, c := range r.Children {
-		printTree(c, indent+1, false)
-	}
-}
-
-// parsePattern parses a string pattern into a non-hierarchical list of
-// RadixNodes. These nodes can then be futher modified and integrated into
-// a Radix tree.
-func parsePattern(pattern string) ([]*kmodpb.RadixNode, error) {
-	var out []*kmodpb.RadixNode
-	var i int
-	var currentLiteral strings.Builder
-	storeCurrentLiteral := func() {
-		if currentLiteral.Len() > 0 {
-			out = append(out, &kmodpb.RadixNode{
-				Type:    kmodpb.RadixNode_LITERAL,
-				Literal: currentLiteral.String(),
-			})
-			currentLiteral.Reset()
-		}
-	}
-	for i < len(pattern) {
-		switch pattern[i] {
-		case '*':
-			storeCurrentLiteral()
-			i += 1
-			if len(out) > 0 && out[len(out)-1].Type == kmodpb.RadixNode_WILDCARD {
-				continue
-			}
-			out = append(out, &kmodpb.RadixNode{
-				Type: kmodpb.RadixNode_WILDCARD,
-			})
-		case '?':
-			storeCurrentLiteral()
-			out = append(out, &kmodpb.RadixNode{
-				Type: kmodpb.RadixNode_SINGLE_WILDCARD,
-			})
-			i += 1
-		case '[':
-			storeCurrentLiteral()
-			if len(pattern) <= i+4 {
-				return nil, errors.New("illegal byte range notation, not enough characters")
-			}
-			if pattern[i+2] != '-' || pattern[i+4] != ']' {
-				return nil, errors.New("illegal byte range notation, incorrect dash or closing character")
-			}
-			nn := &kmodpb.RadixNode{
-				Type:      kmodpb.RadixNode_BYTE_RANGE,
-				StartByte: uint32(pattern[i+1]),
-				EndByte:   uint32(pattern[i+3]),
-			}
-			if nn.StartByte > nn.EndByte {
-				return nil, errors.New("byte range start byte larger than end byte")
-			}
-			out = append(out, nn)
-			i += 5
-		case '\\':
-			if len(pattern) <= i+1 {
-				return nil, errors.New("illegal escape character at the end of the string")
-			}
-			currentLiteral.WriteByte(pattern[i+1])
-			i += 2
-		default:
-			currentLiteral.WriteByte(pattern[i])
-			i += 1
-		}
-	}
-	storeCurrentLiteral()
-	return out, nil
-}
diff --git a/metropolis/pkg/kmod/radix_test.go b/metropolis/pkg/kmod/radix_test.go
deleted file mode 100644
index 7a815ba..0000000
--- a/metropolis/pkg/kmod/radix_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package kmod
-
-import (
-	"fmt"
-	"regexp"
-	"strings"
-	"testing"
-	"unicode"
-
-	"github.com/google/go-cmp/cmp"
-	"google.golang.org/protobuf/testing/protocmp"
-
-	kmodpb "source.monogon.dev/metropolis/pkg/kmod/spec"
-)
-
-func TestParsePattern(t *testing.T) {
-	cases := []struct {
-		name          string
-		pattern       string
-		expectedNodes []*kmodpb.RadixNode
-	}{
-		{"Empty", "", nil},
-		{"SingleLiteral", "asdf", []*kmodpb.RadixNode{{Type: kmodpb.RadixNode_LITERAL, Literal: "asdf"}}},
-		{"SingleWildcard", "as*df", []*kmodpb.RadixNode{
-			{Type: kmodpb.RadixNode_LITERAL, Literal: "as"},
-			{Type: kmodpb.RadixNode_WILDCARD},
-			{Type: kmodpb.RadixNode_LITERAL, Literal: "df"},
-		}},
-		{"EscapedWildcard", "a\\*", []*kmodpb.RadixNode{{Type: kmodpb.RadixNode_LITERAL, Literal: "a*"}}},
-		{"SingleRange", "[y-z]", []*kmodpb.RadixNode{{Type: kmodpb.RadixNode_BYTE_RANGE, StartByte: 121, EndByte: 122}}},
-		{"SingleWildcardChar", "a?c", []*kmodpb.RadixNode{
-			{Type: kmodpb.RadixNode_LITERAL, Literal: "a"},
-			{Type: kmodpb.RadixNode_SINGLE_WILDCARD},
-			{Type: kmodpb.RadixNode_LITERAL, Literal: "c"},
-		}},
-	}
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			out, err := parsePattern(c.pattern)
-			if err != nil {
-				t.Fatal(err)
-			}
-			diff := cmp.Diff(c.expectedNodes, out, protocmp.Transform())
-			if diff != "" {
-				t.Error(diff)
-			}
-		})
-	}
-}
-
-func TestLookupComplex(t *testing.T) {
-	root := &kmodpb.RadixNode{
-		Type: kmodpb.RadixNode_LITERAL,
-	}
-	if err := AddPattern(root, "usb:v0B95p1790d*dc*dsc*dp*icFFiscFFip00in*", 2); err != nil {
-		t.Error(err)
-	}
-	if err := AddPattern(root, "usb:v0B95p178Ad*dc*dsc*dp*icFFiscFFip00in*", 3); err != nil {
-		t.Error(err)
-	}
-	if err := AddPattern(root, "acpi*:PNP0C14:*", 10); err != nil {
-		t.Error(err)
-	}
-	matches := make(map[uint32]bool)
-	lookupModulesRec(root, "acpi:PNP0C14:asdf", matches)
-	if !matches[10] {
-		t.Error("value should match pattern 10")
-	}
-}
-
-func isASCII(s string) bool {
-	for i := 0; i < len(s); i++ {
-		if s[i] > unicode.MaxASCII {
-			return false
-		}
-	}
-	return true
-}
-
-func FuzzRadixImpl(f *testing.F) {
-	f.Add("acpi*:PNP0C14:*\x00usb:v0B95p1790d*dc*dsc*dp*icFFiscFFip00in*", "acpi:PNP0C14:asdf\x00usb:v0B95p1790d0dc0dsc0dp0icFFiscFFip00in")
-	f.Fuzz(func(t *testing.T, a string, b string) {
-		patternsRaw := strings.Split(a, "\x00")
-		values := strings.Split(b, "\x00")
-		var patternsRegexp []regexp.Regexp
-		root := &kmodpb.RadixNode{
-			Type: kmodpb.RadixNode_LITERAL,
-		}
-		for i, p := range patternsRaw {
-			if !isASCII(p) {
-				// Ignore non-ASCII patterns, there are tons of edge cases with them
-				return
-			}
-			pp, err := parsePattern(p)
-			if err != nil {
-				// Bad pattern
-				return
-			}
-			if err := AddPattern(root, p, uint32(i)); err != nil {
-				t.Fatal(err)
-			}
-			var regexb strings.Builder
-			regexb.WriteString("(?s)^")
-			for _, part := range pp {
-				switch part.Type {
-				case kmodpb.RadixNode_LITERAL:
-					regexb.WriteString(regexp.QuoteMeta(part.Literal))
-				case kmodpb.RadixNode_SINGLE_WILDCARD:
-					regexb.WriteString(".")
-				case kmodpb.RadixNode_WILDCARD:
-					regexb.WriteString(".*")
-				case kmodpb.RadixNode_BYTE_RANGE:
-					regexb.WriteString(fmt.Sprintf("[%s-%s]", regexp.QuoteMeta(string([]rune{rune(part.StartByte)})), regexp.QuoteMeta(string([]rune{rune(part.EndByte)}))))
-				default:
-					t.Errorf("Unknown node type %v", part.Type)
-				}
-			}
-			regexb.WriteString("$")
-			patternsRegexp = append(patternsRegexp, *regexp.MustCompile(regexb.String()))
-		}
-		for _, v := range values {
-			if !isASCII(v) {
-				// Ignore non-ASCII values
-				return
-			}
-			if len(v) > 64 {
-				// Ignore big values as they are not realistic and cause the
-				// wildcard matches to be very expensive.
-				return
-			}
-			radixMatchesSet := make(map[uint32]bool)
-			lookupModulesRec(root, v, radixMatchesSet)
-			for i, re := range patternsRegexp {
-				if re.MatchString(v) {
-					if !radixMatchesSet[uint32(i)] {
-						t.Errorf("Pattern %q is expected to match %q but didn't", patternsRaw[i], v)
-					}
-				} else {
-					if radixMatchesSet[uint32(i)] {
-						t.Errorf("Pattern %q is not expected to match %q but did", patternsRaw[i], v)
-					}
-				}
-			}
-		}
-	})
-}
diff --git a/metropolis/pkg/kmod/spec/BUILD.bazel b/metropolis/pkg/kmod/spec/BUILD.bazel
deleted file mode 100644
index 145ab7b..0000000
--- a/metropolis/pkg/kmod/spec/BUILD.bazel
+++ /dev/null
@@ -1,23 +0,0 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
-
-proto_library(
-    name = "spec_proto",
-    srcs = ["meta.proto"],
-    visibility = ["//visibility:public"],
-)
-
-go_proto_library(
-    name = "spec_go_proto",
-    importpath = "source.monogon.dev/metropolis/pkg/kmod/spec",
-    proto = ":spec_proto",
-    visibility = ["//visibility:public"],
-)
-
-go_library(
-    name = "spec",
-    embed = [":spec_go_proto"],
-    importpath = "source.monogon.dev/metropolis/pkg/kmod/spec",
-    visibility = ["//visibility:public"],
-)
diff --git a/metropolis/pkg/kmod/spec/gomod-generated-placeholder.go b/metropolis/pkg/kmod/spec/gomod-generated-placeholder.go
deleted file mode 100644
index f09cd57..0000000
--- a/metropolis/pkg/kmod/spec/gomod-generated-placeholder.go
+++ /dev/null
@@ -1 +0,0 @@
-package spec
diff --git a/metropolis/pkg/kmod/spec/meta.proto b/metropolis/pkg/kmod/spec/meta.proto
deleted file mode 100644
index a608b69..0000000
--- a/metropolis/pkg/kmod/spec/meta.proto
+++ /dev/null
@@ -1,57 +0,0 @@
-syntax = "proto3";
-
-package metropolis.pkg.kmod;
-
-option go_package = "source.monogon.dev/metropolis/pkg/kmod/spec";
-
-// Module contains important metadata about a Linux kernel module.
-message Module {
-    // Name of the module
-    string name = 1;
-    // Path of the module, relative to the module root.
-    // Unset if built-in.
-    string path = 2;
-    // List of Meta.modules indices on which this module depends.
-    repeated uint32 depends = 3;
-}
-
-message RadixNode {
-    enum Type {
-        // Matches one or more characters literally.
-        LITERAL = 0;
-        // Matches zero or more arbitrary characters.
-        WILDCARD = 1;
-        // Matches exactly one arbitrary character.
-        SINGLE_WILDCARD = 2;
-        // Matches exactly one character between start_byte and end_byte.
-        BYTE_RANGE = 3;
-        // Root matches nothing, but serves a the root node for a radix
-        // tree.
-        ROOT = 4;
-    }
-    Type type = 1;
-
-    // Only valid for LITERAL type
-    string literal = 2;
-
-    // Only valid when BYTE_RANGE type
-    uint32 start_byte = 3;
-    uint32 end_byte = 4;
-
-    // Contains a list of radix nodes which are children of this node.
-    repeated RadixNode children = 5;
-
-    // A list of module indices (in the Meta.modules list) which have
-    // match expressions ending at this node.
-    repeated uint32 module_index = 6;
-}
-
-// Meta contains metadata about all modules in a Linux kernel
-message Meta {
-    // Contains a list of modules, including built-in ones.
-    repeated Module modules = 1;
-
-    // Contains the root node of a radix tree for looking up modules to load
-    // for a given device modalias.
-    RadixNode module_device_matches = 2;
-}
\ No newline at end of file
diff --git a/metropolis/pkg/kmod/syscall.go b/metropolis/pkg/kmod/syscall.go
deleted file mode 100644
index 41d3233..0000000
--- a/metropolis/pkg/kmod/syscall.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package kmod
-
-import (
-	"errors"
-	"fmt"
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// LoadModule loads a kernel module into the kernel.
-func LoadModule(file syscall.Conn, params string, flags uintptr) error {
-	sc, err := file.SyscallConn()
-	if err != nil {
-		return fmt.Errorf("failed getting SyscallConn handle: %w", err)
-	}
-	paramsRaw, err := unix.BytePtrFromString(params)
-	if err != nil {
-		return errors.New("invalid null byte in params")
-	}
-	var errNo unix.Errno
-	ctrlErr := sc.Control(func(fd uintptr) {
-		_, _, errNo = unix.Syscall(unix.SYS_FINIT_MODULE, fd, uintptr(unsafe.Pointer(paramsRaw)), flags)
-	})
-	if ctrlErr != nil {
-		return fmt.Errorf("unable to get control handle: %w", ctrlErr)
-	}
-	if errNo != unix.Errno(0) {
-		return errNo
-	}
-	return nil
-}
-
-// UnloadModule unloads a kernel module from the kernel.
-func UnloadModule(name string, flags uintptr) error {
-	nameRaw, err := unix.BytePtrFromString(name)
-	if err != nil {
-		return errors.New("invalid null byte in name")
-	}
-	_, _, errNo := unix.Syscall(unix.SYS_DELETE_MODULE, uintptr(unsafe.Pointer(nameRaw)), flags, 0)
-	if errNo != unix.Errno(0) {
-		return errNo
-	}
-	return nil
-}
diff --git a/metropolis/pkg/logbuffer/BUILD.bazel b/metropolis/pkg/logbuffer/BUILD.bazel
deleted file mode 100644
index 8433802..0000000
--- a/metropolis/pkg/logbuffer/BUILD.bazel
+++ /dev/null
@@ -1,22 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "logbuffer",
-    srcs = [
-        "linebuffer.go",
-        "logbuffer.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/logbuffer",
-    visibility = ["//metropolis:__subpackages__"],
-    deps = ["//metropolis/pkg/logtree/proto"],
-)
-
-go_test(
-    name = "logbuffer_test",
-    srcs = [
-        "linebuffer_test.go",
-        "logbuffer_test.go",
-    ],
-    embed = [":logbuffer"],
-    deps = ["@com_github_stretchr_testify//require"],
-)
diff --git a/metropolis/pkg/logbuffer/linebuffer.go b/metropolis/pkg/logbuffer/linebuffer.go
deleted file mode 100644
index 3892e0c..0000000
--- a/metropolis/pkg/logbuffer/linebuffer.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logbuffer
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-	"sync"
-
-	lpb "source.monogon.dev/metropolis/pkg/logtree/proto"
-)
-
-// Line is a line stored in the log buffer - a string, that has been perhaps
-// truncated (due to exceeded limits).
-type Line struct {
-	Data           string
-	OriginalLength int
-}
-
-// Truncated returns whether this line has been truncated to fit limits.
-func (l *Line) Truncated() bool {
-	return l.OriginalLength > len(l.Data)
-}
-
-// String returns the line with an ellipsis at the end (...) if the line has been
-// truncated, or the original line otherwise.
-func (l *Line) String() string {
-	if l.Truncated() {
-		return l.Data + "..."
-	}
-	return l.Data
-}
-
-// ProtoLog returns a Logging-specific protobuf structure.
-func (l *Line) ProtoLog() *lpb.LogEntry_Raw {
-	return &lpb.LogEntry_Raw{
-		Data:           l.Data,
-		OriginalLength: int64(l.OriginalLength),
-	}
-}
-
-// LineFromLogProto converts a Logging-specific protobuf message back into a Line.
-func LineFromLogProto(raw *lpb.LogEntry_Raw) (*Line, error) {
-	if raw.OriginalLength < int64(len(raw.Data)) {
-		return nil, fmt.Errorf("original_length smaller than length of data")
-	}
-	originalLength := int(raw.OriginalLength)
-	if int64(originalLength) < raw.OriginalLength {
-		return nil, fmt.Errorf("original_length larger than native int size")
-	}
-	return &Line{
-		Data:           raw.Data,
-		OriginalLength: originalLength,
-	}, nil
-}
-
-// LineBuffer is a io.WriteCloser that will call a given callback every time a line
-// is completed.
-type LineBuffer struct {
-	maxLineLength int
-	cb            LineBufferCallback
-
-	mu  sync.Mutex
-	cur strings.Builder
-	// length is the length of the line currently being written - this will continue to
-	// increase, even if the string exceeds maxLineLength.
-	length int
-	closed bool
-}
-
-// LineBufferCallback is a callback that will get called any time the line is
-// completed. The function must not cause another write to the LineBuffer, or the
-// program will deadlock.
-type LineBufferCallback func(*Line)
-
-// NewLineBuffer creates a new LineBuffer with a given line length limit and
-// callback.
-func NewLineBuffer(maxLineLength int, cb LineBufferCallback) *LineBuffer {
-	return &LineBuffer{
-		maxLineLength: maxLineLength,
-		cb:            cb,
-	}
-}
-
-// writeLimited writes to the internal buffer, making sure that its size does not
-// exceed the maxLineLength.
-func (l *LineBuffer) writeLimited(data []byte) {
-	l.length += len(data)
-	if l.cur.Len()+len(data) > l.maxLineLength {
-		data = data[:l.maxLineLength-l.cur.Len()]
-	}
-	l.cur.Write(data)
-}
-
-// comitLine calls the callback and resets the builder.
-func (l *LineBuffer) commitLine() {
-	l.cb(&Line{
-		Data:           l.cur.String(),
-		OriginalLength: l.length,
-	})
-	l.cur.Reset()
-	l.length = 0
-}
-
-func (l *LineBuffer) Write(data []byte) (int, error) {
-	var pos = 0
-
-	l.mu.Lock()
-	defer l.mu.Unlock()
-
-	if l.closed {
-		return 0, fmt.Errorf("closed")
-	}
-
-	for {
-		nextNewline := bytes.IndexRune(data[pos:], '\n')
-
-		// No newline in the data, write everything to the current line
-		if nextNewline == -1 {
-			l.writeLimited(data[pos:])
-			break
-		}
-
-		// Write this line and update position
-		l.writeLimited(data[pos : pos+nextNewline])
-		l.commitLine()
-		pos += nextNewline + 1
-
-		// Data ends with a newline, stop now without writing an empty line
-		if nextNewline == len(data)-1 {
-			break
-		}
-	}
-	return len(data), nil
-}
-
-// Close will emit any leftover data in the buffer to the callback. Subsequent
-// calls to Write will fail. Subsequent calls to Close will also fail.
-func (l *LineBuffer) Close() error {
-	if l.closed {
-		return fmt.Errorf("already closed")
-	}
-	l.mu.Lock()
-	defer l.mu.Unlock()
-	l.closed = true
-	if l.length > 0 {
-		l.commitLine()
-	}
-	return nil
-}
-
-func (l *LineBuffer) Sync() error {
-	return nil
-}
diff --git a/metropolis/pkg/logbuffer/linebuffer_test.go b/metropolis/pkg/logbuffer/linebuffer_test.go
deleted file mode 100644
index 699c3dc..0000000
--- a/metropolis/pkg/logbuffer/linebuffer_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logbuffer
-
-import (
-	"fmt"
-	"testing"
-)
-
-func TestLineBuffer(t *testing.T) {
-	var lines []*Line
-	lb := NewLineBuffer(1024, func(l *Line) {
-		lines = append(lines, l)
-	})
-
-	compare := func(a []*Line, b ...string) string {
-		msg := fmt.Sprintf("want %v, got %v", a, b)
-		if len(a) != len(b) {
-			return msg
-		}
-		for i := range a {
-			if a[i].String() != b[i] {
-				return msg
-			}
-		}
-		return ""
-	}
-
-	// Write some data.
-	fmt.Fprintf(lb, "foo ")
-	if diff := compare(lines); diff != "" {
-		t.Fatal(diff)
-	}
-	fmt.Fprintf(lb, "bar\n")
-	if diff := compare(lines, "foo bar"); diff != "" {
-		t.Fatal(diff)
-	}
-	fmt.Fprintf(lb, "baz")
-	if diff := compare(lines, "foo bar"); diff != "" {
-		t.Fatal(diff)
-	}
-	fmt.Fprintf(lb, " baz")
-	if diff := compare(lines, "foo bar"); diff != "" {
-		t.Fatal(diff)
-	}
-	// Close and expect flush.
-	if err := lb.Close(); err != nil {
-		t.Fatalf("Close: %v", err)
-	}
-	if diff := compare(lines, "foo bar", "baz baz"); diff != "" {
-		t.Fatal(diff)
-	}
-
-	// Check behaviour after close
-	if _, err := fmt.Fprintf(lb, "nope"); err == nil {
-		t.Fatalf("Write after Close: wanted  error, got nil")
-	}
-	if err := lb.Close(); err == nil {
-		t.Fatalf("second Close: wanted error, got nil")
-	}
-}
diff --git a/metropolis/pkg/logbuffer/logbuffer.go b/metropolis/pkg/logbuffer/logbuffer.go
deleted file mode 100644
index cd18420..0000000
--- a/metropolis/pkg/logbuffer/logbuffer.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package logbuffer implements a fixed-size in-memory ring buffer for
-// line-separated logs. It implements io.Writer and splits the data into lines.
-// The lines are kept in a ring where the oldest are overwritten once it's
-// full. It allows retrieval of the last n lines. There is a built-in line
-// length limit to bound the memory usage at maxLineLength * size.
-package logbuffer
-
-import (
-	"sync"
-)
-
-// LogBuffer implements a fixed-size in-memory ring buffer for line-separated logs
-type LogBuffer struct {
-	mu      sync.RWMutex
-	content []Line
-	length  int
-	*LineBuffer
-}
-
-// New creates a new LogBuffer with a given ringbuffer size and maximum line
-// length.
-func New(size, maxLineLength int) *LogBuffer {
-	lb := &LogBuffer{
-		content: make([]Line, size),
-	}
-	lb.LineBuffer = NewLineBuffer(maxLineLength, lb.lineCallback)
-	return lb
-}
-
-func (b *LogBuffer) lineCallback(line *Line) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-
-	b.content[b.length%len(b.content)] = *line
-	b.length++
-}
-
-// capToContentLength caps the number of requested lines to what is actually
-// available
-func (b *LogBuffer) capToContentLength(n int) int {
-	// If there aren't enough lines to read, reduce the request size
-	if n > b.length {
-		n = b.length
-	}
-	// If there isn't enough ringbuffer space, reduce the request size
-	if n > len(b.content) {
-		n = len(b.content)
-	}
-	return n
-}
-
-// ReadLines reads the last n lines from the buffer in chronological order. If
-// n is bigger than the ring buffer or the number of available lines only the
-// number of stored lines are returned.
-func (b *LogBuffer) ReadLines(n int) []Line {
-	b.mu.RLock()
-	defer b.mu.RUnlock()
-
-	n = b.capToContentLength(n)
-
-	// Copy references out to keep them around
-	outArray := make([]Line, n)
-	for i := 1; i <= n; i++ {
-		outArray[n-i] = b.content[(b.length-i)%len(b.content)]
-	}
-	return outArray
-}
-
-// ReadLinesTruncated works exactly the same as ReadLines, but adds an ellipsis
-// at the end of every line that was truncated because it was over
-// MaxLineLength
-func (b *LogBuffer) ReadLinesTruncated(n int, ellipsis string) []string {
-	b.mu.RLock()
-	defer b.mu.RUnlock()
-	// This does not use ReadLines() to prevent excessive reference copying and
-	// associated GC pressure since it could process a lot of lines.
-
-	n = b.capToContentLength(n)
-
-	outArray := make([]string, n)
-	for i := 1; i <= n; i++ {
-		line := b.content[(b.length-i)%len(b.content)]
-		outArray[n-i] = line.String()
-	}
-	return outArray
-}
diff --git a/metropolis/pkg/logbuffer/logbuffer_test.go b/metropolis/pkg/logbuffer/logbuffer_test.go
deleted file mode 100644
index c38d7a6..0000000
--- a/metropolis/pkg/logbuffer/logbuffer_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logbuffer
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestSingleLine(t *testing.T) {
-	buf := New(1, 16000)
-	buf.Write([]byte("Hello World\n"))
-	out := buf.ReadLines(1)
-	require.Len(t, out, 1, "Invalid number of lines read")
-	require.Equal(t, "Hello World", out[0].Data, "Read bad log line")
-	require.Equal(t, 11, out[0].OriginalLength, "Invalid line length")
-}
-
-func TestPartialWritesAndReads(t *testing.T) {
-	buf := New(2, 16000)
-	buf.Write([]byte("Hello "))
-	buf.Write([]byte("World\nTest "))
-	buf.Write([]byte("2\n"))
-
-	out := buf.ReadLines(1)
-	require.Len(t, out, 1, "Invalid number of lines for partial read")
-	require.Equal(t, "Test 2", out[0].Data, "Read bad log line")
-
-	out2 := buf.ReadLines(2)
-	require.Len(t, out2, 2, "Invalid number of lines read")
-	require.Equal(t, "Hello World", out2[0].Data, "Read bad log line")
-	require.Equal(t, "Test 2", out2[1].Data, "Read bad log line")
-}
-
-func TestBufferOverwrite(t *testing.T) {
-	buf := New(3, 16000)
-	buf.Write([]byte("Test1\nTest2\nTest3\nTest4\n"))
-
-	out := buf.ReadLines(3)
-	require.Equal(t, out[0].Data, "Test2", "Read bad log line")
-	require.Equal(t, out[1].Data, "Test3", "Read bad log line")
-	require.Equal(t, out[2].Data, "Test4", "Overwritten data is invalid")
-}
-
-func TestTooLargeRequests(t *testing.T) {
-	buf := New(1, 16000)
-	outEmpty := buf.ReadLines(1)
-	require.Len(t, outEmpty, 0, "Returned more data than there is")
-
-	buf.Write([]byte("1\n2\n"))
-	out := buf.ReadLines(2)
-	require.Len(t, out, 1, "Returned more data than the ring buffer can hold")
-}
-
-func TestSpecialCases(t *testing.T) {
-	buf := New(2, 16000)
-	buf.Write([]byte("Test1"))
-	buf.Write([]byte("\nTest2\n"))
-	out := buf.ReadLines(2)
-	require.Len(t, out, 2, "Too many lines written")
-	require.Equal(t, out[0].Data, "Test1", "Read bad log line")
-	require.Equal(t, out[1].Data, "Test2", "Read bad log line")
-}
-
-func TestLineLengthLimit(t *testing.T) {
-	buf := New(2, 6)
-
-	testStr := "Just Testing"
-
-	buf.Write([]byte(testStr + "\nShort\n"))
-
-	out := buf.ReadLines(2)
-	require.Equal(t, len(testStr), out[0].OriginalLength, "Line is over length limit")
-	require.Equal(t, "Just T", out[0].Data, "Log line not properly truncated")
-
-	out2 := buf.ReadLinesTruncated(2, "...")
-	require.Equal(t, out2[0], "Just T...", "Line is over length limit")
-	require.Equal(t, out2[1], "Short", "Truncated small enough line")
-}
diff --git a/metropolis/pkg/logtree/BUILD.bazel b/metropolis/pkg/logtree/BUILD.bazel
deleted file mode 100644
index a2f86ad..0000000
--- a/metropolis/pkg/logtree/BUILD.bazel
+++ /dev/null
@@ -1,58 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "logtree",
-    srcs = [
-        "doc.go",
-        "grpc.go",
-        "journal.go",
-        "journal_entry.go",
-        "journal_subscriber.go",
-        "klog.go",
-        "kmsg.go",
-        "leveled.go",
-        "leveled_payload.go",
-        "logtree.go",
-        "logtree_access.go",
-        "logtree_entry.go",
-        "logtree_publisher.go",
-        "testhelpers.go",
-        "zap.go",
-    ],
-    # TODO(#189): move logtree to //go
-    importpath = "source.monogon.dev/metropolis/pkg/logtree",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//metropolis/pkg/logbuffer",
-        "//metropolis/pkg/logtree/proto",
-        "@com_github_mitchellh_go_wordwrap//:go-wordwrap",
-        "@org_golang_google_grpc//grpclog",
-        "@org_golang_google_protobuf//types/known/timestamppb",
-        "@org_uber_go_zap//:zap",
-        "@org_uber_go_zap//zapcore",
-    ] + select({
-        "@io_bazel_rules_go//go/platform:android": [
-            "@org_golang_x_sys//unix",
-        ],
-        "@io_bazel_rules_go//go/platform:linux": [
-            "@org_golang_x_sys//unix",
-        ],
-        "//conditions:default": [],
-    }),
-)
-
-go_test(
-    name = "logtree_test",
-    srcs = [
-        "journal_test.go",
-        "klog_test.go",
-        "kmsg_test.go",
-        "logtree_test.go",
-        "zap_test.go",
-    ],
-    embed = [":logtree"],
-    deps = [
-        "@com_github_google_go_cmp//cmp",
-        "@org_uber_go_zap//:zap",
-    ],
-)
diff --git a/metropolis/pkg/logtree/doc.go b/metropolis/pkg/logtree/doc.go
deleted file mode 100644
index ab3c537..0000000
--- a/metropolis/pkg/logtree/doc.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package logtree implements a tree-shaped logger for debug events. It provides log publishers (ie. Go code) with a
-glog-like API and io.Writer API, with loggers placed in a hierarchical structure defined by a dot-delimited path
-(called a DN, short for Distinguished Name).
-
-    tree.MustLeveledFor("foo.bar.baz").Warningf("Houston, we have a problem: %v", err)
-    fmt.Fprintf(tree.MustRawFor("foo.bar.baz"), "some\nunstructured\ndata\n")
-
-Logs in this context are unstructured, operational and developer-centric human readable text messages presented as lines
-of text to consumers, with some attached metadata. Logtree does not deal with 'structured' logs as some parts of the
-industry do, and instead defers any machine-readable logs to either be handled by metrics systems like Prometheus or
-event sourcing systems like Kafka.
-
-Tree Structure
-
-As an example, consider an application that produces logs with the following DNs:
-
-    listener.http
-    listener.grpc
-    svc
-    svc.cache
-    svc.cache.gc
-
-This would correspond to a tree as follows:
-
-                          .------.
-                         |   ""   |
-                         | (root) |
-                          '------'
-           .----------------'   '------.
-    .--------------.           .---------------.
-    |     svc      |           |    listener   |
-    '--------------'           '---------------'
-           |                   .----'      '----.
-    .--------------.  .---------------.  .---------------.
-    |  svc.cache   |  | listener.http |  | listener.grpc |
-    '--------------'  '---------------'  '---------------'
-           |
-    .--------------.
-    | svc.cache.gc |
-    '--------------'
-
-In this setup, every DN acts as a separate logging target, each with its own retention policy and quota. Logging to a DN
-under foo.bar does NOT automatically log to foo - all tree mechanisms are applied on log access by consumers. Loggers
-are automatically created on first use, and importantly, can be created at any time, and will automatically be created
-if a sub-DN is created that requires a parent DN to exist first. Note, for instance, that a `listener` logging node was
-created even though the example application only logged to `listener.http` and `listener.grpc`.
-
-An implicit root node is always present in the tree, accessed by DN "" (an empty string). All other logger nodes are
-children (or transitive children) of the root node.
-
-Log consumers (application code that reads the log and passes them on to operators, or ships them off for aggregation in
-other systems) to select subtrees of logs for readout. In the example tree, a consumer could select to either read all
-logs of the entire tree, just a single DN (like svc), or a subtree (like everything under listener, ie. messages emitted
-to listener.http and listener.grpc).
-
-Leveled Log Producer API
-
-As part of the glog-like logging API available to producers, the following metadata is attached to emitted logs in
-addition to the DN of the logger to which the log entry was emitted:
-
- - timestamp at which the entry was emitted
- - a severity level (one of FATAL, ERROR, WARN or INFO)
- - a source of the message (file name and line number)
-
-In addition, the logger mechanism supports a variable verbosity level (so-called 'V-logging') that can be set at every
-node of the tree. For more information about the producer-facing logging API, see the documentation of the LeveledLogger
-interface, which is the main interface exposed to log producers.
-
-If the submitted message contains newlines, it will be split accordingly into a single log entry that contains multiple
-string lines. This allows for log producers to submit long, multi-line messages that are guaranteed to be non-interleaved
-with other entries, and allows for access API consumers to maintain semantic linking between multiple lines being emitted
-as a single atomic entry.
-
-Raw Log Producer API
-
-In addition to leveled, glog-like logging, LogTree supports 'raw logging'. This is implemented as an io.Writer that will
-split incoming bytes into newline-delimited lines, and log them into that logtree's DN. This mechanism is primarily
-intended to support storage of unstructured log data from external processes - for example binaries running with redirected
-stdout/stderr.
-
-Log Access API
-
-The Log Access API is mostly exposed via a single function on the LogTree struct: Read. It allows access to log entries
-that have been already buffered inside LogTree and to subscribe to receive future entries over a channel. As outlined
-earlier, any access can specify whether it is just interested in a single logger (addressed by DN), or a subtree of
-loggers.
-
-Due to the current implementation of the logtree, subtree accesses of backlogged data is significantly slower than
-accessing data of just one DN, or the whole tree (as every subtree backlog access performs a scan on all logged data).
-Thus, log consumers should be aware that it is much better to stream and buffer logs specific to some long-standing
-logging request on their own, rather than repeatedly perform reads of a subtree backlog.
-
-The data returned from the log access API is a LogEntry, which itself can contain either a raw logging entry, or a leveled
-logging entry. Helper functions are available on LogEntry that allow canonical string representations to be returned, for
-easy use in consuming tools/interfaces. Alternatively, the consumer can itself access the internal raw/leveled entries and
-print them according to their own preferred format.
-
-*/
-package logtree
diff --git a/metropolis/pkg/logtree/grpc.go b/metropolis/pkg/logtree/grpc.go
deleted file mode 100644
index 3b2594d..0000000
--- a/metropolis/pkg/logtree/grpc.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package logtree
-
-import "google.golang.org/grpc/grpclog"
-
-// GRPCify turns a LeveledLogger into a go-grpc compatible logger.
-func GRPCify(logger LeveledLogger) grpclog.LoggerV2 {
-	lp, ok := logger.(*leveledPublisher)
-	if !ok {
-		// Fail fast, as this is a programming error.
-		panic("Expected *leveledPublisher in LeveledLogger from supervisor")
-	}
-
-	lp2 := *lp
-	lp2.depth += 1
-
-	return &leveledGRPCV2{
-		lp: &lp2,
-	}
-}
-
-type leveledGRPCV2 struct {
-	lp *leveledPublisher
-}
-
-func (g *leveledGRPCV2) Info(args ...interface{}) {
-	g.lp.Info(args...)
-}
-
-func (g *leveledGRPCV2) Infoln(args ...interface{}) {
-	g.lp.Info(args...)
-}
-
-func (g *leveledGRPCV2) Infof(format string, args ...interface{}) {
-	g.lp.Infof(format, args...)
-}
-
-func (g *leveledGRPCV2) Warning(args ...interface{}) {
-	g.lp.Warning(args...)
-}
-
-func (g *leveledGRPCV2) Warningln(args ...interface{}) {
-	g.lp.Warning(args...)
-}
-
-func (g *leveledGRPCV2) Warningf(format string, args ...interface{}) {
-	g.lp.Warningf(format, args...)
-}
-
-func (g *leveledGRPCV2) Error(args ...interface{}) {
-	g.lp.Error(args...)
-}
-
-func (g *leveledGRPCV2) Errorln(args ...interface{}) {
-	g.lp.Error(args...)
-}
-
-func (g *leveledGRPCV2) Errorf(format string, args ...interface{}) {
-	g.lp.Errorf(format, args...)
-}
-
-func (g *leveledGRPCV2) Fatal(args ...interface{}) {
-	g.lp.Fatal(args...)
-}
-
-func (g *leveledGRPCV2) Fatalln(args ...interface{}) {
-	g.lp.Fatal(args...)
-}
-
-func (g *leveledGRPCV2) Fatalf(format string, args ...interface{}) {
-	g.lp.Fatalf(format, args...)
-}
-
-func (g *leveledGRPCV2) V(l int) bool {
-	return g.lp.V(VerbosityLevel(l)).Enabled()
-}
diff --git a/metropolis/pkg/logtree/journal.go b/metropolis/pkg/logtree/journal.go
deleted file mode 100644
index 412c042..0000000
--- a/metropolis/pkg/logtree/journal.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"errors"
-	"sort"
-	"strings"
-	"sync"
-)
-
-// DN is the Distinguished Name, a dot-delimited path used to address loggers
-// within a LogTree. For example, "foo.bar" designates the 'bar' logger node
-// under the 'foo' logger node under the root node of the logger. An empty
-// string is the root node of the tree.
-type DN string
-
-var (
-	ErrInvalidDN = errors.New("invalid DN")
-)
-
-// Path return the parts of a DN, ie. all the elements of the dot-delimited DN
-// path.  For the root node, an empty list will be returned. An error will be
-// returned if the DN is invalid (contains empty parts, eg. `foo..bar`, `.foo`
-// or `foo.`.
-func (d DN) Path() ([]string, error) {
-	if d == "" {
-		return nil, nil
-	}
-	parts := strings.Split(string(d), ".")
-	for _, p := range parts {
-		if p == "" {
-			return nil, ErrInvalidDN
-		}
-	}
-	return parts, nil
-}
-
-// journal is the main log recording structure of logtree. It manages linked lists
-// containing the actual log entries, and implements scans across them. It does not
-// understand the hierarchical nature of logtree, and instead sees all entries as
-// part of a global linked list and a local linked list for a given DN.
-//
-// The global linked list is represented by the head/tail pointers in journal and
-// nextGlobal/prevGlobal pointers in entries. The local linked lists are
-// represented by heads[DN]/tails[DN] pointers in journal and nextLocal/prevLocal
-// pointers in entries:
-//
-//	      .------------.        .------------.        .------------.
-//	      | dn: A.B    |        | dn: Z      |        | dn: A.B    |
-//	      | time: 1    |        | time: 2    |        | time: 3    |
-//	      |------------|        |------------|        |------------|
-//	      | nextGlobal :------->| nextGlobal :------->| nextGlobal :--> nil
-//	nil <-: prevGlobal |<-------: prevGlobal |<-------| prevGlobal |
-//	      |------------|        |------------|  n     |------------|
-//	      | nextLocal  :---. n  | nextLocal  :->i .-->| nextLocal  :--> nil
-//	nil <-: prevLocal  |<--: i<-: prevLocal  |  l :---| prevLocal  |
-//	      '------------'   | l  '------------'    |   '------------'
-//	           ^           '----------------------'         ^
-//	           |                      ^                     |
-//	           |                      |                     |
-//	        ( head )             ( tails[Z] )            ( tail )
-//	     ( heads[A.B] )          ( heads[Z] )         ( tails[A.B] )
-type journal struct {
-	// mu locks the rest of the structure. It must be taken during any operation on the
-	// journal.
-	mu sync.RWMutex
-
-	// tail is the side of the global linked list that contains the newest log entry,
-	// ie. the one that has been pushed the most recently. It can be nil when no log
-	// entry has yet been pushed. The global linked list contains all log entries
-	// pushed to the journal.
-	tail *entry
-	// head is the side of the global linked list that contains the oldest log entry.
-	// It can be nil when no log entry has yet been pushed.
-	head *entry
-
-	// tails are the tail sides of a local linked list for a given DN, ie. the sides
-	// that contain the newest entry. They are nil if there are no log entries for that
-	// DN.
-	tails map[DN]*entry
-	// heads are the head sides of a local linked list for a given DN, ie. the sides
-	// that contain the oldest entry. They are nil if there are no log entries for that
-	// DN.
-	heads map[DN]*entry
-
-	// quota is a map from DN to quota structure, representing the quota policy of a
-	// particular DN-designated logger.
-	quota map[DN]*quota
-
-	// subscribers are observer to logs. New log entries get emitted to channels
-	// present in the subscriber structure, after filtering them through subscriber-
-	// provided filters (eg. to limit events to subtrees that interest that particular
-	// subscriber).
-	subscribers []*subscriber
-}
-
-// newJournal creates a new empty journal. All journals are independent from
-// eachother, and as such, all LogTrees are also independent.
-func newJournal() *journal {
-	return &journal{
-		tails: make(map[DN]*entry),
-		heads: make(map[DN]*entry),
-
-		quota: make(map[DN]*quota),
-	}
-}
-
-// filter is a predicate that returns true if a log subscriber or reader is
-// interested in a given log entry.
-type filter func(*entry) bool
-
-// filterAll returns a filter that accepts all log entries.
-func filterAll() filter {
-	return func(*entry) bool { return true }
-}
-
-// filterExact returns a filter that accepts only log entries at a given exact
-// DN.  This filter should not be used in conjunction with journal.scanEntries
-// - instead, journal.getEntries should be used, as it is much faster.
-func filterExact(dn DN) filter {
-	return func(e *entry) bool {
-		return e.origin == dn
-	}
-}
-
-// filterSubtree returns a filter that accepts all log entries at a given DN and
-// sub-DNs. For example, filterSubtree at "foo.bar" would allow entries at
-// "foo.bar", "foo.bar.baz", but not "foo" or "foo.barr".
-func filterSubtree(root DN) filter {
-	if root == "" {
-		return filterAll()
-	}
-
-	rootParts := strings.Split(string(root), ".")
-	return func(e *entry) bool {
-		parts := strings.Split(string(e.origin), ".")
-		if len(parts) < len(rootParts) {
-			return false
-		}
-
-		for i, p := range rootParts {
-			if parts[i] != p {
-				return false
-			}
-		}
-
-		return true
-	}
-}
-
-// filterSeverity returns a filter that accepts log entries at a given severity
-// level or above. See the Severity type for more information about severity
-// levels.
-func filterSeverity(atLeast Severity) filter {
-	return func(e *entry) bool {
-		return e.leveled != nil && e.leveled.severity.AtLeast(atLeast)
-	}
-}
-
-func filterOnlyRaw(e *entry) bool {
-	return e.raw != nil
-}
-
-func filterOnlyLeveled(e *entry) bool {
-	return e.leveled != nil
-}
-
-// scanEntries does a linear scan through the global entry list and returns all
-// entries that match the given filters. If retrieving entries for an exact event,
-// getEntries should be used instead, as it will leverage DN-local linked lists to
-// retrieve them faster. journal.mu must be taken at R or RW level when calling
-// this function.
-func (j *journal) scanEntries(count int, filters ...filter) (res []*entry) {
-	cur := j.tail
-	for {
-		if cur == nil {
-			break
-		}
-
-		passed := true
-		for _, filter := range filters {
-			if !filter(cur) {
-				passed = false
-				break
-			}
-		}
-		if passed {
-			res = append(res, cur)
-		}
-		if count != BacklogAllAvailable && len(res) >= count {
-			break
-		}
-		cur = cur.prevGlobal
-	}
-
-	// Reverse entries back into chronological order.
-	sort.SliceStable(res, func(i, j int) bool {
-		return i > j
-	})
-	return
-}
-
-// getEntries returns all entries at a given DN. This is faster than a
-// scanEntries(filterExact), as it uses the special local linked list pointers to
-// traverse the journal. Additional filters can be passed to further limit the
-// entries returned, but a scan through this DN's local linked list will be
-// performed regardless. journal.mu must be taken at R or RW level when calling
-// this function.
-func (j *journal) getEntries(count int, exact DN, filters ...filter) (res []*entry) {
-	cur := j.tails[exact]
-	for {
-		if cur == nil {
-			break
-		}
-
-		passed := true
-		for _, filter := range filters {
-			if !filter(cur) {
-				passed = false
-				break
-			}
-		}
-		if passed {
-			res = append(res, cur)
-		}
-		if count != BacklogAllAvailable && len(res) >= count {
-			break
-		}
-		cur = cur.prevLocal
-	}
-
-	// Reverse entries back into chronological order.
-	sort.SliceStable(res, func(i, j int) bool {
-		return i > j
-	})
-	return
-}
-
-// Shorten returns a shortened version of this DN for constrained logging
-// environments like tty0 logging.
-//
-// If ShortenDictionary is given, it will be used to replace DN parts with
-// shorter equivalents. For example, with the dictionary:
-//
-// { "foobar": "foo", "manager": "mgr" }
-//
-// The DN some.foobar.logger will be turned into some.foo.logger before further
-// being processed by the shortening mechanism.
-//
-// The shortening rules applied are Metropolis-specific.
-func (d DN) Shorten(dict ShortenDictionary, maxLen int) string {
-	path, _ := d.Path()
-	// Apply DN part shortening rules.
-	if dict != nil {
-		for i, p := range path {
-			if sh, ok := dict[p]; ok {
-				path[i] = sh
-			}
-		}
-	}
-
-	// This generally shouldn't happen.
-	if len(path) == 0 {
-		return "?"
-	}
-
-	// Strip 'root.' prefix.
-	if len(path) > 1 && path[0] == "root" {
-		path = path[1:]
-	}
-
-	// Replace role.xxx.yyy.zzz with xxx.zzz - stripping everything between the role
-	// name and the last element of the path.
-	if path[0] == "role" && len(path) > 1 {
-		if len(path) == 2 {
-			path = path[1:]
-		} else {
-			path = []string{
-				path[1],
-				path[len(path)-1],
-			}
-		}
-	}
-
-	// Join back to be ' '-delimited, and ellipsize if too long.
-	s := strings.Join(path, " ")
-	if overflow := len(s) - maxLen; overflow > 0 {
-		s = "..." + s[overflow+3:]
-	}
-	return s
-}
-
-type ShortenDictionary map[string]string
-
-var MetropolisShortenDict = ShortenDictionary{
-	"controlplane":           "cplane",
-	"map-cluster-membership": "map-membership",
-	"cluster-membership":     "cluster",
-	"controller-manager":     "controllers",
-	"networking":             "net",
-	"network":                "net",
-	"interfaces":             "ifaces",
-	"kubernetes":             "k8s",
-}
diff --git a/metropolis/pkg/logtree/journal_entry.go b/metropolis/pkg/logtree/journal_entry.go
deleted file mode 100644
index 1580f54..0000000
--- a/metropolis/pkg/logtree/journal_entry.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import "source.monogon.dev/metropolis/pkg/logbuffer"
-
-// entry is a journal entry, representing a single log event (encompassed in a
-// Payload) at a given DN. See the journal struct for more information about the
-// global/local linked lists.
-type entry struct {
-	// origin is the DN at which the log entry was recorded, or conversely, in which DN
-	// it will be available at.
-	origin DN
-	// journal is the parent journal of this entry. An entry can belong only to a
-	// single journal. This pointer is used to mutate the journal's head/tail pointers
-	// when unlinking an entry.
-	journal *journal
-	// leveled is the leveled log entry for this entry, if this log entry was emitted
-	// by leveled logging. Otherwise it is nil.
-	leveled *LeveledPayload
-	// raw is the raw log entry for this entry, if this log entry was emitted by raw
-	// logging. Otherwise it is nil.
-	raw *logbuffer.Line
-
-	// prevGlobal is the previous entry in the global linked list, or nil if this entry
-	// is the oldest entry in the global linked list.
-	prevGlobal *entry
-	// nextGlobal is the next entry in the global linked list, or nil if this entry is
-	// the newest entry in the global linked list.
-	nextGlobal *entry
-
-	// prevLocal is the previous entry in this entry DN's local linked list, or nil if
-	// this entry is the oldest entry in this local linked list.
-	prevLocal *entry
-	// prevLocal is the next entry in this entry DN's local linked list, or nil if this
-	// entry is the newest entry in this local linked list.
-	nextLocal *entry
-
-	// seqLocal is a counter within a local linked list that increases by one each time
-	// a new log entry is added. It is used to quickly establish local linked list
-	// sizes (by subtracting seqLocal from both ends). This setup allows for O(1)
-	// length calculation for local linked lists as long as entries are only unlinked
-	// from the head or tail (which is the case in the current implementation).
-	seqLocal uint64
-}
-
-// external returns a LogEntry object for this entry, ie. the public version of
-// this object, without fields relating to the parent journal, linked lists,
-// sequences, etc. These objects are visible to library consumers.
-func (e *entry) external() *LogEntry {
-	return &LogEntry{
-		DN:      e.origin,
-		Leveled: e.leveled,
-		Raw:     e.raw,
-	}
-}
-
-// unlink removes this entry from both global and local linked lists, updating the
-// journal's head/tail pointers if needed. journal.mu must be taken as RW
-func (e *entry) unlink() {
-	// Unlink from the global linked list.
-	if e.prevGlobal != nil {
-		e.prevGlobal.nextGlobal = e.nextGlobal
-	}
-	if e.nextGlobal != nil {
-		e.nextGlobal.prevGlobal = e.prevGlobal
-	}
-	// Update journal head/tail pointers.
-	if e.journal.head == e {
-		e.journal.head = e.nextGlobal
-	}
-	if e.journal.tail == e {
-		e.journal.tail = e.prevGlobal
-	}
-
-	// Unlink from the local linked list.
-	if e.prevLocal != nil {
-		e.prevLocal.nextLocal = e.nextLocal
-	}
-	if e.nextLocal != nil {
-		e.nextLocal.prevLocal = e.prevLocal
-	}
-	// Update journal head/tail pointers.
-	if e.journal.heads[e.origin] == e {
-		e.journal.heads[e.origin] = e.nextLocal
-	}
-	if e.journal.tails[e.origin] == e {
-		e.journal.tails[e.origin] = e.prevLocal
-	}
-}
-
-// quota describes the quota policy for logging at a given DN.
-type quota struct {
-	// origin is the exact DN that this quota applies to.
-	origin DN
-	// max is the maximum count of log entries permitted for this DN - ie, the maximum
-	// size of the local linked list.
-	max uint64
-}
-
-// append adds an entry at the head of the global and local linked lists.
-func (j *journal) append(e *entry) {
-	j.mu.Lock()
-	defer j.mu.Unlock()
-
-	e.journal = j
-
-	// Insert at head in global linked list, set pointers.
-	e.nextGlobal = nil
-	e.prevGlobal = j.tail
-	if j.tail != nil {
-		j.tail.nextGlobal = e
-	}
-	j.tail = e
-	if j.head == nil {
-		j.head = e
-	}
-
-	// Create quota if necessary.
-	if _, ok := j.quota[e.origin]; !ok {
-		j.quota[e.origin] = &quota{origin: e.origin, max: 8192}
-	}
-
-	// Insert at head in local linked list, calculate seqLocal, set pointers.
-	e.nextLocal = nil
-	e.prevLocal = j.tails[e.origin]
-	if j.tails[e.origin] != nil {
-		j.tails[e.origin].nextLocal = e
-		e.seqLocal = e.prevLocal.seqLocal + 1
-	} else {
-		e.seqLocal = 0
-	}
-	j.tails[e.origin] = e
-	if j.heads[e.origin] == nil {
-		j.heads[e.origin] = e
-	}
-
-	// Apply quota to the local linked list that this entry got inserted to, ie. remove
-	// elements in excess of the quota.max count.
-	quota := j.quota[e.origin]
-	count := (j.tails[e.origin].seqLocal - j.heads[e.origin].seqLocal) + 1
-	if count > quota.max {
-		// Keep popping elements off the head of the local linked list until quota is not
-		// violated.
-		left := count - quota.max
-		cur := j.heads[e.origin]
-		for {
-			// This shouldn't happen if quota.max >= 1.
-			if cur == nil {
-				break
-			}
-			if left == 0 {
-				break
-			}
-			el := cur
-			cur = el.nextLocal
-			// Unlinking the entry unlinks it from both the global and local linked lists.
-			el.unlink()
-			left -= 1
-		}
-	}
-}
diff --git a/metropolis/pkg/logtree/journal_subscriber.go b/metropolis/pkg/logtree/journal_subscriber.go
deleted file mode 100644
index dc9750f..0000000
--- a/metropolis/pkg/logtree/journal_subscriber.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"sync/atomic"
-)
-
-// subscriber is an observer for new entries that are appended to the journal.
-type subscriber struct {
-	// filters that entries need to pass through in order to be sent to the subscriber.
-	filters []filter
-	// dataC is the channel to which entries that pass filters will be sent. The
-	// channel must be drained regularly in order to prevent accumulation of goroutines
-	// and possible reordering of messages.
-	dataC chan *LogEntry
-	// doneC is a channel that is closed once the subscriber wishes to stop receiving
-	// notifications.
-	doneC chan struct{}
-	// missed is the amount of messages missed by the subscriber by not receiving from
-	// dataC fast enough
-	missed uint64
-}
-
-// subscribe attaches a subscriber to the journal.
-// mu must be taken in W mode
-func (j *journal) subscribe(sub *subscriber) {
-	j.subscribers = append(j.subscribers, sub)
-}
-
-// notify sends an entry to all subscribers that wish to receive it.
-func (j *journal) notify(e *entry) {
-	j.mu.Lock()
-	defer j.mu.Unlock()
-
-	newSub := make([]*subscriber, 0, len(j.subscribers))
-	for _, sub := range j.subscribers {
-		select {
-		case <-sub.doneC:
-			close(sub.dataC)
-			continue
-		default:
-			newSub = append(newSub, sub)
-		}
-
-		for _, filter := range sub.filters {
-			if !filter(e) {
-				continue
-			}
-		}
-		select {
-		case sub.dataC <- e.external():
-		default:
-			atomic.AddUint64(&sub.missed, 1)
-		}
-	}
-	j.subscribers = newSub
-}
diff --git a/metropolis/pkg/logtree/journal_test.go b/metropolis/pkg/logtree/journal_test.go
deleted file mode 100644
index e9fc3b4..0000000
--- a/metropolis/pkg/logtree/journal_test.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"fmt"
-	"strings"
-	"testing"
-	"time"
-)
-
-func testPayload(msg string) *LeveledPayload {
-	return &LeveledPayload{
-		messages:  []string{msg},
-		timestamp: time.Now(),
-		severity:  INFO,
-		file:      "main.go",
-		line:      1337,
-	}
-}
-
-func TestJournalRetention(t *testing.T) {
-	j := newJournal()
-
-	for i := 0; i < 9000; i += 1 {
-		e := &entry{
-			origin:  "main",
-			leveled: testPayload(fmt.Sprintf("test %d", i)),
-		}
-		j.append(e)
-	}
-
-	entries := j.getEntries(BacklogAllAvailable, "main")
-	if want, got := 8192, len(entries); want != got {
-		t.Fatalf("wanted %d entries, got %d", want, got)
-	}
-	for i, entry := range entries {
-		want := fmt.Sprintf("test %d", (9000-8192)+i)
-		got := strings.Join(entry.leveled.messages, "\n")
-		if want != got {
-			t.Fatalf("wanted entry %q, got %q", want, got)
-		}
-	}
-}
-
-func TestJournalQuota(t *testing.T) {
-	j := newJournal()
-
-	for i := 0; i < 9000; i += 1 {
-		j.append(&entry{
-			origin:  "chatty",
-			leveled: testPayload(fmt.Sprintf("chatty %d", i)),
-		})
-		if i%10 == 0 {
-			j.append(&entry{
-				origin:  "solemn",
-				leveled: testPayload(fmt.Sprintf("solemn %d", i)),
-			})
-		}
-	}
-
-	entries := j.getEntries(BacklogAllAvailable, "chatty")
-	if want, got := 8192, len(entries); want != got {
-		t.Fatalf("wanted %d chatty entries, got %d", want, got)
-	}
-	entries = j.getEntries(BacklogAllAvailable, "solemn")
-	if want, got := 900, len(entries); want != got {
-		t.Fatalf("wanted %d solemn entries, got %d", want, got)
-	}
-	entries = j.getEntries(BacklogAllAvailable, "absent")
-	if want, got := 0, len(entries); want != got {
-		t.Fatalf("wanted %d absent entries, got %d", want, got)
-	}
-
-	entries = j.scanEntries(BacklogAllAvailable, filterAll())
-	if want, got := 8192+900, len(entries); want != got {
-		t.Fatalf("wanted %d total entries, got %d", want, got)
-	}
-	setMessages := make(map[string]bool)
-	for _, entry := range entries {
-		setMessages[strings.Join(entry.leveled.messages, "\n")] = true
-	}
-
-	for i := 0; i < 900; i += 1 {
-		want := fmt.Sprintf("solemn %d", i*10)
-		if !setMessages[want] {
-			t.Fatalf("could not find entry %q in journal", want)
-		}
-	}
-	for i := 0; i < 8192; i += 1 {
-		want := fmt.Sprintf("chatty %d", i+(9000-8192))
-		if !setMessages[want] {
-			t.Fatalf("could not find entry %q in journal", want)
-		}
-	}
-}
-
-func TestJournalSubtree(t *testing.T) {
-	j := newJournal()
-	j.append(&entry{origin: "a", leveled: testPayload("a")})
-	j.append(&entry{origin: "a.b", leveled: testPayload("a.b")})
-	j.append(&entry{origin: "a.b.c", leveled: testPayload("a.b.c")})
-	j.append(&entry{origin: "a.b.d", leveled: testPayload("a.b.d")})
-	j.append(&entry{origin: "e.f", leveled: testPayload("e.f")})
-	j.append(&entry{origin: "e.g", leveled: testPayload("e.g")})
-
-	expect := func(f filter, msgs ...string) string {
-		res := j.scanEntries(BacklogAllAvailable, f)
-		set := make(map[string]bool)
-		for _, entry := range res {
-			set[strings.Join(entry.leveled.messages, "\n")] = true
-		}
-
-		for _, want := range msgs {
-			if !set[want] {
-				return fmt.Sprintf("missing entry %q", want)
-			}
-		}
-		return ""
-	}
-
-	if res := expect(filterAll(), "a", "a.b", "a.b.c", "a.b.d", "e.f", "e.g"); res != "" {
-		t.Fatalf("All: %s", res)
-	}
-	if res := expect(filterSubtree("a"), "a", "a.b", "a.b.c", "a.b.d"); res != "" {
-		t.Fatalf("Subtree(a): %s", res)
-	}
-	if res := expect(filterSubtree("a.b"), "a.b", "a.b.c", "a.b.d"); res != "" {
-		t.Fatalf("Subtree(a.b): %s", res)
-	}
-	if res := expect(filterSubtree("e"), "e.f", "e.g"); res != "" {
-		t.Fatalf("Subtree(a.b): %s", res)
-	}
-}
-
-func TestDN_Shorten(t *testing.T) {
-	for i, te := range []struct {
-		input  string
-		maxLen int
-		want   string
-	}{
-		{"root.role.controlplane.launcher.consensus.autopromoter", 20, "cplane autopromoter"},
-		{"networking.interfaces", 20, "net ifaces"},
-		{"hostsfile", 20, "hostsfile"},
-		{"root.dhcp-server", 20, "dhcp-server"},
-		{"root.role.kubernetes.run.kubernetes.apiserver", 20, "k8s apiserver"},
-		{"some.very.long.dn.that.cant.be.shortened", 20, "...cant be shortened"},
-		{"network.interfaces.dhcp", 20, "net ifaces dhcp"},
-	} {
-		got := DN(te.input).Shorten(MetropolisShortenDict, te.maxLen)
-		if len(got) > te.maxLen {
-			t.Errorf("case %d: output %q too long, got %d bytes, wanted %d", i, got, len(got), te.maxLen)
-		} else {
-			if te.want != got {
-				t.Errorf("case %d: wanted %q, got %q", i, te.want, got)
-			}
-		}
-	}
-}
diff --git a/metropolis/pkg/logtree/klog.go b/metropolis/pkg/logtree/klog.go
deleted file mode 100644
index 9d9b1ee..0000000
--- a/metropolis/pkg/logtree/klog.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"fmt"
-	"io"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-)
-
-// KLogParser returns an io.WriteCloser to which raw logging from a klog emitter
-// can be piped. It will attempt to parse all lines from this log as
-// glog/klog-style entries, and pass them over to a LeveledLogger as if they were
-// emitted locally.
-//
-// This allows for piping in external processes that emit klog logging into a
-// logtree, leading to niceties like transparently exposing the log severity or
-// source file/line.
-//
-// One caveat, however, is that V-leveled logs will not be translated
-// appropriately - anything that the klog-emitter pumps out as Info will be
-// directly ingested as Info logging. There is no way to work around this.
-//
-// Another important limitation is that any written line is interpreted as having
-// happened recently (ie. within one hour of the time of execution of this
-// function). This is important as klog/glog-formatted loglines don't have a year
-// attached, so we have to infer it based on the current timestamp (note: parsed
-// lines do not necessarily have their year aleays equal to the current year, as
-// the code handles the edge case of parsing a line from the end of a previous
-// year at the beginning of the next).
-func KLogParser(logger LeveledLogger) io.WriteCloser {
-	p, ok := logger.(*leveledPublisher)
-	if !ok {
-		// Fail fast, as this is a programming error.
-		panic("Expected *leveledPublisher in LeveledLogger from supervisor")
-	}
-
-	k := &klogParser{
-		publisher: p,
-	}
-	// klog seems to have no line length limit. Let's assume some sane sort of default.
-	k.buffer = logbuffer.NewLineBuffer(1024, k.consumeLine)
-	return k
-}
-
-type klogParser struct {
-	publisher *leveledPublisher
-	buffer    *logbuffer.LineBuffer
-}
-
-func (k *klogParser) Write(p []byte) (n int, err error) {
-	return k.buffer.Write(p)
-}
-
-// Close must be called exactly once after the parser is done being used. It will
-// pipe any leftover data in its write buffer as one last line to parse.
-func (k *klogParser) Close() error {
-	return k.buffer.Close()
-}
-
-// consumeLine is called by the internal LineBuffer any time a new line is fully
-// written.
-func (k *klogParser) consumeLine(l *logbuffer.Line) {
-	p := parse(time.Now(), l.Data)
-	if p == nil {
-		// We could instead emit that line as a raw log - however, this would lead to
-		// interleaving raw logging and leveled logging.
-		k.publisher.Errorf("Invalid klog line: %s", l.Data)
-		return
-	}
-	// TODO(q3k): should this be exposed as an API on LeveledLogger? How much should
-	// we permit library users to 'fake' logs? This would also permit us to get rid
-	// of the type assertion in KLogParser().
-	e := &entry{
-		origin:  k.publisher.node.dn,
-		leveled: p,
-	}
-	k.publisher.node.tree.journal.append(e)
-	k.publisher.node.tree.journal.notify(e)
-}
-
-var (
-	// reKLog matches and parses klog/glog-formatted log lines. Format: I0312
-	// 14:20:04.240540     204 shared_informer.go:247] Caches are synced for attach
-	// detach
-	reKLog = regexp.MustCompile(`^([IEWF])(\d{4})\s+(\d{2}:\d{2}:\d{2}(\.\d+)?)\s+(\d+)\s+([^:]+):(\d+)]\s+(.+)$`)
-)
-
-// parse attempts to parse a klog-formatted line. Returns nil if the line
-// couldn't have been parsed successfully.
-func parse(now time.Time, s string) *LeveledPayload {
-	parts := reKLog.FindStringSubmatch(s)
-	if parts == nil {
-		return nil
-	}
-
-	severityS := parts[1]
-	date := parts[2]
-	timestamp := parts[3]
-	pid := parts[5]
-	file := parts[6]
-	lineS := parts[7]
-	message := parts[8]
-
-	var severity Severity
-	switch severityS {
-	case "I":
-		severity = INFO
-	case "W":
-		severity = WARNING
-	case "E":
-		severity = ERROR
-	case "F":
-		severity = FATAL
-	default:
-		return nil
-	}
-
-	// Possible race due to klog's/glog's format not containing a year.
-	// On 2020/12/31 at 23:59:59.99999 a klog logger emits this line:
-	//
-	//   I1231 23:59:59.99999 1 example.go:10] It's almost 2021! Hooray.
-	//
-	// Then, if this library parses that line at 2021/01/01 00:00:00.00001, the
-	// time will be interpreted as:
-	//
-	//   2021/12/31 23:59:59
-	//
-	// So around one year in the future. We attempt to fix this case further down in
-	// this function.
-	year := now.Year()
-	ts, err := parseKLogTime(year, date, timestamp)
-	if err != nil {
-		return nil
-	}
-
-	// Attempt to fix the aforementioned year-in-the-future issue.
-	if ts.After(now) && ts.Sub(now) > time.Hour {
-		// Parsed timestamp is in the future. How close is it to One-Year-From-Now?
-		oyfn := now.Add(time.Hour * 24 * 365)
-		dOyfn := ts.Sub(oyfn)
-		// Let's make sure Duration-To-One-Year-From-Now is always positive. This
-		// simplifies the rest of the checks and papers over some possible edge cases.
-		if dOyfn < 0 {
-			dOyfn = -dOyfn
-		}
-
-		// Okay, is that very close? Then the issue above happened and we should
-		// attempt to reparse it with last year. We can't just manipulate the date we
-		// already have, as it's difficult to 'subtract one year'.
-		if dOyfn < (time.Hour * 24 * 2) {
-			ts, err = parseKLogTime(year-1, date, timestamp)
-			if err != nil {
-				return nil
-			}
-		} else {
-			// Otherwise, we received some seriously time traveling log entry. Abort.
-			return nil
-		}
-	}
-
-	line, err := strconv.Atoi(lineS)
-	if err != nil {
-		return nil
-	}
-
-	// The PID is discarded.
-	_ = pid
-
-	// Finally we have extracted all the data from the line. Inject into the log
-	// publisher.
-	return &LeveledPayload{
-		timestamp: ts,
-		severity:  severity,
-		messages:  []string{message},
-		file:      file,
-		line:      line,
-	}
-}
-
-// parseKLogTime parses a klog date and time (eg. "0314", "12:13:14.12345") into
-// a time.Time happening at a given year.
-func parseKLogTime(year int, d, t string) (time.Time, error) {
-	var layout string
-	if strings.Contains(t, ".") {
-		layout = "2006 0102 15:04:05.000000"
-	} else {
-		layout = "2006 0102 15:04:05"
-	}
-	// Make up a string that contains the current year. This permits us to parse
-	// fully into an actual timestamp.
-	// TODO(q3k): add a timezone? This currently behaves as UTC, which is probably
-	// what we want, but we should formalize this.
-	return time.Parse(layout, fmt.Sprintf("%d %s %s", year, d, t))
-}
diff --git a/metropolis/pkg/logtree/klog_test.go b/metropolis/pkg/logtree/klog_test.go
deleted file mode 100644
index d53df3f..0000000
--- a/metropolis/pkg/logtree/klog_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"testing"
-	"time"
-
-	"github.com/google/go-cmp/cmp"
-)
-
-func TestParse(t *testing.T) {
-	// Injected 'now'. Used to make these tests reproducible and to allow for
-	// testing the log-across-year edgecase.
-	// Fri 12 Mar 2021 03:46:26 PM UTC
-	now := time.Unix(1615563986, 123456789)
-	// Sat 01 Jan 2000 12:00:01 AM UTC
-	nowNewYear := time.Unix(946684801, 0)
-
-	for i, te := range []struct {
-		now  time.Time
-		line string
-		want *LeveledPayload
-	}{
-		// 0: Simple case: everything should parse correctly.
-		{now, "E0312 14:20:04.240540    204 shared_informer.go:247] Caches are synced for attach detach", &LeveledPayload{
-			messages:  []string{"Caches are synced for attach detach"},
-			timestamp: time.Date(2021, 03, 12, 14, 20, 4, 240540000, time.UTC),
-			severity:  ERROR,
-			file:      "shared_informer.go",
-			line:      247,
-		}},
-		// 1: Mumbling line, should fail.
-		{now, "Application starting up...", nil},
-		// 2: Empty line, should fail.
-		{now, "", nil},
-		// 3: Line from the future, should fail.
-		{now, "I1224 14:20:04.240540    204 john_titor.go:247] I'm sorry, what day is it today? Uuuh, and what year?", nil},
-		// 4: Log-across-year edge case. The log was emitted right before a year
-		//    rollover, and parsed right after it. It should be attributed to the
-		//    previous year.
-		{nowNewYear, "I1231 23:59:43.123456    123 fry.go:123] Here's to another lousy millenium!", &LeveledPayload{
-			messages:  []string{"Here's to another lousy millenium!"},
-			timestamp: time.Date(1999, 12, 31, 23, 59, 43, 123456000, time.UTC),
-			severity:  INFO,
-			file:      "fry.go",
-			line:      123,
-		}},
-		// 5: Invalid severity, should fail.
-		{now, "D0312 14:20:04.240540    204 shared_informer.go:247] Caches are synced for attach detach", nil},
-		// 6: Invalid time, should fail.
-		{now, "D0312 25:20:04.240540    204 shared_informer.go:247] Caches are synced for attach detach", nil},
-		// 7: Simple case without sub-second timing: everything should parse correctly
-		{now, "E0312 14:20:04 204 shared_informer.go:247] Caches are synced for attach detach", &LeveledPayload{
-			messages:  []string{"Caches are synced for attach detach"},
-			timestamp: time.Date(2021, 03, 12, 14, 20, 4, 0, time.UTC),
-			severity:  ERROR,
-			file:      "shared_informer.go",
-			line:      247,
-		}},
-	} {
-		got := parse(te.now, te.line)
-		if diff := cmp.Diff(te.want, got, cmp.AllowUnexported(LeveledPayload{})); diff != "" {
-			t.Errorf("%d: mismatch (-want +got):\n%s", i, diff)
-		}
-	}
-}
diff --git a/metropolis/pkg/logtree/kmsg.go b/metropolis/pkg/logtree/kmsg.go
deleted file mode 100644
index 03bb6ff..0000000
--- a/metropolis/pkg/logtree/kmsg.go
+++ /dev/null
@@ -1,141 +0,0 @@
-//go:build linux
-// +build linux
-
-package logtree
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"os"
-	"strconv"
-	"strings"
-	"time"
-
-	"golang.org/x/sys/unix"
-)
-
-const (
-	loglevelEmergency = 0
-	loglevelAlert     = 1
-	loglevelCritical  = 2
-	loglevelError     = 3
-	loglevelWarning   = 4
-	loglevelNotice    = 5
-	loglevelInfo      = 6
-	loglevelDebug     = 7
-)
-
-// KmsgPipe pipes logs from the kernel kmsg interface at /dev/kmsg into the
-// given logger.
-func KmsgPipe(ctx context.Context, lt LeveledLogger) error {
-	publisher, ok := lt.(*leveledPublisher)
-	if !ok {
-		// Fail fast, as this is a programming error.
-		panic("Expected *leveledPublisher in LeveledLogger from supervisor")
-	}
-	kmsgFile, err := os.Open("/dev/kmsg")
-	if err != nil {
-		return err
-	}
-	defer kmsgFile.Close()
-	var lastOverflow time.Time
-	// PRINTK_MESSAGE_MAX in @linux//kernel/printk:internal.h
-	linebuf := make([]byte, 2048)
-	for {
-		n, err := kmsgFile.Read(linebuf)
-		// Best-effort, in Go it is not possible to cancel a Read on-demand.
-		select {
-		case <-ctx.Done():
-			return ctx.Err()
-		default:
-		}
-		if errors.Is(err, unix.EPIPE) {
-			now := time.Now()
-			// Rate-limit to 1 per second
-			if lastOverflow.Add(1 * time.Second).Before(now) {
-				lt.Warning("Lost messages due to kernel ring buffer overflow")
-				lastOverflow = now
-			}
-			continue
-		}
-		if err != nil {
-			return fmt.Errorf("while reading from kmsg: %w", err)
-		}
-		var monotonicRaw unix.Timespec
-		if err := unix.ClockGettime(unix.CLOCK_MONOTONIC_RAW, &monotonicRaw); err != nil {
-			return fmt.Errorf("while getting monotonic timestamp: %w", err)
-		}
-		p := parseKmsg(time.Now(), time.Duration(monotonicRaw.Nano())*time.Nanosecond, linebuf[:n])
-		if p == nil {
-			continue
-		}
-		e := &entry{
-			origin:  publisher.node.dn,
-			leveled: p,
-		}
-		publisher.node.tree.journal.append(e)
-		publisher.node.tree.journal.notify(e)
-	}
-}
-
-// See https://www.kernel.org/doc/Documentation/ABI/testing/dev-kmsg for format.
-func parseKmsg(now time.Time, monotonicSinceBoot time.Duration, data []byte) *LeveledPayload {
-	meta, message, ok := bytes.Cut(data, []byte(";"))
-	if !ok {
-		// Unknown message format
-		return nil
-	}
-	endOfMsgIdx := bytes.IndexByte(message, '\n')
-	if endOfMsgIdx == -1 {
-		return nil
-	}
-	message = message[:endOfMsgIdx]
-	metaFields := strings.FieldsFunc(string(meta), func(r rune) bool { return r == ',' })
-	if len(metaFields) < 4 {
-		return nil
-	}
-	loglevel, err := strconv.ParseUint(metaFields[0], 10, 64)
-	if err != nil {
-		return nil
-	}
-
-	monotonicMicro, err := strconv.ParseUint(metaFields[2], 10, 64)
-	if err != nil {
-		return nil
-	}
-
-	// Kmsg entries are timestamped with CLOCK_MONOTONIC_RAW, a clock which does
-	// not have a direct correspondence with civil time (UTC). To assign best-
-	// effort timestamps, use the current monotonic clock reading to determine
-	// the elapsed time between the kmsg entry and now on the monotonic clock.
-	// This does not correspond well to elapsed UTC time on longer timescales as
-	// CLOCK_MONOTONIC_RAW is not trimmed to run true to UTC, but up to in the
-	// order of hours this is close. As the pipe generally processes messages
-	// very close to their creation date, the elapsed time and thus the accrued
-	// error is extremely small.
-	monotonic := time.Duration(monotonicMicro) * time.Microsecond
-
-	monotonicFromNow := monotonic - monotonicSinceBoot
-
-	var severity Severity
-	switch loglevel {
-	case loglevelEmergency, loglevelAlert:
-		severity = FATAL
-	case loglevelCritical, loglevelError:
-		severity = ERROR
-	case loglevelWarning:
-		severity = WARNING
-	case loglevelNotice, loglevelInfo, loglevelDebug:
-		severity = INFO
-	default:
-		severity = INFO
-	}
-
-	return &LeveledPayload{
-		timestamp: now.Add(monotonicFromNow),
-		severity:  severity,
-		messages:  []string{string(message)},
-	}
-}
diff --git a/metropolis/pkg/logtree/kmsg_test.go b/metropolis/pkg/logtree/kmsg_test.go
deleted file mode 100644
index e2faf82..0000000
--- a/metropolis/pkg/logtree/kmsg_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-//go:build linux
-// +build linux
-
-package logtree
-
-import (
-	"testing"
-	"time"
-
-	"github.com/google/go-cmp/cmp"
-)
-
-func TestParseKmsg(t *testing.T) {
-	now := time.Unix(1691593045, 128027944)
-	nowMonotonic := time.Duration(1501096434537722)
-
-	for i, te := range []struct {
-		line string
-		want *LeveledPayload
-	}{
-		// Empty line
-		{"", nil},
-		// Unknown format
-		{"Not a valid line", nil},
-		// Normal entry
-		{"6,30962,1501094342185,-;test\n", &LeveledPayload{
-			messages:  []string{"test"},
-			timestamp: time.Date(2023, 8, 9, 14, 57, 23, 35675222, time.UTC),
-			severity:  INFO,
-		}},
-		// With metadata and different severity
-		{"4,30951,1486884175312,-;nvme nvme2: starting error recovery\n SUBSYSTEM=nvme\n DEVICE=c239:2\n", &LeveledPayload{
-			messages:  []string{"nvme nvme2: starting error recovery"},
-			timestamp: time.Date(2023, 8, 9, 11, 00, 32, 868802222, time.UTC),
-			severity:  WARNING,
-		}},
-	} {
-		got := parseKmsg(now, nowMonotonic, []byte(te.line))
-		if diff := cmp.Diff(te.want, got, cmp.AllowUnexported(LeveledPayload{})); diff != "" {
-			t.Errorf("%d: mismatch (-want +got):\n%s", i, diff)
-		}
-	}
-}
diff --git a/metropolis/pkg/logtree/leveled.go b/metropolis/pkg/logtree/leveled.go
deleted file mode 100644
index 0facbb1..0000000
--- a/metropolis/pkg/logtree/leveled.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"fmt"
-
-	lpb "source.monogon.dev/metropolis/pkg/logtree/proto"
-)
-
-// LeveledLogger is a generic interface for glog-style logging. There are four
-// hardcoded log severities, in increasing order: INFO, WARNING, ERROR, FATAL.
-// Logging at a certain severity level logs not only to consumers expecting data at
-// that severity level, but also all lower severity levels. For example, an ERROR
-// log will also be passed to consumers looking at INFO or WARNING logs.
-type LeveledLogger interface {
-	// Info logs at the INFO severity. Arguments are handled in the manner of
-	// fmt.Print, a terminating newline is added if missing.
-	Info(args ...interface{})
-	// Infof logs at the INFO severity. Arguments are handled in the manner of
-	// fmt.Printf, a terminating newline is added if missing.
-	Infof(format string, args ...interface{})
-
-	// Warning logs at the WARNING severity. Arguments are handled in the manner of
-	// fmt.Print, a terminating newline is added if missing.
-	Warning(args ...interface{})
-	// Warningf logs at the WARNING severity. Arguments are handled in the manner of
-	// fmt.Printf, a terminating newline is added if missing.
-	Warningf(format string, args ...interface{})
-
-	// Error logs at the ERROR severity. Arguments are handled in the manner of
-	// fmt.Print, a terminating newline is added if missing.
-	Error(args ...interface{})
-	// Errorf logs at the ERROR severity. Arguments are handled in the manner of
-	// fmt.Printf, a terminating newline is added if missing.
-	Errorf(format string, args ...interface{})
-
-	// Fatal logs at the FATAL severity and aborts the current program. Arguments are
-	// handled in the manner of fmt.Print, a terminating newline is added if missing.
-	Fatal(args ...interface{})
-	// Fatalf logs at the FATAL severity and aborts the current program. Arguments are
-	// handled in the manner of fmt.Printf, a terminating newline is added if missing.
-	Fatalf(format string, args ...interface{})
-
-	// V returns a VerboseLeveledLogger at a given verbosity level. These verbosity
-	// levels can be dynamically set and unset on a package-granular level by consumers
-	// of the LeveledLogger logs. The returned value represents whether logging at the
-	// given verbosity level was active at that time, and as such should not be a long-
-	// lived object in programs. This construct is further refered to as 'V-logs'.
-	V(level VerbosityLevel) VerboseLeveledLogger
-
-	// WithAddedStackDepth returns the same LeveledLogger, but adjusted with an
-	// additional 'extra stack depth' which will be used to skip a given number of
-	// stack/call frames when determining the location where the error originated.
-	// For example, WithStackDepth(1) will return a logger that will skip one
-	// stack/call frame. Then, with function foo() calling function helper() which
-	// in turns call l.Infof(), the log line will be emitted with the call site of
-	// helper() within foo(), instead of the default behaviour of logging the
-	// call site of Infof() within helper().
-	//
-	// This is useful for functions which somehow wrap loggers in helper functions,
-	// for example to expose a slightly different API.
-	WithAddedStackDepth(depth int) LeveledLogger
-}
-
-// VerbosityLevel is a verbosity level defined for V-logs. This can be changed
-// programmatically per Go package. When logging at a given VerbosityLevel V, the
-// current level must be equal or higher to V for the logs to be recorded.
-// Conversely, enabling a V-logging at a VerbosityLevel V also enables all logging
-// at lower levels [Int32Min .. (V-1)].
-type VerbosityLevel int32
-
-type VerboseLeveledLogger interface {
-	// Enabled returns if this level was enabled. If not enabled, all logging into this
-	// logger will be discarded immediately. Thus, Enabled() can be used to check the
-	// verbosity level before performing any logging:
-	//    if l.V(3).Enabled() { l.Info("V3 is enabled") }
-	// or, in simple cases, the convenience function .Info can be used:
-	//    l.V(3).Info("V3 is enabled")
-	// The second form is shorter and more convenient, but more expensive, as its
-	// arguments are always evaluated.
-	Enabled() bool
-	// Info is the equivalent of a LeveledLogger's Info call, guarded by whether this
-	// VerboseLeveledLogger is enabled.
-	Info(args ...interface{})
-	// Infof is the equivalent of a LeveledLogger's Infof call, guarded by whether this
-	// VerboseLeveledLogger is enabled.
-	Infof(format string, args ...interface{})
-}
-
-// Severity is one of the severities as described in LeveledLogger.
-type Severity string
-
-const (
-	INFO    Severity = "I"
-	WARNING Severity = "W"
-	ERROR   Severity = "E"
-	FATAL   Severity = "F"
-)
-
-var (
-	// SeverityAtLeast maps a given severity to a list of severities that at that
-	// severity or higher. In other words, SeverityAtLeast[X] returns a list of
-	// severities that might be seen in a log at severity X.
-	SeverityAtLeast = map[Severity][]Severity{
-		INFO:    {INFO, WARNING, ERROR, FATAL},
-		WARNING: {WARNING, ERROR, FATAL},
-		ERROR:   {ERROR, FATAL},
-		FATAL:   {FATAL},
-	}
-)
-
-func (s Severity) AtLeast(other Severity) bool {
-	for _, el := range SeverityAtLeast[other] {
-		if el == s {
-			return true
-		}
-	}
-	return false
-}
-
-// Valid returns whether true if this severity is one of the known levels
-// (INFO, WARNING, ERROR or FATAL), false otherwise.
-func (s Severity) Valid() bool {
-	switch s {
-	case INFO, WARNING, ERROR, FATAL:
-		return true
-	default:
-		return false
-	}
-}
-
-func SeverityFromProto(s lpb.LeveledLogSeverity) (Severity, error) {
-	switch s {
-	case lpb.LeveledLogSeverity_INFO:
-		return INFO, nil
-	case lpb.LeveledLogSeverity_WARNING:
-		return WARNING, nil
-	case lpb.LeveledLogSeverity_ERROR:
-		return ERROR, nil
-	case lpb.LeveledLogSeverity_FATAL:
-		return FATAL, nil
-	default:
-		return "", fmt.Errorf("unknown severity value %d", s)
-	}
-}
-
-func (s Severity) ToProto() lpb.LeveledLogSeverity {
-	switch s {
-	case INFO:
-		return lpb.LeveledLogSeverity_INFO
-	case WARNING:
-		return lpb.LeveledLogSeverity_WARNING
-	case ERROR:
-		return lpb.LeveledLogSeverity_ERROR
-	case FATAL:
-		return lpb.LeveledLogSeverity_FATAL
-	default:
-		return lpb.LeveledLogSeverity_INVALID
-	}
-}
diff --git a/metropolis/pkg/logtree/leveled_payload.go b/metropolis/pkg/logtree/leveled_payload.go
deleted file mode 100644
index b4a0630..0000000
--- a/metropolis/pkg/logtree/leveled_payload.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-
-	tpb "google.golang.org/protobuf/types/known/timestamppb"
-
-	lpb "source.monogon.dev/metropolis/pkg/logtree/proto"
-)
-
-// LeveledPayload is a log entry for leveled logs (as per leveled.go). It contains
-// the input to these calls (severity and message split into newline-delimited
-// messages) and additional metadata that would be usually seen in a text
-// representation of a leveled log entry.
-type LeveledPayload struct {
-	// messages is the list of messages contained in this payload. This list is built
-	// from splitting up the given message from the user by newline.
-	messages []string
-	// timestamp is the time at which this message was emitted.
-	timestamp time.Time
-	// severity is the leveled Severity at which this message was emitted.
-	severity Severity
-	// file is the filename of the caller that emitted this message.
-	file string
-	// line is the line number within the file of the caller that emitted this message.
-	line int
-}
-
-// String returns a canonical representation of this payload as a single string
-// prefixed with metadata. If the original message was logged with newlines, this
-// representation will also contain newlines, with each original message part
-// prefixed by the metadata. For an alternative call that will instead return a
-// canonical prefix and a list of lines in the message, see Strings().
-func (p *LeveledPayload) String() string {
-	prefix, lines := p.Strings()
-	res := make([]string, len(p.messages))
-	for i, line := range lines {
-		res[i] = fmt.Sprintf("%s%s", prefix, line)
-	}
-	return strings.Join(res, "\n")
-}
-
-// Strings returns the canonical representation of this payload split into a
-// prefix and all lines that were contained in the original message. This is
-// meant to be displayed to the user by showing the prefix before each line,
-// concatenated together - possibly in a table form with the prefixes all
-// unified with a rowspan- like mechanism.
-//
-// For example, this function can return:
-//
-//	prefix = "I1102 17:20:06.921395 foo.go:42] "
-//	lines = []string{"current tags:", " - one", " - two"}
-//
-// With this data, the result should be presented to users this way in text form:
-// I1102 17:20:06.921395 foo.go:42] current tags:
-// I1102 17:20:06.921395 foo.go:42]  - one
-// I1102 17:20:06.921395 foo.go:42]  - two
-//
-// Or, in a table layout:
-// .-----------------------------------------------------------.
-// | I1102 17:20:06.921395     0 foo.go:42] : current tags:    |
-// |                                        :------------------|
-// |                                        :  - one           |
-// |                                        :------------------|
-// |                                        :  - two           |
-// '-----------------------------------------------------------'
-func (p *LeveledPayload) Strings() (prefix string, lines []string) {
-	_, month, day := p.timestamp.Date()
-	hour, minute, second := p.timestamp.Clock()
-	nsec := p.timestamp.Nanosecond() / 1000
-
-	// Same format as in glog, but without treadid.
-	// Lmmdd hh:mm:ss.uuuuuu file:line]
-	// TODO(q3k): rewrite this to printf-less code.
-	prefix = fmt.Sprintf("%s%02d%02d %02d:%02d:%02d.%06d %s:%d] ", p.severity, month, day, hour, minute, second, nsec, p.file, p.line)
-
-	lines = p.messages
-	return
-}
-
-// Messages returns the inner message lines of this entry, ie. what was passed
-// to the actual logging method, but split by newlines.
-func (p *LeveledPayload) Messages() []string { return p.messages }
-
-func (p *LeveledPayload) MessagesJoined() string { return strings.Join(p.messages, "\n") }
-
-// Timestamp returns the time at which this entry was logged.
-func (p *LeveledPayload) Timestamp() time.Time { return p.timestamp }
-
-// Location returns a string in the form of file_name:line_number that shows the
-// origin of the log entry in the program source.
-func (p *LeveledPayload) Location() string { return fmt.Sprintf("%s:%d", p.file, p.line) }
-
-// Severity returns the Severity with which this entry was logged.
-func (p *LeveledPayload) Severity() Severity { return p.severity }
-
-// Proto converts a LeveledPayload to protobuf format.
-func (p *LeveledPayload) Proto() *lpb.LogEntry_Leveled {
-	return &lpb.LogEntry_Leveled{
-		Lines:     p.Messages(),
-		Timestamp: tpb.New(p.Timestamp()),
-		Severity:  p.Severity().ToProto(),
-		Location:  p.Location(),
-	}
-}
-
-// LeveledPayloadFromProto parses a protobuf message into the internal format.
-func LeveledPayloadFromProto(p *lpb.LogEntry_Leveled) (*LeveledPayload, error) {
-	severity, err := SeverityFromProto(p.Severity)
-	if err != nil {
-		return nil, fmt.Errorf("could not convert severity: %w", err)
-	}
-	parts := strings.Split(p.Location, ":")
-	if len(parts) != 2 {
-		return nil, fmt.Errorf("invalid location, must be two :-delimited parts, is %d parts", len(parts))
-	}
-	file := parts[0]
-	line, err := strconv.Atoi(parts[1])
-	if err != nil {
-		return nil, fmt.Errorf("invalid location line number: %w", err)
-	}
-	return &LeveledPayload{
-		messages:  p.Lines,
-		timestamp: p.Timestamp.AsTime(),
-		severity:  severity,
-		file:      file,
-		line:      line,
-	}, nil
-}
-
-// ExternalLeveledPayload is a LeveledPayload received from an external source,
-// eg. from parsing the logging output of third-party programs. It can be
-// converted into a LeveledPayload and inserted into a leveled logger, but will
-// be sanitized before that, ensuring that potentially buggy
-// emitters/converters do not end up polluting the leveled logger data.
-//
-// This type should be used only when inserting data from external systems, not
-// by code that just wishes to log things. In the future, data inserted this
-// way might be explicitly marked as tainted so operators can understand that
-// parts of this data might not give the same guarantees as the log entries
-// emitted by the native LeveledLogger API.
-type ExternalLeveledPayload struct {
-	// Log line. If any newlines are found, they will split the message into
-	// multiple messages within LeveledPayload. Empty messages are accepted
-	// verbatim.
-	Message string
-	// Timestamp when this payload was emitted according to its source. If not
-	// given, will default to the time of conversion to LeveledPayload.
-	Timestamp time.Time
-	// Log severity. If invalid or unset will default to INFO.
-	Severity Severity
-	// File name of originating code. Defaults to "unknown" if not set.
-	File string
-	// Line in File. Zero indicates the line is not known.
-	Line int
-}
-
-// sanitize the given ExternalLeveledPayload by creating a corresponding
-// LeveledPayload. The original object is unaltered.
-func (e *ExternalLeveledPayload) sanitize() *LeveledPayload {
-	l := &LeveledPayload{
-		messages:  strings.Split(e.Message, "\n"),
-		timestamp: e.Timestamp,
-		severity:  e.Severity,
-		file:      e.File,
-		line:      e.Line,
-	}
-	if l.timestamp.IsZero() {
-		l.timestamp = time.Now()
-	}
-	if !l.severity.Valid() {
-		l.severity = INFO
-	}
-	if l.file == "" {
-		l.file = "unknown"
-	}
-	if l.line < 0 {
-		l.line = 0
-	}
-	return l
-}
diff --git a/metropolis/pkg/logtree/logtree.go b/metropolis/pkg/logtree/logtree.go
deleted file mode 100644
index a773b7b..0000000
--- a/metropolis/pkg/logtree/logtree.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"fmt"
-	"strings"
-	"sync"
-
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-)
-
-// LogTree is a tree-shaped logging system. For more information, see the package-
-// level documentation.
-type LogTree struct {
-	// journal is the tree's journal, storing all log data and managing subscribers.
-	journal *journal
-	// root is the root node of the actual tree of the log tree. The nodes contain per-
-	// DN configuration options, notably the current verbosity level of that DN.
-	root *node
-}
-
-func New() *LogTree {
-	lt := &LogTree{
-		journal: newJournal(),
-	}
-	lt.root = newNode(lt, "")
-	return lt
-}
-
-// node represents a given DN as a discrete 'logger'. It implements the
-// LeveledLogger interface for log publishing, entries from which it passes over to
-// the logtree's journal.
-type node struct {
-	// dn is the DN which this node represents (or "" if this is the root node).
-	dn DN
-	// tree is the LogTree to which this node belongs.
-	tree *LogTree
-	// verbosity is the current verbosity level of this DN/node, affecting .V(n)
-	// LeveledLogger calls
-	verbosity     VerbosityLevel
-	rawLineBuffer *logbuffer.LineBuffer
-
-	// mu guards children.
-	mu sync.Mutex
-	// children is a map of DN-part to a children node in the logtree. A DN-part is a
-	// string representing a part of the DN between the deliming dots, as returned by
-	// DN.Path.
-	children map[string]*node
-}
-
-// newNode returns a node at a given DN in the LogTree - but doesn't set up the
-// LogTree to insert it accordingly.
-func newNode(tree *LogTree, dn DN) *node {
-	n := &node{
-		dn:       dn,
-		tree:     tree,
-		children: make(map[string]*node),
-	}
-	// TODO(q3k): make this limit configurable. If this happens, or the default (1024) gets changes, max chunk size
-	// calculations when serving the logs (eg. in NodeDebugService) must reflect this.
-	n.rawLineBuffer = logbuffer.NewLineBuffer(1024, n.logRaw)
-	return n
-}
-
-// nodeByDN returns the LogTree node corresponding to a given DN. If either the
-// node or some of its parents do not exist they will be created as needed.
-func (l *LogTree) nodeByDN(dn DN) (*node, error) {
-	traversal, err := newTraversal(dn)
-	if err != nil {
-		return nil, fmt.Errorf("traversal failed: %w", err)
-	}
-	return traversal.execute(l.root), nil
-}
-
-// nodeTraversal represents a request to traverse the LogTree in search of a given
-// node by DN.
-type nodeTraversal struct {
-	// want is the DN of the node's that requested to be found.
-	want DN
-	// current is the path already taken to find the node, in the form of DN parts. It
-	// starts out as want.Parts() and progresses to become empty as the traversal
-	// continues.
-	current []string
-	// left is the path that's still needed to be taken in order to find the node, in
-	// the form of DN parts. It starts out empty and progresses to become wants.Parts()
-	// as the traversal continues.
-	left []string
-}
-
-// next adjusts the traversal's current/left slices to the next element of the
-// traversal, returns the part that's now being looked for (or "" if the traveral
-// is done) and the full DN of the element that's being looked for.
-//
-// For example, a traversal of foo.bar.baz will cause .next() to return the
-// following on each invocation:
-//   - part: foo, full: foo
-//   - part: bar, full: foo.bar
-//   - part: baz, full: foo.bar.baz
-//   - part: "",  full: foo.bar.baz
-func (t *nodeTraversal) next() (part string, full DN) {
-	if len(t.left) == 0 {
-		return "", t.want
-	}
-	part = t.left[0]
-	t.current = append(t.current, part)
-	t.left = t.left[1:]
-	full = DN(strings.Join(t.current, "."))
-	return
-}
-
-// newTraversal returns a nodeTraversal fora a given wanted DN.
-func newTraversal(dn DN) (*nodeTraversal, error) {
-	parts, err := dn.Path()
-	if err != nil {
-		return nil, err
-	}
-	return &nodeTraversal{
-		want: dn,
-		left: parts,
-	}, nil
-}
-
-// execute the traversal in order to find the node. This can only be called once
-// per traversal. Nodes will be created within the tree until the target node is
-// reached. Existing nodes will be reused. This is effectively an idempotent way of
-// accessing a node in the tree based on a traversal.
-func (t *nodeTraversal) execute(n *node) *node {
-	cur := n
-	for {
-		part, full := t.next()
-		if part == "" {
-			return cur
-		}
-
-		mu := &cur.mu
-		mu.Lock()
-		if _, ok := cur.children[part]; !ok {
-			cur.children[part] = newNode(n.tree, full)
-		}
-		cur = cur.children[part]
-		mu.Unlock()
-	}
-}
diff --git a/metropolis/pkg/logtree/logtree_access.go b/metropolis/pkg/logtree/logtree_access.go
deleted file mode 100644
index b601ea4..0000000
--- a/metropolis/pkg/logtree/logtree_access.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"errors"
-	"sync/atomic"
-)
-
-// LogReadOption describes options for the LogTree.Read call.
-type LogReadOption struct {
-	withChildren               bool
-	withStream                 bool
-	withBacklog                int
-	onlyLeveled                bool
-	onlyRaw                    bool
-	leveledWithMinimumSeverity Severity
-}
-
-// WithChildren makes Read return/stream data for both a given DN and all its
-// children.
-func WithChildren() LogReadOption { return LogReadOption{withChildren: true} }
-
-// WithStream makes Read return a stream of data. This works alongside WithBacklog
-// to create a read-and-stream construct.
-func WithStream() LogReadOption { return LogReadOption{withStream: true} }
-
-// WithBacklog makes Read return already recorded log entries, up to count
-// elements.
-func WithBacklog(count int) LogReadOption { return LogReadOption{withBacklog: count} }
-
-// BacklogAllAvailable makes WithBacklog return all backlogged log data that
-// logtree possesses.
-const BacklogAllAvailable int = -1
-
-func OnlyRaw() LogReadOption { return LogReadOption{onlyRaw: true} }
-
-func OnlyLeveled() LogReadOption { return LogReadOption{onlyLeveled: true} }
-
-// LeveledWithMinimumSeverity makes Read return only log entries that are at least
-// at a given Severity. If only leveled entries are needed, OnlyLeveled must be
-// used. This is a no-op when OnlyRaw is used.
-func LeveledWithMinimumSeverity(s Severity) LogReadOption {
-	return LogReadOption{leveledWithMinimumSeverity: s}
-}
-
-// LogReader permits reading an already existing backlog of log entries and to
-// stream further ones.
-type LogReader struct {
-	// Backlog are the log entries already logged by LogTree. This will only be set if
-	// WithBacklog has been passed to Read.
-	Backlog []*LogEntry
-	// Stream is a channel of new entries as received live by LogTree. This will only
-	// be set if WithStream has been passed to Read. In this case, entries from this
-	// channel must be read as fast as possible by the consumer in order to prevent
-	// missing entries.
-	Stream <-chan *LogEntry
-	// done is channel used to signal (by closing) that the log consumer is not
-	// interested in more Stream data.
-	done chan<- struct{}
-	// missed is an atomic integer pointer that tells the subscriber how many messages
-	// in Stream they missed. This pointer is nil if no streaming has been requested.
-	missed *uint64
-}
-
-// Missed returns the amount of entries that were missed from Stream (as the
-// channel was not drained fast enough).
-func (l *LogReader) Missed() uint64 {
-	// No Stream.
-	if l.missed == nil {
-		return 0
-	}
-	return atomic.LoadUint64(l.missed)
-}
-
-// Close closes the LogReader's Stream. This must be called once the Reader does
-// not wish to receive streaming messages anymore.
-func (l *LogReader) Close() {
-	if l.done != nil {
-		close(l.done)
-	}
-}
-
-var (
-	ErrRawAndLeveled = errors.New("cannot return logs that are simultaneously OnlyRaw and OnlyLeveled")
-)
-
-// Read and/or stream entries from a LogTree. The returned LogReader is influenced
-// by the LogReadOptions passed, which influence whether the Read will return
-// existing entries, a stream, or both. In addition the options also dictate
-// whether only entries for that particular DN are returned, or for all sub-DNs as
-// well.
-func (l *LogTree) Read(dn DN, opts ...LogReadOption) (*LogReader, error) {
-	l.journal.mu.RLock()
-	defer l.journal.mu.RUnlock()
-
-	var backlog int
-	var stream bool
-	var recursive bool
-	var leveledSeverity Severity
-	var onlyRaw, onlyLeveled bool
-
-	for _, opt := range opts {
-		if opt.withBacklog > 0 || opt.withBacklog == BacklogAllAvailable {
-			backlog = opt.withBacklog
-		}
-		if opt.withStream {
-			stream = true
-		}
-		if opt.withChildren {
-			recursive = true
-		}
-		if opt.leveledWithMinimumSeverity != "" {
-			leveledSeverity = opt.leveledWithMinimumSeverity
-		}
-		if opt.onlyLeveled {
-			onlyLeveled = true
-		}
-		if opt.onlyRaw {
-			onlyRaw = true
-		}
-	}
-
-	if onlyLeveled && onlyRaw {
-		return nil, ErrRawAndLeveled
-	}
-
-	var filters []filter
-	if onlyLeveled {
-		filters = append(filters, filterOnlyLeveled)
-	}
-	if onlyRaw {
-		filters = append(filters, filterOnlyRaw)
-	}
-	if recursive {
-		filters = append(filters, filterSubtree(dn))
-	} else {
-		filters = append(filters, filterExact(dn))
-	}
-	if leveledSeverity != "" {
-		filters = append(filters, filterSeverity(leveledSeverity))
-	}
-
-	var entries []*entry
-	if backlog > 0 || backlog == BacklogAllAvailable {
-		if recursive {
-			entries = l.journal.scanEntries(backlog, filters...)
-		} else {
-			entries = l.journal.getEntries(backlog, dn, filters...)
-		}
-	}
-
-	var sub *subscriber
-	if stream {
-		sub = &subscriber{
-			// TODO(q3k): make buffer size configurable
-			dataC:   make(chan *LogEntry, 128),
-			doneC:   make(chan struct{}),
-			filters: filters,
-		}
-		l.journal.subscribe(sub)
-	}
-
-	lr := &LogReader{}
-	lr.Backlog = make([]*LogEntry, len(entries))
-	for i, entry := range entries {
-		lr.Backlog[i] = entry.external()
-	}
-	if stream {
-		lr.Stream = sub.dataC
-		lr.done = sub.doneC
-		lr.missed = &sub.missed
-	}
-	return lr, nil
-}
diff --git a/metropolis/pkg/logtree/logtree_entry.go b/metropolis/pkg/logtree/logtree_entry.go
deleted file mode 100644
index d1c700e..0000000
--- a/metropolis/pkg/logtree/logtree_entry.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/mitchellh/go-wordwrap"
-
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-	lpb "source.monogon.dev/metropolis/pkg/logtree/proto"
-)
-
-// LogEntry contains a log entry, combining both leveled and raw logging into a
-// single stream of events. A LogEntry will contain exactly one of either
-// LeveledPayload or RawPayload.
-type LogEntry struct {
-	// If non-nil, this is a leveled logging entry.
-	Leveled *LeveledPayload
-	// If non-nil, this is a raw logging entry line.
-	Raw *logbuffer.Line
-	// DN from which this entry was logged.
-	DN DN
-}
-
-// String returns a canonical representation of this payload as a single string
-// prefixed with metadata. If the entry is a leveled log entry that originally was
-// logged with newlines this representation will also contain newlines, with each
-// original message part prefixed by the metadata. For an alternative call that
-// will instead return a canonical prefix and a list of lines in the message, see
-// Strings().
-func (l *LogEntry) String() string {
-	if l.Leveled != nil {
-		prefix, messages := l.Leveled.Strings()
-		res := make([]string, len(messages))
-		for i, m := range messages {
-			res[i] = fmt.Sprintf("%-32s %s%s", l.DN, prefix, m)
-		}
-		return strings.Join(res, "\n")
-	}
-	if l.Raw != nil {
-		return fmt.Sprintf("%-32s R %s", l.DN, l.Raw)
-	}
-	return "INVALID"
-}
-
-// ConciseString returns a concise representation of this log entry for
-// constrained environments, like TTY consoles.
-//
-// The output format is as follows:
-//
-//	  shortened dn I Hello there
-//	some component W Something went wrong
-//	  shortened dn I Goodbye there
-//	external stuff R I am en external process using raw logging.
-//
-// The above output is the result of calling ConciseString on three different
-// LogEntries.
-//
-// If maxWidth is greater than zero, word wrapping will be applied. For example,
-// with maxWidth set to 40:
-//
-//	     shortened I Hello there
-//	some component W Something went wrong and here are the very long details that
-//	               | describe this particular issue: according to all known laws of
-//	               | aviation, there is no way a bee should be able to fly.
-//	  shortened dn I Goodbye there
-//	external stuff R I am en external process using raw logging.
-//
-// The above output is also the result of calling ConciseString on three
-// different LogEntries.
-//
-// Multi-line log entries will emit 'continuation' lines (with '|') in the same
-// way as word wrapping does. That means that even with word wrapping disabled,
-// the result of this function might be multiline.
-//
-// The width of the first column (the 'shortened DN' column) is automatically
-// selected based on maxWidth. If maxWidth is less than 60, the column will be
-// omitted. For example, with maxWidth set to 20:
-//
-//	I Hello there
-//	W Something went wrong and here are the very long details that
-//	| describe this particular issue: according to all known laws of
-//	| aviation, there is no way a bee should be able to fly.
-//	I Goodbye there
-//	R I am en external process using raw logging.
-//
-// The given `dict` implements simple replacement rules for shortening the DN
-// parts of a log entry's DN. Some rules are hardcoded for Metropolis' DN tree.
-// If no extra shortening rules should be applied, dict can be set to ni// The
-// given `dict` implements simple replacement rules for shortening the DN parts
-// of a log entry's DN. Some rules are hardcoded for Metropolis' DN tree. If no
-// extra shortening rules should be applied, dict can be set to nil.
-func (l *LogEntry) ConciseString(dict ShortenDictionary, maxWidth int) string {
-	// Decide on a dnWidth.
-	dnWidth := 0
-	switch {
-	case maxWidth >= 80:
-		dnWidth = 20
-	case maxWidth >= 60:
-		dnWidth = 16
-	case maxWidth <= 0:
-		// No word wrapping.
-		dnWidth = 20
-	}
-
-	// Compute shortened DN, if needed.
-	sh := ""
-	if dnWidth > 0 {
-		sh = l.DN.Shorten(dict, dnWidth)
-		sh = fmt.Sprintf("%*s ", dnWidth, sh)
-	}
-
-	// Prefix of the first line emitted.
-	var prefix string
-	switch {
-	case l.Leveled != nil:
-		prefix = sh + string(l.Leveled.Severity()) + " "
-	case l.Raw != nil:
-		prefix = sh + "R "
-	}
-	// Prefix of rest of lines emitted.
-	continuationPrefix := strings.Repeat(" ", len(sh)) + "| "
-
-	// Collect lines based on the type of LogEntry.
-	var lines []string
-	collect := func(message string) {
-		if maxWidth > 0 {
-			message = wordwrap.WrapString(message, uint(maxWidth-len(prefix)))
-		}
-		for _, m2 := range strings.Split(message, "\n") {
-			if len(m2) == 0 {
-				continue
-			}
-			if len(lines) == 0 {
-				lines = append(lines, prefix+m2)
-			} else {
-				lines = append(lines, continuationPrefix+m2)
-			}
-		}
-	}
-	switch {
-	case l.Leveled != nil:
-		_, messages := l.Leveled.Strings()
-		for _, m := range messages {
-			collect(m)
-		}
-	case l.Raw != nil:
-		collect(l.Raw.String())
-	default:
-		return ""
-	}
-
-	return strings.Join(lines, "\n")
-}
-
-// Strings returns the canonical representation of this payload split into a
-// prefix and all lines that were contained in the original message. This is
-// meant to be displayed to the user by showing the prefix before each line,
-// concatenated together - possibly in a table form with the prefixes all
-// unified with a rowspan- like mechanism.
-//
-// For example, this function can return:
-//   prefix = "root.foo.bar                    I1102 17:20:06.921395     0 foo.go:42] "
-//   lines = []string{"current tags:", " - one", " - two"}
-//
-// With this data, the result should be presented to users this way in text form:
-// root.foo.bar                    I1102 17:20:06.921395 foo.go:42] current tags:
-// root.foo.bar                    I1102 17:20:06.921395 foo.go:42]  - one
-// root.foo.bar                    I1102 17:20:06.921395 foo.go:42]  - two
-//
-// Or, in a table layout:
-// .----------------------------------------------------------------------.
-// | root.foo.bar     I1102 17:20:06.921395 foo.go:42] : current tags:    |
-// |                                                   :------------------|
-// |                                                   :  - one           |
-// |                                                   :------------------|
-// |                                                   :  - two           |
-// '----------------------------------------------------------------------'
-
-func (l *LogEntry) Strings() (prefix string, lines []string) {
-	if l.Leveled != nil {
-		prefix, messages := l.Leveled.Strings()
-		prefix = fmt.Sprintf("%-32s %s", l.DN, prefix)
-		return prefix, messages
-	}
-	if l.Raw != nil {
-		return fmt.Sprintf("%-32s R ", l.DN), []string{l.Raw.Data}
-	}
-	return "INVALID ", []string{"INVALID"}
-}
-
-// Proto converts this LogEntry to proto. Returned value may be nil if given
-// LogEntry is invalid, eg. contains neither a Raw nor Leveled entry.
-func (l *LogEntry) Proto() *lpb.LogEntry {
-	p := &lpb.LogEntry{
-		Dn: string(l.DN),
-	}
-	switch {
-	case l.Leveled != nil:
-		leveled := l.Leveled
-		p.Kind = &lpb.LogEntry_Leveled_{
-			Leveled: leveled.Proto(),
-		}
-	case l.Raw != nil:
-		raw := l.Raw
-		p.Kind = &lpb.LogEntry_Raw_{
-			Raw: raw.ProtoLog(),
-		}
-	default:
-		return nil
-	}
-	return p
-}
-
-// LogEntryFromProto parses a proto LogEntry back into internal structure.
-// This can be used in log proto API consumers to easily print received log
-// entries.
-func LogEntryFromProto(l *lpb.LogEntry) (*LogEntry, error) {
-	dn := DN(l.Dn)
-	if _, err := dn.Path(); err != nil {
-		return nil, fmt.Errorf("could not convert DN: %w", err)
-	}
-	res := &LogEntry{
-		DN: dn,
-	}
-	switch inner := l.Kind.(type) {
-	case *lpb.LogEntry_Leveled_:
-		leveled, err := LeveledPayloadFromProto(inner.Leveled)
-		if err != nil {
-			return nil, fmt.Errorf("could not convert leveled entry: %w", err)
-		}
-		res.Leveled = leveled
-	case *lpb.LogEntry_Raw_:
-		line, err := logbuffer.LineFromLogProto(inner.Raw)
-		if err != nil {
-			return nil, fmt.Errorf("could not convert raw entry: %w", err)
-		}
-		res.Raw = line
-	default:
-		return nil, fmt.Errorf("proto has neither Leveled nor Raw set")
-	}
-	return res, nil
-}
diff --git a/metropolis/pkg/logtree/logtree_publisher.go b/metropolis/pkg/logtree/logtree_publisher.go
deleted file mode 100644
index 0b945e3..0000000
--- a/metropolis/pkg/logtree/logtree_publisher.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"fmt"
-	"io"
-	"runtime"
-	"strings"
-	"time"
-
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-)
-
-type leveledPublisher struct {
-	node  *node
-	depth int
-}
-
-// LeveledFor returns a LeveledLogger publishing interface for a given DN. An error
-// may be returned if the DN is malformed.
-func (l *LogTree) LeveledFor(dn DN) (LeveledLogger, error) {
-	node, err := l.nodeByDN(dn)
-	if err != nil {
-		return nil, err
-	}
-	return &leveledPublisher{
-		node:  node,
-		depth: 0,
-	}, nil
-}
-
-func (l *LogTree) RawFor(dn DN) (io.Writer, error) {
-	node, err := l.nodeByDN(dn)
-	if err != nil {
-		return nil, fmt.Errorf("could not retrieve raw logger: %w", err)
-	}
-	return node.rawLineBuffer, nil
-}
-
-// MustLeveledFor returns a LeveledLogger publishing interface for a given DN, or
-// panics if the given DN is invalid.
-func (l *LogTree) MustLeveledFor(dn DN) LeveledLogger {
-	leveled, err := l.LeveledFor(dn)
-	if err != nil {
-		panic(fmt.Errorf("LeveledFor returned: %w", err))
-	}
-	return leveled
-}
-
-func (l *LogTree) MustRawFor(dn DN) io.Writer {
-	raw, err := l.RawFor(dn)
-	if err != nil {
-		panic(fmt.Errorf("RawFor returned: %w", err))
-	}
-	return raw
-}
-
-// SetVerbosity sets the verbosity for a given DN (non-recursively, ie. for that DN
-// only, not its children).
-func (l *LogTree) SetVerbosity(dn DN, level VerbosityLevel) error {
-	node, err := l.nodeByDN(dn)
-	if err != nil {
-		return err
-	}
-	node.verbosity = level
-	return nil
-}
-
-// logRaw is called by this node's LineBuffer any time a raw log line is completed.
-// It will create a new entry, append it to the journal, and notify all pertinent
-// subscribers.
-func (n *node) logRaw(line *logbuffer.Line) {
-	e := &entry{
-		origin: n.dn,
-		raw:    line,
-	}
-	n.tree.journal.append(e)
-	n.tree.journal.notify(e)
-}
-
-// LogExternalLeveled injects a ExternalLeveledPayload into a given
-// LeveledLogger. This should only be used by systems which translate external
-// data sources into leveled logging - see ExternelLeveledPayload for more
-// information.
-func LogExternalLeveled(l LeveledLogger, e *ExternalLeveledPayload) error {
-	publisher, ok := l.(*leveledPublisher)
-	if !ok {
-		return fmt.Errorf("the given LeveledLogger is not a *leveledPublisher")
-	}
-	p := e.sanitize()
-	entry := &entry{
-		origin:  publisher.node.dn,
-		leveled: p,
-	}
-	publisher.node.tree.journal.append(entry)
-	publisher.node.tree.journal.notify(entry)
-	return nil
-}
-
-// log builds a LeveledPayload and entry for a given message, including all related
-// metadata. It will create a new entry append it to the journal, and notify all
-// pertinent subscribers.
-func (l *leveledPublisher) logLeveled(depth int, severity Severity, msg string) {
-	_, file, line, ok := runtime.Caller(2 + depth)
-	if !ok {
-		file = "???"
-		line = 1
-	} else {
-		slash := strings.LastIndex(file, "/")
-		if slash >= 0 {
-			file = file[slash+1:]
-		}
-	}
-
-	// Remove leading/trailing newlines and split.
-	messages := strings.Split(strings.Trim(msg, "\n"), "\n")
-
-	p := &LeveledPayload{
-		timestamp: time.Now(),
-		severity:  severity,
-		messages:  messages,
-		file:      file,
-		line:      line,
-	}
-	e := &entry{
-		origin:  l.node.dn,
-		leveled: p,
-	}
-	l.node.tree.journal.append(e)
-	l.node.tree.journal.notify(e)
-}
-
-// Info implements the LeveledLogger interface.
-func (l *leveledPublisher) Info(args ...interface{}) {
-	l.logLeveled(l.depth, INFO, fmt.Sprint(args...))
-}
-
-// Infof implements the LeveledLogger interface.
-func (l *leveledPublisher) Infof(format string, args ...interface{}) {
-	l.logLeveled(l.depth, INFO, fmt.Sprintf(format, args...))
-}
-
-// Warning implements the LeveledLogger interface.
-func (l *leveledPublisher) Warning(args ...interface{}) {
-	l.logLeveled(l.depth, WARNING, fmt.Sprint(args...))
-}
-
-// Warningf implements the LeveledLogger interface.
-func (l *leveledPublisher) Warningf(format string, args ...interface{}) {
-	l.logLeveled(l.depth, WARNING, fmt.Sprintf(format, args...))
-}
-
-// Error implements the LeveledLogger interface.
-func (l *leveledPublisher) Error(args ...interface{}) {
-	l.logLeveled(l.depth, ERROR, fmt.Sprint(args...))
-}
-
-// Errorf implements the LeveledLogger interface.
-func (l *leveledPublisher) Errorf(format string, args ...interface{}) {
-	l.logLeveled(l.depth, ERROR, fmt.Sprintf(format, args...))
-}
-
-// Fatal implements the LeveledLogger interface.
-func (l *leveledPublisher) Fatal(args ...interface{}) {
-	l.logLeveled(l.depth, FATAL, fmt.Sprint(args...))
-}
-
-// Fatalf implements the LeveledLogger interface.
-func (l *leveledPublisher) Fatalf(format string, args ...interface{}) {
-	l.logLeveled(l.depth, FATAL, fmt.Sprintf(format, args...))
-}
-
-// WithAddedStackDepth impleemnts the LeveledLogger interface.
-func (l *leveledPublisher) WithAddedStackDepth(depth int) LeveledLogger {
-	l2 := *l
-	l2.depth += depth
-	return &l2
-}
-
-// V implements the LeveledLogger interface.
-func (l *leveledPublisher) V(v VerbosityLevel) VerboseLeveledLogger {
-	return &verbose{
-		publisher: l,
-		enabled:   l.node.verbosity >= v,
-	}
-}
-
-// verbose implements the VerboseLeveledLogger interface. It is a thin wrapper
-// around node, with an 'enabled' bool. This means that V(n)-returned
-// VerboseLeveledLoggers must be short lived, as a changed in verbosity will not
-// affect all already existing VerboseLeveledLoggers.
-type verbose struct {
-	publisher *leveledPublisher
-	node      *node
-	enabled   bool
-}
-
-func (v *verbose) Enabled() bool {
-	return v.enabled
-}
-
-func (v *verbose) Info(args ...interface{}) {
-	if !v.enabled {
-		return
-	}
-	v.publisher.logLeveled(v.publisher.depth, INFO, fmt.Sprint(args...))
-}
-
-func (v *verbose) Infof(format string, args ...interface{}) {
-	if !v.enabled {
-		return
-	}
-	v.publisher.logLeveled(v.publisher.depth, INFO, fmt.Sprintf(format, args...))
-}
diff --git a/metropolis/pkg/logtree/logtree_test.go b/metropolis/pkg/logtree/logtree_test.go
deleted file mode 100644
index 54eabb7..0000000
--- a/metropolis/pkg/logtree/logtree_test.go
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
-	"fmt"
-	"strings"
-	"testing"
-	"time"
-)
-
-func expect(tree *LogTree, t *testing.T, dn DN, entries ...string) string {
-	t.Helper()
-	res, err := tree.Read(dn, WithChildren(), WithBacklog(BacklogAllAvailable))
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	defer res.Close()
-	if want, got := len(entries), len(res.Backlog); want != got {
-		t.Fatalf("wanted %v backlog entries, got %v", want, got)
-	}
-	got := make(map[string]bool)
-	for _, entry := range res.Backlog {
-		if entry.Leveled != nil {
-			got[entry.Leveled.MessagesJoined()] = true
-		}
-		if entry.Raw != nil {
-			got[entry.Raw.Data] = true
-		}
-	}
-	for _, entry := range entries {
-		if !got[entry] {
-			return fmt.Sprintf("missing entry %q", entry)
-		}
-	}
-	return ""
-}
-
-func readBacklog(tree *LogTree, t *testing.T, dn DN, backlog int, recursive bool) []string {
-	t.Helper()
-	opts := []LogReadOption{
-		WithBacklog(backlog),
-	}
-	if recursive {
-		opts = append(opts, WithChildren())
-	}
-	res, err := tree.Read(dn, opts...)
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	defer res.Close()
-
-	var lines []string
-	for _, e := range res.Backlog {
-		lines = append(lines, e.Leveled.Messages()...)
-	}
-	return lines
-}
-
-func TestMultiline(t *testing.T) {
-	tree := New()
-	// Two lines in a single message.
-	tree.MustLeveledFor("main").Info("foo\nbar")
-	// Two lines in a single message with a hanging newline that should get stripped.
-	tree.MustLeveledFor("main").Info("one\ntwo\n")
-
-	if res := expect(tree, t, "main", "foo\nbar", "one\ntwo"); res != "" {
-		t.Errorf("retrieval at main failed: %s", res)
-	}
-}
-
-func TestBacklogAll(t *testing.T) {
-	tree := New()
-	tree.MustLeveledFor("main").Info("hello, main!")
-	tree.MustLeveledFor("main.foo").Info("hello, main.foo!")
-	tree.MustLeveledFor("main.bar").Info("hello, main.bar!")
-	tree.MustLeveledFor("aux").Info("hello, aux!")
-	// No newline at the last entry - shouldn't get propagated to the backlog.
-	fmt.Fprintf(tree.MustRawFor("aux.process"), "processing foo\nprocessing bar\nbaz")
-
-	if res := expect(tree, t, "main", "hello, main!", "hello, main.foo!", "hello, main.bar!"); res != "" {
-		t.Errorf("retrieval at main failed: %s", res)
-	}
-	if res := expect(tree, t, "", "hello, main!", "hello, main.foo!", "hello, main.bar!", "hello, aux!", "processing foo", "processing bar"); res != "" {
-		t.Errorf("retrieval at root failed: %s", res)
-	}
-	if res := expect(tree, t, "aux", "hello, aux!", "processing foo", "processing bar"); res != "" {
-		t.Errorf("retrieval at aux failed: %s", res)
-	}
-}
-
-func TestBacklogExact(t *testing.T) {
-	tree := New()
-	tree.MustLeveledFor("main").Info("hello, main!")
-	tree.MustLeveledFor("main.foo").Info("hello, main.foo!")
-	tree.MustLeveledFor("main.bar").Info("hello, main.bar!")
-	tree.MustLeveledFor("main.bar.chatty").Info("hey there how are you")
-	tree.MustLeveledFor("main.bar.quiet").Info("fine how are you")
-	tree.MustLeveledFor("main.bar.chatty").Info("i've been alright myself")
-	tree.MustLeveledFor("main.bar.chatty").Info("but to tell you honestly...")
-	tree.MustLeveledFor("main.bar.chatty").Info("i feel like i'm stuck?")
-	tree.MustLeveledFor("main.bar.quiet").Info("mhm")
-	tree.MustLeveledFor("main.bar.chatty").Info("like you know what i'm saying, stuck in like")
-	tree.MustLeveledFor("main.bar.chatty").Info("like a go test?")
-	tree.MustLeveledFor("main.bar.quiet").Info("yeah totally")
-	tree.MustLeveledFor("main.bar.chatty").Info("it's hard to put my finger on it")
-	tree.MustLeveledFor("main.bar.chatty").Info("anyway, how's the wife doing?")
-
-	check := func(a []string, b ...string) {
-		t.Helper()
-		if len(a) != len(b) {
-			t.Errorf("Legth mismatch: wanted %d, got %d", len(b), len(a))
-		}
-		count := len(a)
-		if len(b) < count {
-			count = len(b)
-		}
-		for i := 0; i < count; i++ {
-			if want, got := b[i], a[i]; want != got {
-				t.Errorf("Message %d: wanted %q, got %q", i, want, got)
-			}
-		}
-	}
-
-	check(readBacklog(tree, t, "main", 3, true), "yeah totally", "it's hard to put my finger on it", "anyway, how's the wife doing?")
-	check(readBacklog(tree, t, "main.foo", 3, false), "hello, main.foo!")
-	check(readBacklog(tree, t, "main.bar.quiet", 2, true), "mhm", "yeah totally")
-}
-
-func TestStream(t *testing.T) {
-	tree := New()
-	tree.MustLeveledFor("main").Info("hello, backlog")
-	fmt.Fprintf(tree.MustRawFor("main.process"), "hello, raw backlog\n")
-
-	res, err := tree.Read("", WithBacklog(BacklogAllAvailable), WithChildren(), WithStream())
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	defer res.Close()
-	if want, got := 2, len(res.Backlog); want != got {
-		t.Errorf("wanted %d backlog item, got %d", want, got)
-	}
-
-	tree.MustLeveledFor("main").Info("hello, stream")
-	fmt.Fprintf(tree.MustRawFor("main.raw"), "hello, raw stream\n")
-
-	entries := make(map[string]bool)
-	timeout := time.After(time.Second * 1)
-	for {
-		done := false
-		select {
-		case <-timeout:
-			done = true
-		case p := <-res.Stream:
-			if p.Leveled != nil {
-				entries[p.Leveled.MessagesJoined()] = true
-			}
-			if p.Raw != nil {
-				entries[p.Raw.Data] = true
-			}
-		}
-		if done {
-			break
-		}
-	}
-	if entry := "hello, stream"; !entries[entry] {
-		t.Errorf("Missing entry %q", entry)
-	}
-	if entry := "hello, raw stream"; !entries[entry] {
-		t.Errorf("Missing entry %q", entry)
-	}
-}
-
-func TestVerbose(t *testing.T) {
-	tree := New()
-
-	tree.MustLeveledFor("main").V(10).Info("this shouldn't get logged")
-
-	reader, err := tree.Read("", WithBacklog(BacklogAllAvailable), WithChildren())
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	if want, got := 0, len(reader.Backlog); want != got {
-		t.Fatalf("expected nothing to be logged, got %+v", reader.Backlog)
-	}
-
-	tree.SetVerbosity("main", 10)
-	tree.MustLeveledFor("main").V(10).Info("this should get logged")
-
-	reader, err = tree.Read("", WithBacklog(BacklogAllAvailable), WithChildren())
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	if want, got := 1, len(reader.Backlog); want != got {
-		t.Fatalf("expected %d entries to get logged, got %d", want, got)
-	}
-}
-
-func TestMetadata(t *testing.T) {
-	tree := New()
-	tree.MustLeveledFor("main").Error("i am an error")
-	tree.MustLeveledFor("main").Warning("i am a warning")
-	tree.MustLeveledFor("main").Info("i am informative")
-	tree.MustLeveledFor("main").V(0).Info("i am a zero-level debug")
-
-	reader, err := tree.Read("", WithChildren(), WithBacklog(BacklogAllAvailable))
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	if want, got := 4, len(reader.Backlog); want != got {
-		t.Fatalf("expected %d entries, got %d", want, got)
-	}
-
-	for _, te := range []struct {
-		ix       int
-		severity Severity
-		message  string
-	}{
-		{0, ERROR, "i am an error"},
-		{1, WARNING, "i am a warning"},
-		{2, INFO, "i am informative"},
-		{3, INFO, "i am a zero-level debug"},
-	} {
-		p := reader.Backlog[te.ix]
-		if want, got := te.severity, p.Leveled.Severity(); want != got {
-			t.Errorf("wanted element %d to have severity %s, got %s", te.ix, want, got)
-		}
-		if want, got := te.message, p.Leveled.MessagesJoined(); want != got {
-			t.Errorf("wanted element %d to have message %q, got %q", te.ix, want, got)
-		}
-		if want, got := "logtree_test.go", strings.Split(p.Leveled.Location(), ":")[0]; want != got {
-			t.Errorf("wanted element %d to have file %q, got %q", te.ix, want, got)
-		}
-	}
-}
-
-func TestSeverity(t *testing.T) {
-	tree := New()
-	tree.MustLeveledFor("main").Error("i am an error")
-	tree.MustLeveledFor("main").Warning("i am a warning")
-	tree.MustLeveledFor("main").Info("i am informative")
-	tree.MustLeveledFor("main").V(0).Info("i am a zero-level debug")
-
-	reader, err := tree.Read("main", WithBacklog(BacklogAllAvailable), LeveledWithMinimumSeverity(WARNING))
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	if want, got := 2, len(reader.Backlog); want != got {
-		t.Fatalf("wanted %d entries, got %d", want, got)
-	}
-	if want, got := "i am an error", reader.Backlog[0].Leveled.MessagesJoined(); want != got {
-		t.Fatalf("wanted entry %q, got %q", want, got)
-	}
-	if want, got := "i am a warning", reader.Backlog[1].Leveled.MessagesJoined(); want != got {
-		t.Fatalf("wanted entry %q, got %q", want, got)
-	}
-}
-
-func TestAddedStackDepth(t *testing.T) {
-	tree := New()
-	helper := func(msg string) {
-		tree.MustLeveledFor("main").WithAddedStackDepth(1).Infof("oh no: %s", msg)
-	}
-
-	// The next three lines are tested to be next to each other.
-	helper("it failed")
-	tree.MustLeveledFor("main").Infof("something else")
-
-	reader, err := tree.Read("main", WithBacklog(BacklogAllAvailable))
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	if want, got := 2, len(reader.Backlog); want != got {
-		t.Fatalf("wanted %d entries, got %d", want, got)
-	}
-	if want, got := "oh no: it failed", reader.Backlog[0].Leveled.MessagesJoined(); want != got {
-		t.Errorf("wanted entry %q, got %q", want, got)
-	}
-	if want, got := "something else", reader.Backlog[1].Leveled.MessagesJoined(); want != got {
-		t.Errorf("wanted entry %q, got %q", want, got)
-	}
-	if first, second := reader.Backlog[0].Leveled.line, reader.Backlog[1].Leveled.line; first+1 != second {
-		t.Errorf("first entry at %d, second at %d, wanted one after the other", first, second)
-	}
-}
-
-func TestLogEntry_ConciseString(t *testing.T) {
-	trim := func(s string) string {
-		return strings.Trim(s, "\n")
-	}
-	for i, te := range []struct {
-		entry    *LogEntry
-		maxWidth int
-		want     string
-	}{
-		{
-			&LogEntry{
-				Leveled: &LeveledPayload{
-					messages: []string{"Hello there!"},
-					severity: WARNING,
-				},
-				DN: "root.role.kubernetes.run.kubernetes.apiserver",
-			},
-			120,
-			"       k8s apiserver W Hello there!",
-		},
-		{
-			&LogEntry{
-				Leveled: &LeveledPayload{
-					messages: []string{"Hello there!", "I am multiline."},
-					severity: WARNING,
-				},
-				DN: "root.role.kubernetes.run.kubernetes.apiserver",
-			},
-			120,
-			trim(`
-       k8s apiserver W Hello there!
-                     | I am multiline.
-`),
-		},
-		{
-			&LogEntry{
-				Leveled: &LeveledPayload{
-					messages: []string{"Hello there! I am a very long string, and I will get wrapped to 120 columns because that's just how life is for long strings."},
-					severity: WARNING,
-				},
-				DN: "root.role.kubernetes.run.kubernetes.apiserver",
-			},
-			120,
-			trim(`
-       k8s apiserver W Hello there! I am a very long string, and I will get wrapped to 120 columns because that's just
-                     | how life is for long strings.
-`),
-		},
-		{
-			&LogEntry{
-				Leveled: &LeveledPayload{
-					messages: []string{"Hello there!"},
-					severity: WARNING,
-				},
-				DN: "root.role.kubernetes.run.kubernetes.apiserver",
-			},
-			60,
-			trim(`
-   k8s apiserver W Hello there!
-`),
-		},
-		{
-			&LogEntry{
-				Leveled: &LeveledPayload{
-					messages: []string{"Hello there!"},
-					severity: WARNING,
-				},
-				DN: "root.role.kubernetes.run.kubernetes.apiserver",
-			},
-			40,
-			"W Hello there!",
-		},
-	} {
-		got := te.entry.ConciseString(MetropolisShortenDict, te.maxWidth)
-		for _, line := range strings.Split(got, "\n") {
-			if want, got := te.maxWidth, len(line); got > want {
-				t.Errorf("Case %d, line %q too long (%d bytes, wanted at most %d)", i, line, got, want)
-			}
-		}
-		if te.want != got {
-			t.Errorf("Case %d, message diff", i)
-			t.Logf("Wanted:\n%s", te.want)
-			t.Logf("Got:\n%s", got)
-		}
-	}
-}
diff --git a/metropolis/pkg/logtree/proto/BUILD.bazel b/metropolis/pkg/logtree/proto/BUILD.bazel
deleted file mode 100644
index e7f8c82..0000000
--- a/metropolis/pkg/logtree/proto/BUILD.bazel
+++ /dev/null
@@ -1,24 +0,0 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
-
-proto_library(
-    name = "proto_proto",
-    srcs = ["logtree.proto"],
-    visibility = ["//visibility:public"],
-    deps = ["@com_google_protobuf//:timestamp_proto"],
-)
-
-go_proto_library(
-    name = "proto_go_proto",
-    importpath = "source.monogon.dev/metropolis/pkg/logtree/proto",
-    proto = ":proto_proto",
-    visibility = ["//visibility:public"],
-)
-
-go_library(
-    name = "proto",
-    embed = [":proto_go_proto"],
-    importpath = "source.monogon.dev/metropolis/pkg/logtree/proto",
-    visibility = ["//visibility:public"],
-)
diff --git a/metropolis/pkg/logtree/proto/gomod-generated-placeholder.go b/metropolis/pkg/logtree/proto/gomod-generated-placeholder.go
deleted file mode 100644
index 92256db..0000000
--- a/metropolis/pkg/logtree/proto/gomod-generated-placeholder.go
+++ /dev/null
@@ -1 +0,0 @@
-package proto
diff --git a/metropolis/pkg/logtree/proto/logtree.proto b/metropolis/pkg/logtree/proto/logtree.proto
deleted file mode 100644
index 7586187..0000000
--- a/metropolis/pkg/logtree/proto/logtree.proto
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-package metropolis.pkg.logtree.proto;
-option go_package = "source.monogon.dev/metropolis/pkg/logtree/proto";
-
-import "google/protobuf/timestamp.proto";
-
-// Severity level corresponding to //metropolis/pkg/logtree.Severity.
-enum LeveledLogSeverity {
-  INVALID = 0;
-  INFO = 1;
-  WARNING = 2;
-  ERROR = 3;
-  FATAL = 4;
-}
-
-// LogEntry corresponding to logtree.LogEntry in //metropolis/pkg/logtree.
-message LogEntry {
-  // A leveled log entry emitted from a compatible system, eg. Metorpolis code
-  // or a klog-parsed line.
-  message Leveled {
-    repeated string lines = 1;
-    google.protobuf.Timestamp timestamp = 2;
-    LeveledLogSeverity severity = 3;
-    // Source of the error, expressed as file:line.
-    string location = 4;
-  }
-  // Raw log entry, captured from an external system without parting. Might
-  // contain some timestamp/level/origin information embedded in data. Data
-  // contained within should be treated as unsanitized external data.
-  message Raw {
-    string data = 1;
-    // Original length of line, set if data was truncated.
-    int64 original_length = 2;
-  }
-
-  // Origin DN (Distinguished Name), a unique identifier which is provided by
-  // the supervisor system.
-  string dn = 1;
-  oneof kind {
-    Leveled leveled = 2;
-    Raw raw = 3;
-  }
-}
diff --git a/metropolis/pkg/logtree/testhelpers.go b/metropolis/pkg/logtree/testhelpers.go
deleted file mode 100644
index 45bcaf2..0000000
--- a/metropolis/pkg/logtree/testhelpers.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package logtree
-
-import (
-	"context"
-	"testing"
-)
-
-// PipeAllToTest starts a goroutine that will forward all logtree entries
-// t.Logf(), in the canonical logtree payload representation.
-//
-// It's designed to be used in tests, and will automatically stop when the
-// test/benchmark it's running in exits.
-func PipeAllToTest(t testing.TB, lt *LogTree) {
-	t.Helper()
-
-	reader, err := lt.Read("", WithChildren(), WithStream())
-	if err != nil {
-		t.Fatalf("Failed to set up logtree reader: %v", err)
-	}
-
-	// Internal context used to cancel the goroutine. This could also be a
-	// implemented via a channel.
-	ctx, ctxC := context.WithCancel(context.Background())
-	t.Cleanup(ctxC)
-
-	go func() {
-		t.Helper()
-		for {
-			select {
-			case <-ctx.Done():
-				return
-			case p := <-reader.Stream:
-				t.Logf("%s", p.String())
-			}
-		}
-	}()
-}
diff --git a/metropolis/pkg/logtree/unraw/BUILD.bazel b/metropolis/pkg/logtree/unraw/BUILD.bazel
deleted file mode 100644
index 646d631..0000000
--- a/metropolis/pkg/logtree/unraw/BUILD.bazel
+++ /dev/null
@@ -1,24 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "unraw",
-    srcs = ["unraw.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/logtree/unraw",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//metropolis/pkg/logbuffer",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/supervisor",
-    ],
-)
-
-go_test(
-    name = "unraw_test",
-    srcs = ["unraw_test.go"],
-    embed = [":unraw"],
-    deps = [
-        "//metropolis/pkg/logbuffer",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/supervisor",
-    ],
-)
diff --git a/metropolis/pkg/logtree/unraw/unraw.go b/metropolis/pkg/logtree/unraw/unraw.go
deleted file mode 100644
index ef9d913..0000000
--- a/metropolis/pkg/logtree/unraw/unraw.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// unraw implements a facility to convert raw logs from external sources into
-// leveled logs.
-//
-// This is not the same as raw logging inside the logtree, which exists to
-// ingest logs that are either fully arbitrary or do not map cleanly to the
-// leveled logging concept. The unraw library is instead made to parse logs
-// from systems that also use leveled logs internally, but emit them to a
-// serialized byte stream that then needs to be turned back into something
-// leveled inside metropolis.
-//
-// Logs converted this way are unfortunately lossy and do not come with the
-// same guarantees as logs directly emitted via logtree. For example, there's
-// no built-in protection against systems emiting fudged timestamps or file
-// locations. Thus, this functionality should be used to interact with trusted
-// systems, not fully arbitrary logs.
-package unraw
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"os"
-	"sync"
-	"syscall"
-	"time"
-
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
-)
-
-// Parser is a user-defined function for converting a log line received from an
-// external system into a leveled logging payload.
-// The given LeveledWriter should be called for every leveled log entry that
-// results from this line. This means that a parser might skip some lines, or
-// emit multiple leveled payloads per line.
-type Parser func(*logbuffer.Line, LeveledWriter)
-
-// Converter is the main entrypoint of the unraw library. It wraps a
-// LeveledLogger in combination with a Parser to create an io.Writer that can
-// be sent raw log data.
-type Converter struct {
-	// Parser is the user-defined parsing function for converting log lines
-	// into leveled logging payloads. This must be set.
-	Parser Parser
-	// MaximumLineLength is the maximum length of a single log line when
-	// splitting incoming writes into lines. If a line is longer than this, it
-	// will be truncated (and will be sent to the Parser regardless).
-	//
-	// If not set, this defaults to 1024 bytes.
-	MaximumLineLength int
-	// LeveledLogger is the logtree leveled logger into which events from the
-	// Parser will be sent.
-	LeveledLogger logtree.LeveledLogger
-
-	// mu guards lb.
-	mu sync.Mutex
-	// lb is the underlying line buffer used to split incoming data into lines.
-	// It will be initialized on first Write.
-	lb *logbuffer.LineBuffer
-}
-
-// LeveledWriter is called by a Parser for every ExternelLeveledPayload it
-// wishes to emit into a backing LeveledLogger. If the payload is missing some
-// fields, these will default to some sensible values - see the
-// ExternalLeveledPayload structure definition for more information.
-type LeveledWriter func(*logtree.ExternalLeveledPayload)
-
-// Write implements io.Writer. Any write performed into the Converter will
-// populate the converter's internal buffer, and any time that buffer contains
-// a full line it will be sent over to the Parser for processing.
-func (e *Converter) Write(p []byte) (int, error) {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
-	if e.MaximumLineLength <= 0 {
-		e.MaximumLineLength = 1024
-	}
-	if e.lb == nil {
-		e.lb = logbuffer.NewLineBuffer(e.MaximumLineLength, func(l *logbuffer.Line) {
-			e.Parser(l, e.insert)
-		})
-	}
-	return e.lb.Write(p)
-}
-
-// insert implements LeveledWriter.
-func (e *Converter) insert(d *logtree.ExternalLeveledPayload) {
-	if err := logtree.LogExternalLeveled(e.LeveledLogger, d); err != nil {
-		e.LeveledLogger.Fatal("Could not insert unrawed entry: %v", err)
-	}
-}
-
-// NamedPipeReader returns a supervisor runnable that continously reads logs
-// from the given path and attempts to parse them into leveled logs using this
-// Converter.
-//
-// If the given path doesn't exist, a named pipe will be created there before
-// the function exits. This guarantee means that as long as any writing process
-// is not started before NamedPipeReader returns ther is no need to
-// remove/recreate the named pipe.
-//
-// TODO(q3k): defer the creation of the FIFO to localstorage so this doesn't
-// need to be taken care of in the first place.
-func (e *Converter) NamedPipeReader(path string) (supervisor.Runnable, error) {
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		if err := syscall.Mkfifo(path, 0666); err != nil {
-			return nil, fmt.Errorf("when creating named pipe: %w", err)
-		}
-	}
-	return func(ctx context.Context) error {
-		fifo, err := os.OpenFile(path, os.O_RDONLY, os.ModeNamedPipe)
-		if err != nil {
-			return fmt.Errorf("when opening named pipe: %w", err)
-		}
-		go func() {
-			<-ctx.Done()
-			fifo.Close()
-		}()
-		defer fifo.Close()
-		supervisor.Signal(ctx, supervisor.SignalHealthy)
-		for {
-			// Quit if requested.
-			if ctx.Err() != nil {
-				return ctx.Err()
-			}
-
-			n, err := io.Copy(e, fifo)
-			if n == 0 && err == nil {
-				// Hack because pipes/FIFOs can return zero reads when nobody
-				// is writing. To avoid busy-looping, sleep a bit before
-				// retrying. This does not loose data since the FIFO internal
-				// buffer will stall writes when it becomes full. 10ms maximum
-				// stall in a non-latency critical process (reading debug logs)
-				// is not an issue for us.
-				time.Sleep(10 * time.Millisecond)
-			} else if err != nil {
-				// Since we close fifo on context cancel, we'll get a 'file is already closed'
-				// io error here. Translate that over to the context error that caused it.
-				if ctx.Err() != nil {
-					return ctx.Err()
-				}
-				return fmt.Errorf("log pump failed: %w", err)
-			}
-
-		}
-	}, nil
-}
diff --git a/metropolis/pkg/logtree/unraw/unraw_test.go b/metropolis/pkg/logtree/unraw/unraw_test.go
deleted file mode 100644
index 71c88fd..0000000
--- a/metropolis/pkg/logtree/unraw/unraw_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package unraw
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"os"
-	"syscall"
-	"testing"
-
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
-)
-
-func testParser(l *logbuffer.Line, w LeveledWriter) {
-	w(&logtree.ExternalLeveledPayload{
-		Message: l.Data,
-	})
-}
-
-func TestNamedPipeReader(t *testing.T) {
-	dir, err := os.MkdirTemp("/tmp", "metropolis-test-named-pipe-reader")
-	if err != nil {
-		t.Fatalf("could not create tempdir: %v", err)
-	}
-	defer os.RemoveAll(dir)
-	fifoPath := dir + "/fifo"
-
-	// Start named pipe reader.
-	started := make(chan struct{})
-	stop, lt := supervisor.TestHarness(t, func(ctx context.Context) error {
-		converter := Converter{
-			Parser:        testParser,
-			LeveledLogger: supervisor.Logger(ctx),
-		}
-
-		r, err := converter.NamedPipeReader(fifoPath)
-		if err != nil {
-			return fmt.Errorf("could not create pipe reader: %w", err)
-		}
-		close(started)
-		return r(ctx)
-	})
-
-	<-started
-
-	// Open FIFO...
-	f, err := os.OpenFile(fifoPath, os.O_WRONLY, 0)
-	if err != nil {
-		t.Fatalf("could not open fifo: %v", err)
-	}
-
-	// Start reading all logs.
-	reader, err := lt.Read("root", logtree.WithChildren(), logtree.WithStream())
-	if err != nil {
-		t.Fatalf("could not get logtree reader: %v", err)
-	}
-	defer reader.Close()
-
-	// Write two lines to the fifo.
-	fmt.Fprintf(f, "foo\nbar\n")
-	f.Close()
-
-	// Expect lines to end up in logtree.
-	if got, want := (<-reader.Stream).Leveled.MessagesJoined(), "foo"; want != got {
-		t.Errorf("expected first message to be %q, got %q", want, got)
-	}
-	if got, want := (<-reader.Stream).Leveled.MessagesJoined(), "bar"; want != got {
-		t.Errorf("expected second message to be %q, got %q", want, got)
-	}
-
-	// Fully restart the entire supervisor and pipe reader, redo test, things
-	// should continue to work.
-	stop()
-
-	// Block until FIFO isn't being read anymore. This ensures that the
-	// NamedPipeReader actually stopped running, otherwise the following write to
-	// the fifo can race by writing to the old NamedPipeReader and making the test
-	// time out. This can also happen in production, but that will just cause us to
-	// lose piped data in the very small race window when this can happen
-	// (statistically in this test, <0.1%).
-	//
-	// The check is being done by opening the FIFO in 'non-blocking mode', which
-	// returns ENXIO immediately if the FIFO has no corresponding writer, and
-	// succeeds otherwise.
-	for {
-		ft, err := os.OpenFile(fifoPath, os.O_WRONLY|syscall.O_NONBLOCK, 0)
-		if err == nil {
-			// There's still a writer, keep trying.
-			ft.Close()
-		} else if errors.Is(err, syscall.ENXIO) {
-			// No writer, break.
-			break
-		} else {
-			// Something else?
-			t.Fatalf("OpenFile(%q): %v", fifoPath, err)
-		}
-	}
-
-	started = make(chan struct{})
-	stop, lt = supervisor.TestHarness(t, func(ctx context.Context) error {
-		converter := Converter{
-			Parser:        testParser,
-			LeveledLogger: supervisor.Logger(ctx),
-		}
-
-		r, err := converter.NamedPipeReader(fifoPath)
-		if err != nil {
-			return fmt.Errorf("could not create pipe reader: %w", err)
-		}
-		close(started)
-		return r(ctx)
-	})
-	defer stop()
-
-	<-started
-
-	// Start reading all logs.
-	reader, err = lt.Read("root", logtree.WithChildren(), logtree.WithStream())
-	if err != nil {
-		t.Fatalf("could not get logtree reader: %v", err)
-	}
-	defer reader.Close()
-
-	// Write line to the fifo.
-	f, err = os.OpenFile(fifoPath, os.O_WRONLY, 0)
-	if err != nil {
-		t.Fatalf("could not open fifo: %v", err)
-	}
-	fmt.Fprintf(f, "baz\n")
-	f.Close()
-
-	// Expect lines to end up in logtree.
-	if got, want := (<-reader.Stream).Leveled.MessagesJoined(), "baz"; want != got {
-		t.Errorf("expected first message to be %q, got %q", want, got)
-	}
-}
diff --git a/metropolis/pkg/logtree/zap.go b/metropolis/pkg/logtree/zap.go
deleted file mode 100644
index 7fd364a..0000000
--- a/metropolis/pkg/logtree/zap.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package logtree
-
-import (
-	"encoding/json"
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-
-	"source.monogon.dev/metropolis/pkg/logbuffer"
-)
-
-// Zapify turns a LeveledLogger into a zap.Logger which pipes its output into the
-// LeveledLogger. The message, severity and caller are carried over. Extra fields
-// are appended as JSON to the end of the log line.
-func Zapify(logger LeveledLogger, minimumLevel zapcore.Level) *zap.Logger {
-	p, ok := logger.(*leveledPublisher)
-	if !ok {
-		// Fail fast, as this is a programming error.
-		panic("Expected *leveledPublisher in LeveledLogger from supervisor")
-	}
-
-	ec := zapcore.EncoderConfig{
-		MessageKey:   "message",
-		LevelKey:     "level",
-		TimeKey:      "time",
-		CallerKey:    "caller",
-		EncodeLevel:  zapcore.LowercaseLevelEncoder,
-		EncodeTime:   zapcore.EpochTimeEncoder,
-		EncodeCaller: zapcore.ShortCallerEncoder,
-	}
-	s := zapSink{
-		publisher: p,
-	}
-	s.buffer = logbuffer.NewLineBuffer(4096, s.consumeLine)
-	zc := zapcore.NewCore(zapcore.NewJSONEncoder(ec), s.buffer, minimumLevel)
-	return zap.New(zc, zap.AddCaller())
-}
-
-type zapSink struct {
-	publisher *leveledPublisher
-	buffer    *logbuffer.LineBuffer
-}
-
-func (z *zapSink) consumeLine(l *logbuffer.Line) {
-	ze, err := parseZapJSON(l.Data)
-	if err != nil {
-		z.publisher.Warningf("failed to parse zap JSON: %v: %q", err, l.Data)
-		return
-	}
-	message := ze.message
-	if len(ze.extra) > 0 {
-		message += " " + ze.extra
-	}
-	e := &entry{
-		origin: z.publisher.node.dn,
-		leveled: &LeveledPayload{
-			timestamp: ze.time,
-			severity:  ze.severity,
-			messages:  []string{message},
-			file:      ze.file,
-			line:      ze.line,
-		},
-	}
-	z.publisher.node.tree.journal.append(e)
-	z.publisher.node.tree.journal.notify(e)
-}
-
-type zapEntry struct {
-	message  string
-	severity Severity
-	time     time.Time
-	file     string
-	line     int
-	extra    string
-}
-
-func parseZapJSON(s string) (*zapEntry, error) {
-	entry := make(map[string]any)
-	if err := json.Unmarshal([]byte(s), &entry); err != nil {
-		return nil, fmt.Errorf("invalid JSON: %v", err)
-	}
-	message, ok := entry["message"].(string)
-	if !ok {
-		return nil, fmt.Errorf("no message field")
-	}
-	level, ok := entry["level"].(string)
-	if !ok {
-		return nil, fmt.Errorf("no level field")
-	}
-	t, ok := entry["time"].(float64)
-	if !ok {
-		return nil, fmt.Errorf("no time field")
-	}
-	caller, ok := entry["caller"].(string)
-	if !ok {
-		return nil, fmt.Errorf("no caller field")
-	}
-
-	callerParts := strings.Split(caller, ":")
-	if len(callerParts) != 2 {
-		return nil, fmt.Errorf("invalid caller")
-	}
-	callerDirFile := strings.Split(callerParts[0], "/")
-	callerFile := callerDirFile[len(callerDirFile)-1]
-	callerLineS := callerParts[1]
-	callerLine, _ := strconv.Atoi(callerLineS)
-
-	var severity Severity
-	switch level {
-	case "warn":
-		severity = WARNING
-	case "error", "dpanic", "panic", "fatal":
-		severity = ERROR
-	default:
-		severity = INFO
-	}
-
-	secs := int64(t)
-	nsecs := int64((t - float64(secs)) * 1e9)
-
-	delete(entry, "message")
-	delete(entry, "level")
-	delete(entry, "time")
-	delete(entry, "caller")
-	var extra []byte
-	if len(entry) > 0 {
-		extra, _ = json.Marshal(entry)
-	}
-	return &zapEntry{
-		message:  message,
-		severity: severity,
-		time:     time.Unix(secs, nsecs),
-		file:     callerFile,
-		line:     callerLine,
-		extra:    string(extra),
-	}, nil
-}
diff --git a/metropolis/pkg/logtree/zap_test.go b/metropolis/pkg/logtree/zap_test.go
deleted file mode 100644
index 3917cd8..0000000
--- a/metropolis/pkg/logtree/zap_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package logtree
-
-import (
-	"testing"
-
-	"go.uber.org/zap"
-)
-
-func TestZapify(t *testing.T) {
-	lt := New()
-
-	z := Zapify(lt.MustLeveledFor("zap"), zap.InfoLevel)
-	z.Info("foo", zap.String("strp", "strv"), zap.Int("intp", 42))
-	z.Warn("foo!", zap.String("strp", "strv"), zap.Int("intp", 1337))
-	z.Error("foo!!")
-
-	res, err := lt.Read("zap", WithBacklog(BacklogAllAvailable))
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	defer res.Close()
-
-	if want, got := 3, len(res.Backlog); want != got {
-		t.Errorf("Wanted %d entries, got %d", want, got)
-	} else {
-		for i, te := range []struct {
-			msg string
-			sev Severity
-		}{
-			{`foo {"intp":42,"strp":"strv"}`, INFO},
-			{`foo! {"intp":1337,"strp":"strv"}`, WARNING},
-			{`foo!!`, ERROR},
-		} {
-			if want, got := te.msg, res.Backlog[i].Leveled.messages[0]; want != got {
-				t.Errorf("Line %d: wanted message %q, got %q", i, want, got)
-			}
-			if want, got := te.sev, res.Backlog[i].Leveled.severity; want != got {
-				t.Errorf("Line %d: wanted level %s, got %s", i, want, got)
-			}
-		}
-	}
-}
diff --git a/metropolis/pkg/loop/BUILD.bazel b/metropolis/pkg/loop/BUILD.bazel
deleted file mode 100644
index 8c4e10a..0000000
--- a/metropolis/pkg/loop/BUILD.bazel
+++ /dev/null
@@ -1,25 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
-
-go_library(
-    name = "loop",
-    srcs = ["loop.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/loop",
-    visibility = ["//visibility:public"],
-    deps = ["@org_golang_x_sys//unix"],
-)
-
-go_test(
-    name = "loop_test",
-    srcs = ["loop_test.go"],
-    embed = [":loop"],
-    deps = [
-        "@com_github_stretchr_testify//assert",
-        "@com_github_stretchr_testify//require",
-        "@org_golang_x_sys//unix",
-    ],
-)
-
-ktest(
-    tester = ":loop_test",
-)
diff --git a/metropolis/pkg/loop/loop.go b/metropolis/pkg/loop/loop.go
deleted file mode 100644
index b302c0b..0000000
--- a/metropolis/pkg/loop/loop.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package loop implements an interface to configure Linux loop devices.
-//
-// This package requires Linux 5.8 or higher because it uses the newer
-// LOOP_CONFIGURE ioctl, which is better-behaved and twice as fast as the old
-// approach. It doesn't support all of the cryptloop functionality as it has
-// been superseded by dm-crypt and has known vulnerabilities. It also doesn't
-// support on-the-fly reconfiguration of loop devices as this is rather
-// unusual, works only under very specific circumstances and would make the API
-// less clean.
-package loop
-
-import (
-	"errors"
-	"fmt"
-	"math/bits"
-	"os"
-	"sync"
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// Lazily-initialized file descriptor for the control device /dev/loop-control
-// (singleton)
-var (
-	mutex         sync.Mutex
-	loopControlFd *os.File
-)
-
-const (
-	// LOOP_CONFIGURE from @linux//include/uapi/linux:loop.h
-	loopConfigure = 0x4C0A
-	// LOOP_MAJOR from @linux//include/uapi/linux:major.h
-	loopMajor = 7
-)
-
-// struct loop_config from @linux//include/uapi/linux:loop.h
-type loopConfig struct {
-	fd uint32
-	// blockSize is a power of 2 between 512 and os.Getpagesize(), defaults
-	// reasonably
-	blockSize uint32
-	info      loopInfo64
-	_reserved [64]byte
-}
-
-// struct loop_info64 from @linux//include/uapi/linux:loop.h
-type loopInfo64 struct {
-	device         uint64
-	inode          uint64
-	rdevice        uint64
-	offset         uint64 // used
-	sizeLimit      uint64 // used
-	number         uint32
-	encryptType    uint32
-	encryptKeySize uint32
-	flags          uint32   // Flags from Flag constant
-	filename       [64]byte // used
-	cryptname      [64]byte
-	encryptkey     [32]byte
-	init           [2]uint64
-}
-
-type Config struct {
-	// Block size of the loop device in bytes. Power of 2 between 512 and page
-	// size.  Zero defaults to an reasonable block size.
-	BlockSize uint32
-	// Combination of flags from the Flag constants in this package.
-	Flags uint32
-	// Offset in bytes from the start of the file to the first byte of the
-	// device. Usually zero.
-	Offset uint64
-	// Maximum size of the loop device in bytes. Zero defaults to the whole
-	// file.
-	SizeLimit uint64
-}
-
-func (c *Config) validate() error {
-	// Additional validation because of inconsistent kernel-side enforcement
-	if c.BlockSize != 0 {
-		if c.BlockSize < 512 || c.BlockSize > uint32(os.Getpagesize()) || bits.OnesCount32(c.BlockSize) > 1 {
-			return errors.New("BlockSize needs to be a power of two between 512 bytes and the OS page size")
-		}
-	}
-	return nil
-}
-
-// ensureFds lazily initializes control devices
-func ensureFds() (err error) {
-	mutex.Lock()
-	defer mutex.Unlock()
-	if loopControlFd != nil {
-		return
-	}
-	loopControlFd, err = os.Open("/dev/loop-control")
-	return
-}
-
-// Device represents a loop device.
-type Device struct {
-	num uint32
-	dev *os.File
-
-	closed bool
-}
-
-// All from @linux//include/uapi/linux:loop.h
-const (
-	// Makes the loop device read-only even if the backing file is read-write.
-	FlagReadOnly = 1
-	// Unbinds the backing file as soon as the last user is gone. Useful for
-	// unbinding after unmount.
-	FlagAutoclear = 4
-	// Enables kernel-side partition scanning on the loop device. Needed if you
-	// want to access specific partitions on a loop device.
-	FlagPartscan = 8
-	// Enables direct IO for the loop device, bypassing caches and buffer
-	// copying.
-	FlagDirectIO = 16
-)
-
-// Create creates a new loop device backed with the given file.
-func Create(f *os.File, c Config) (*Device, error) {
-	if err := c.validate(); err != nil {
-		return nil, err
-	}
-	if err := ensureFds(); err != nil {
-		return nil, fmt.Errorf("failed to access loop control device: %w", err)
-	}
-	for {
-		devNum, _, errno := syscall.Syscall(unix.SYS_IOCTL, loopControlFd.Fd(), unix.LOOP_CTL_GET_FREE, 0)
-		if errno != unix.Errno(0) {
-			return nil, fmt.Errorf("failed to allocate loop device: %w", os.NewSyscallError("ioctl(LOOP_CTL_GET_FREE)", errno))
-		}
-		dev, err := os.OpenFile(fmt.Sprintf("/dev/loop%v", devNum), os.O_RDWR|os.O_EXCL, 0)
-		var pe *os.PathError
-		if errors.As(err, &pe) && errors.Is(pe.Err, unix.EBUSY) {
-			// We have lost the race, get a new device
-			continue
-		}
-		if err != nil {
-			return nil, fmt.Errorf("failed to open newly-allocated loop device: %w", err)
-		}
-
-		var config loopConfig
-		config.fd = uint32(f.Fd())
-		config.blockSize = c.BlockSize
-		config.info.flags = c.Flags
-		config.info.offset = c.Offset
-		config.info.sizeLimit = c.SizeLimit
-
-		if _, _, err := syscall.Syscall(unix.SYS_IOCTL, dev.Fd(), loopConfigure, uintptr(unsafe.Pointer(&config))); err != 0 {
-			if err == unix.EBUSY {
-				// We have lost the race, get a new device
-				continue
-			}
-			return nil, os.NewSyscallError("ioctl(LOOP_CONFIGURE)", err)
-		}
-		return &Device{dev: dev, num: uint32(devNum)}, nil
-	}
-}
-
-// Open opens a loop device at the given path. It returns an error if the path
-// is not a loop device.
-func Open(path string) (*Device, error) {
-	potentialDevice, err := os.Open(path)
-	if err != nil {
-		return nil, fmt.Errorf("failed to open device: %w", err)
-	}
-	var loopInfo loopInfo64
-	_, _, errNo := syscall.Syscall(unix.SYS_IOCTL, potentialDevice.Fd(), unix.LOOP_GET_STATUS64, uintptr(unsafe.Pointer(&loopInfo)))
-	if errNo == syscall.Errno(0) {
-		return &Device{dev: potentialDevice, num: loopInfo.number}, nil
-	}
-	potentialDevice.Close()
-	if errNo == syscall.EINVAL {
-		return nil, errors.New("not a loop device")
-	}
-	return nil, fmt.Errorf("failed to determine state of potential loop device: %w", errNo)
-}
-
-func (d *Device) ensureOpen() error {
-	if d.closed {
-		return errors.New("device is closed")
-	}
-	return nil
-}
-
-// DevPath returns the canonical path of this loop device in /dev.
-func (d *Device) DevPath() (string, error) {
-	if err := d.ensureOpen(); err != nil {
-		return "", err
-	}
-	return fmt.Sprintf("/dev/loop%d", d.num), nil
-}
-
-// Dev returns the Linux device ID of the loop device.
-func (d *Device) Dev() (uint64, error) {
-	if err := d.ensureOpen(); err != nil {
-		return 0, err
-	}
-	return unix.Mkdev(loopMajor, d.num), nil
-}
-
-// BackingFilePath returns the path of the backing file
-func (d *Device) BackingFilePath() (string, error) {
-	backingFile, err := os.ReadFile(fmt.Sprintf("/sys/block/loop%d/loop/backing_file", d.num))
-	if err != nil {
-		return "", fmt.Errorf("failed to get backing file path: %w", err)
-	}
-	return string(backingFile), err
-}
-
-// RefreshSize recalculates the size of the loop device based on the config and
-// the size of the backing file.
-func (d *Device) RefreshSize() error {
-	if err := d.ensureOpen(); err != nil {
-		return err
-	}
-	return unix.IoctlSetInt(int(d.dev.Fd()), unix.LOOP_SET_CAPACITY, 0)
-}
-
-// Close closes all file descriptors open to the device. Does not remove the
-// device itself or alter its configuration.
-func (d *Device) Close() error {
-	if err := d.ensureOpen(); err != nil {
-		return err
-	}
-	d.closed = true
-	return d.dev.Close()
-}
-
-// Remove removes the loop device.
-func (d *Device) Remove() error {
-	if err := d.ensureOpen(); err != nil {
-		return err
-	}
-	err := unix.IoctlSetInt(int(d.dev.Fd()), unix.LOOP_CLR_FD, 0)
-	if err != nil {
-		return err
-	}
-	if err := d.Close(); err != nil {
-		return fmt.Errorf("failed to close device: %w", err)
-	}
-	if err := unix.IoctlSetInt(int(loopControlFd.Fd()), unix.LOOP_CTL_REMOVE, int(d.num)); err != nil {
-		return err
-	}
-	return nil
-}
diff --git a/metropolis/pkg/loop/loop_test.go b/metropolis/pkg/loop/loop_test.go
deleted file mode 100644
index 7f23f3e..0000000
--- a/metropolis/pkg/loop/loop_test.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package loop
-
-import (
-	"encoding/binary"
-	"io"
-	"math"
-	"os"
-	"runtime"
-	"syscall"
-	"testing"
-	"unsafe"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"golang.org/x/sys/unix"
-)
-
-// Write a test file with a very specific pattern (increasing little-endian 16
-// bit unsigned integers) to detect offset correctness. File is always 128KiB
-// large (2^16 * 2 bytes).
-func makeTestFile() *os.File {
-	f, err := os.CreateTemp("/tmp", "")
-	if err != nil {
-		panic(err)
-	}
-	for i := 0; i <= math.MaxUint16; i++ {
-		if err := binary.Write(f, binary.LittleEndian, uint16(i)); err != nil {
-			panic(err)
-		}
-	}
-	if _, err := f.Seek(0, io.SeekStart); err != nil {
-		panic(err)
-	}
-	return f
-}
-
-func getBlkdevSize(f *os.File) (size uint64) {
-	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, f.Fd(), unix.BLKGETSIZE64, uintptr(unsafe.Pointer(&size))); err != 0 {
-		panic(err)
-	}
-	return
-}
-
-func getOffsetFromContent(dev *Device) (firstIndex uint16) {
-	if err := binary.Read(dev.dev, binary.LittleEndian, &firstIndex); err != nil {
-		panic(err)
-	}
-	firstIndex *= 2 // 2 bytes per index
-	return
-}
-
-func setupCreate(t *testing.T, config Config) *Device {
-	f := makeTestFile()
-	dev, err := Create(f, config)
-	defer f.Close()
-	assert.NoError(t, err)
-	t.Cleanup(func() {
-		if dev != nil {
-			dev.Remove()
-		}
-		os.Remove(f.Name())
-	})
-	if dev == nil {
-		t.FailNow()
-	}
-	return dev
-}
-
-func TestDeviceAccessors(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-	dev := setupCreate(t, Config{})
-
-	devPath, err := dev.DevPath()
-	assert.NoError(t, err)
-	require.Equal(t, "/dev/loop0", devPath)
-
-	var stat unix.Stat_t
-	assert.NoError(t, unix.Stat("/dev/loop0", &stat))
-	devNum, err := dev.Dev()
-	assert.NoError(t, err)
-	require.Equal(t, stat.Rdev, devNum)
-
-	backingFile, err := dev.BackingFilePath()
-	assert.NoError(t, err)
-	// The filename of the temporary file is not available in this context, but
-	// we know that the file needs to be in /tmp, which should be a good-enough
-	// test.
-	assert.Contains(t, backingFile, "/tmp/")
-}
-
-func TestCreate(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-	t.Parallel()
-	tests := []struct {
-		name     string
-		config   Config
-		validate func(t *testing.T, dev *Device)
-	}{
-		{"NoOpts", Config{}, func(t *testing.T, dev *Device) {
-			require.Equal(t, uint64(128*1024), getBlkdevSize(dev.dev))
-			require.Equal(t, uint16(0), getOffsetFromContent(dev))
-
-			_, err := dev.dev.WriteString("test")
-			assert.NoError(t, err)
-		}},
-		{"DirectIO", Config{Flags: FlagDirectIO}, func(t *testing.T, dev *Device) {
-			require.Equal(t, uint64(128*1024), getBlkdevSize(dev.dev))
-
-			_, err := dev.dev.WriteString("test")
-			assert.NoError(t, err)
-		}},
-		{"ReadOnly", Config{Flags: FlagReadOnly}, func(t *testing.T, dev *Device) {
-			_, err := dev.dev.WriteString("test")
-			assert.Error(t, err)
-		}},
-		{"Mapping", Config{BlockSize: 512, SizeLimit: 2048, Offset: 4096}, func(t *testing.T, dev *Device) {
-			assert.Equal(t, uint16(4096), getOffsetFromContent(dev))
-			assert.Equal(t, uint64(2048), getBlkdevSize(dev.dev))
-		}},
-	}
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			dev := setupCreate(t, test.config)
-			test.validate(t, dev)
-			assert.NoError(t, dev.Remove())
-		})
-	}
-}
-
-func TestOpenBadDevice(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-	dev, err := Open("/dev/null")
-	require.Error(t, err)
-	if dev != nil { // Prevent leaks in case this test fails
-		dev.Close()
-	}
-}
-
-func TestOpen(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-	f := makeTestFile()
-	defer os.Remove(f.Name())
-	defer f.Close()
-	dev, err := Create(f, Config{})
-	assert.NoError(t, err)
-	path, err := dev.DevPath()
-	assert.NoError(t, err)
-	assert.NoError(t, dev.Close())
-	reopenedDev, err := Open(path)
-	assert.NoError(t, err)
-	defer reopenedDev.Remove()
-	reopenedDevPath, err := reopenedDev.DevPath()
-	assert.NoError(t, err)
-	require.Equal(t, path, reopenedDevPath) // Still needs to be the same device
-}
-
-func TestResize(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-	f, err := os.CreateTemp("/tmp", "")
-	assert.NoError(t, err)
-	empty1K := make([]byte, 1024)
-	for i := 0; i < 64; i++ {
-		_, err := f.Write(empty1K)
-		assert.NoError(t, err)
-	}
-	dev, err := Create(f, Config{})
-	assert.NoError(t, err)
-	require.Equal(t, uint64(64*1024), getBlkdevSize(dev.dev))
-	for i := 0; i < 32; i++ {
-		_, err := f.Write(empty1K)
-		assert.NoError(t, err)
-	}
-	assert.NoError(t, f.Sync())
-	assert.NoError(t, dev.RefreshSize())
-	require.Equal(t, uint64(96*1024), getBlkdevSize(dev.dev))
-}
-
-func TestStructSize(t *testing.T) {
-	if runtime.GOOS != "linux" && runtime.GOARCH != "amd64" {
-		t.Skip("Reference value not available")
-	}
-	require.Equal(t, uintptr(304), unsafe.Sizeof(loopConfig{}))
-}
diff --git a/metropolis/pkg/msguid/BUILD.bazel b/metropolis/pkg/msguid/BUILD.bazel
deleted file mode 100644
index 9be5fdb..0000000
--- a/metropolis/pkg/msguid/BUILD.bazel
+++ /dev/null
@@ -1,19 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "msguid",
-    srcs = ["msguid.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/msguid",
-    visibility = ["//visibility:public"],
-    deps = ["@com_github_google_uuid//:uuid"],
-)
-
-go_test(
-    name = "msguid_test",
-    srcs = ["msguid_test.go"],
-    embed = [":msguid"],
-    deps = [
-        "@com_github_google_go_cmp//cmp",
-        "@com_github_google_uuid//:uuid",
-    ],
-)
diff --git a/metropolis/pkg/msguid/msguid.go b/metropolis/pkg/msguid/msguid.go
deleted file mode 100644
index 4074f8a..0000000
--- a/metropolis/pkg/msguid/msguid.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Package msguid provides functions to convert UUIDs/GUIDs to and from
-// Microsoft's idiosyncratic "mixed-endian" format.
-// See https://uefi.org/specs/UEFI/2.10/Apx_A_GUID_and_Time_Formats.html#text-representation-relationships-apxa-guid-and-time-formats
-// for an explanation of the format.
-package msguid
-
-import "github.com/google/uuid"
-
-var mixedEndianTranspose = []int{3, 2, 1, 0, 5, 4, 7, 6, 8, 9, 10, 11, 12, 13, 14, 15}
-
-// From converts from a standard UUID into its mixed-endian encoding.
-func From(u uuid.UUID) (o [16]byte) {
-	for dest, from := range mixedEndianTranspose {
-		o[dest] = u[from]
-	}
-	return
-}
-
-// To converts a mixed-endian-encoded UUID to its standard format.
-func To(i [16]byte) (o uuid.UUID) {
-	for from, dest := range mixedEndianTranspose {
-		o[dest] = i[from]
-	}
-	return
-}
diff --git a/metropolis/pkg/msguid/msguid_test.go b/metropolis/pkg/msguid/msguid_test.go
deleted file mode 100644
index b8cdfea..0000000
--- a/metropolis/pkg/msguid/msguid_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package msguid
-
-import (
-	"testing"
-
-	"github.com/google/go-cmp/cmp"
-	"github.com/google/uuid"
-)
-
-func TestRoundTrip(t *testing.T) {
-	cases := []struct {
-		name     string
-		uuid     string
-		expected [16]byte
-	}{
-		{
-			"WikipediaExample1",
-			"00112233-4455-6677-8899-AABBCCDDEEFF",
-			[16]byte{
-				0x33, 0x22, 0x11, 0x00, 0x55, 0x44, 0x77, 0x66,
-				0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF,
-			},
-		},
-	}
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			origUUID := uuid.MustParse(c.uuid)
-			got := From(origUUID)
-			diff := cmp.Diff(c.expected, got)
-			if diff != "" {
-				t.Fatalf("To(%q) returned unexpected result: %v", origUUID, diff)
-			}
-			back := To(got)
-			diff2 := cmp.Diff(origUUID, back)
-			if diff2 != "" {
-				t.Errorf("From(To(%q)) did not return original value: %v", origUUID, diff2)
-			}
-		})
-	}
-}
diff --git a/metropolis/pkg/nvme/BUILD.bazel b/metropolis/pkg/nvme/BUILD.bazel
deleted file mode 100644
index 6a2438b..0000000
--- a/metropolis/pkg/nvme/BUILD.bazel
+++ /dev/null
@@ -1,33 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "nvme",
-    srcs = [
-        "cmd_linux.go",
-        "cmd_unsupported.go",
-        "error.go",
-        "format.go",
-        "health.go",
-        "identify.go",
-        "nvme.go",
-        "selftest.go",
-        "uint128le.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/nvme",
-    visibility = ["//visibility:public"],
-    deps = select({
-        "@io_bazel_rules_go//go/platform:android": [
-            "@org_golang_x_sys//unix",
-        ],
-        "@io_bazel_rules_go//go/platform:linux": [
-            "@org_golang_x_sys//unix",
-        ],
-        "//conditions:default": [],
-    }),
-)
-
-go_test(
-    name = "nvme_test",
-    srcs = ["struct_test.go"],
-    embed = [":nvme"],
-)
diff --git a/metropolis/pkg/nvme/cmd_linux.go b/metropolis/pkg/nvme/cmd_linux.go
deleted file mode 100644
index 96054ff..0000000
--- a/metropolis/pkg/nvme/cmd_linux.go
+++ /dev/null
@@ -1,110 +0,0 @@
-//go:build linux
-
-package nvme
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"runtime"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// From @linux//include/uapi/linux/nvme_ioctl.h
-const (
-	nvmeIoctlAdminCmd = 0xC0484E41 // _IOWR('N', 0x41, sizeof cmd)
-)
-
-// From @linux//include/uapi/linux/nvme_ioctl.h
-type passthruCmd struct {
-	// Corresponding to Figure 88
-	opcode      uint8
-	flags       uint8
-	rsvd1       uint16
-	nsid        uint32
-	cdw2        uint32
-	cdw3        uint32
-	metadata    uint64
-	addr        uint64
-	metadataLen uint32
-	dataLen     uint32
-	cdw10       uint32
-	cdw11       uint32
-	cdw12       uint32
-	cdw13       uint32
-	cdw14       uint32
-	cdw15       uint32
-
-	// Linux ioctl-specific
-	timeoutMs uint32
-	result    uint32
-}
-
-// RawCommand runs a raw command on the NVMe device.
-// Please note that depending on the payload this can be very dangerous and can
-// cause data loss or even firmware issues.
-func (d *Device) RawCommand(cmd *Command) error {
-	conn, err := d.fd.SyscallConn()
-	if err != nil {
-		return fmt.Errorf("unable to get RawConn: %w", err)
-	}
-	cmdRaw := passthruCmd{
-		opcode:    cmd.Opcode,
-		flags:     cmd.Flags,
-		nsid:      cmd.NamespaceID,
-		cdw2:      cmd.CDW2,
-		cdw3:      cmd.CDW3,
-		cdw10:     cmd.CDW10,
-		cdw11:     cmd.CDW11,
-		cdw12:     cmd.CDW12,
-		cdw13:     cmd.CDW13,
-		cdw14:     cmd.CDW14,
-		cdw15:     cmd.CDW15,
-		timeoutMs: uint32(cmd.Timeout.Milliseconds()),
-	}
-	var ioctlPins runtime.Pinner
-	defer ioctlPins.Unpin()
-	if cmd.Data != nil {
-		if len(cmd.Data) > math.MaxUint32 {
-			return errors.New("data buffer larger than uint32, this is unsupported")
-		}
-		ioctlPins.Pin(&cmd.Data[0])
-		cmdRaw.dataLen = uint32(len(cmd.Data))
-		cmdRaw.addr = uint64(uintptr(unsafe.Pointer(&cmd.Data[0])))
-	}
-	if cmd.Metadata != nil {
-		if len(cmd.Metadata) > math.MaxUint32 {
-			return errors.New("metadata buffer larger than uint32, this is unsupported")
-		}
-		ioctlPins.Pin(&cmd.Metadata[0])
-		cmdRaw.metadataLen = uint32(len(cmd.Metadata))
-		cmdRaw.metadata = uint64(uintptr(unsafe.Pointer(&cmd.Metadata[0])))
-	}
-	var errno unix.Errno
-	var status uintptr
-	err = conn.Control(func(fd uintptr) {
-		status, _, errno = unix.Syscall(unix.SYS_IOCTL, fd, nvmeIoctlAdminCmd, uintptr(unsafe.Pointer(&cmdRaw)))
-	})
-	runtime.KeepAlive(cmdRaw)
-	runtime.KeepAlive(cmd.Data)
-	runtime.KeepAlive(cmd.Metadata)
-	if err != nil {
-		return fmt.Errorf("unable to get fd: %w", err)
-	}
-	if errno != 0 {
-		return errno
-	}
-	var commandErr Error
-	commandErr.DoNotRetry = status&(1<<15) != 0            // Bit 31
-	commandErr.More = status&(1<<14) != 0                  // Bit 30
-	commandErr.StatusCodeType = uint8((status >> 8) & 0x7) // Bits 27:25
-	commandErr.StatusCode = uint8(status & 0xff)           // Bits 24:17
-	// The only success status is in the generic status code set with value 0
-	if commandErr.StatusCodeType != StatusCodeTypeGeneric ||
-		commandErr.StatusCode != 0 {
-		return commandErr
-	}
-	return nil
-}
diff --git a/metropolis/pkg/nvme/cmd_unsupported.go b/metropolis/pkg/nvme/cmd_unsupported.go
deleted file mode 100644
index 747a33d..0000000
--- a/metropolis/pkg/nvme/cmd_unsupported.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !linux
-
-package nvme
-
-import (
-	"fmt"
-	"runtime"
-)
-
-func (d *Device) RawCommand(cmd *Command) error {
-	return fmt.Errorf("NVMe command interface unimplemented for %v", runtime.GOOS)
-}
diff --git a/metropolis/pkg/nvme/error.go b/metropolis/pkg/nvme/error.go
deleted file mode 100644
index 8c4a207..0000000
--- a/metropolis/pkg/nvme/error.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package nvme
-
-import "fmt"
-
-// Figure 31 in the spec
-var genericStatusCodeDesc = map[uint8]string{
-	0x00: "successful completion",
-	0x01: "invalid command opcode",
-	0x02: "invalid field in command",
-	0x03: "command ID conflict",
-	0x04: "data transfer error",
-	0x05: "command aborted due power loss notification",
-	0x06: "internal error",
-	0x07: "command abort requested",
-	0x08: "command abort due to SQ deletion",
-	0x09: "command abort due to failed fused command",
-	0x0a: "command abort due to missing fused command",
-	0x0b: "invalid namespace or format",
-	0x0c: "command sequence error",
-	0x0d: "invalid SGL segment descriptor",
-	0x0e: "invalid number of SGL descriptors",
-	0x0f: "data SGL length invalid",
-	0x10: "metadata SGL length invalid",
-	0x11: "SGL descriptor type invalid",
-	0x12: "invalid use of controller memory buffer",
-	0x13: "PRP offset invalid",
-	0x14: "atomic write unit exceeded",
-	0x15: "operation denied",
-	0x16: "SGL offset invalid",
-	0x18: "host identifer inconsistent format",
-	0x19: "keep alive timeout expired",
-	0x1a: "keep alive timeout invalid",
-	0x1b: "command aborted due to preempt and abort",
-	0x1c: "sanitize failed",
-	0x1d: "sanitize in progress",
-	0x1e: "SGL data block granularity invalid",
-	0x1f: "command not supported for queue in CMB",
-
-	// Figure 32
-	0x80: "LBA out of range",
-	0x81: "capacity exceeded",
-	0x82: "namespace not ready",
-	0x83: "reservation conflict",
-	0x84: "format in progress",
-}
-
-// Figure 33 in the spec
-var commandSpecificStatusCodeDesc = map[uint8]string{
-	0x00: "completion queue invalid",
-	0x01: "invalid queue identifier",
-	0x02: "invalid queue size",
-	0x03: "abort command limit exceeded",
-	0x05: "asynchronous event request limit exceeded",
-	0x06: "invalid firmware slot",
-	0x07: "invalid firmware image",
-	0x08: "invalid interrupt vector",
-	0x09: "invalid log page",
-	0x0a: "invalid format",
-	0x0b: "firmware activation requires conventional reset",
-	0x0c: "invalid queue deletion",
-	0x0d: "feature identifier not saveable",
-	0x0e: "feature not changeable",
-	0x0f: "feature not namespace-specific",
-	0x10: "firmware activation requires NVM subsystem reset",
-	0x11: "firmware activation requires reset",
-	0x12: "firmware activation requires maximum time violation",
-	0x13: "firmware activation prohibited",
-	0x14: "overlapping range",
-	0x15: "namespace insufficient capacity",
-	0x16: "namespace identifier unavailable",
-	0x18: "namespace already attached",
-	0x19: "namespace is private",
-	0x1a: "namespace is not attached",
-	0x1b: "thin provisioning not supported",
-	0x1c: "controller list invalid",
-	0x1d: "device self-test in progress",
-	0x1e: "boot partition write prohibited",
-	0x1f: "invalid controller identifier",
-	0x20: "invalid secondary controller state",
-	0x21: "invalid number of controller resources",
-	0x22: "invalid resource identifier",
-
-	// Figure 34
-	0x80: "conflicting attributes",
-	0x81: "invalid protection information",
-	0x82: "attempted to write to read-only range",
-}
-
-// Figure 36
-var mediaAndDataIntegrityStatusCodeDesc = map[uint8]string{
-	0x80: "write fault",
-	0x81: "unrecovered read error",
-	0x82: "end-to-end guard check error",
-	0x83: "end-to-end application tag check error",
-	0x84: "end-to-end reference tag check error",
-	0x85: "compare failure",
-	0x86: "access denied",
-	0x87: "deallocated or unwritten logical block",
-}
-
-const (
-	StatusCodeTypeGeneric               = 0x0
-	StatusCodeTypeCommandSpecific       = 0x1
-	StatusCodeTypeMediaAndDataIntegrity = 0x2
-)
-
-// Error represents an error returned by the NVMe device in the form of a
-// NVMe Status Field (see also Figure 29 in the spec).
-type Error struct {
-	DoNotRetry     bool
-	More           bool
-	StatusCodeType uint8
-	StatusCode     uint8
-}
-
-func (e Error) Error() string {
-	switch e.StatusCodeType {
-	case StatusCodeTypeGeneric:
-		if errStr, ok := genericStatusCodeDesc[e.StatusCode]; ok {
-			return errStr
-		}
-		return fmt.Sprintf("unknown error with generic code 0x%x", e.StatusCode)
-	case StatusCodeTypeCommandSpecific:
-		if errStr, ok := commandSpecificStatusCodeDesc[e.StatusCode]; ok {
-			return errStr
-		}
-		return fmt.Sprintf("unknown error with command-specific code 0x%x", e.StatusCode)
-	case StatusCodeTypeMediaAndDataIntegrity:
-		if errStr, ok := mediaAndDataIntegrityStatusCodeDesc[e.StatusCode]; ok {
-			return errStr
-		}
-		return fmt.Sprintf("unknown error with media and data integrity code 0x%x", e.StatusCode)
-	default:
-		return fmt.Sprintf("unknown error with unknown type 0x%x and code 0x%x", e.StatusCodeType, e.StatusCode)
-	}
-}
diff --git a/metropolis/pkg/nvme/format.go b/metropolis/pkg/nvme/format.go
deleted file mode 100644
index 8bde44a..0000000
--- a/metropolis/pkg/nvme/format.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package nvme
-
-// SecureEraseType specifices what type of secure erase should be performed by
-// by the controller. The zero value requests no secure erase.
-type SecureEraseType uint8
-
-const (
-	// SecureEraseTypeNone specifies that no secure erase operation is
-	// requested.
-	SecureEraseTypeNone SecureEraseType = 0
-	// SecureEraseTypeUserData specifies that all user data should be securely
-	// erased. The controller is allowed to perform a cryptographic erase
-	// instead.
-	SecureEraseTypeUserData SecureEraseType = 1
-	// SecureEraseTypeCryptographic specifies that the encryption key for user
-	// data should be erased. This in turn causes all current user data to
-	// become unreadable.
-	SecureEraseTypeCryptographic SecureEraseType = 2
-)
-
-// ProtectionInformationType selects the type of end-to-end protection tags to
-// use. NVMe supports the same types as T10 DIF (SCSI).
-type ProtectionInformationType uint8
-
-const (
-	ProtectionInformationTypeNone ProtectionInformationType = 0
-	ProtectionInformationType1    ProtectionInformationType = 1
-	ProtectionInformationType2    ProtectionInformationType = 2
-	ProtectionInformationType3    ProtectionInformationType = 3
-)
-
-type FormatRequest struct {
-	// NamespaceID contains the ID of the namespace to format.
-	// NamespaceGlobal formats all namespaces.
-	NamespaceID uint32
-	// SecureEraseSettings specifies the type of secure erase to perform.
-	SecureEraseSettings SecureEraseType
-	// ProtectionInformationLocation selects where protection information is
-	// transmitted. If true, it is transmitted as the first 8 bytes of metadata.
-	// If false, it is transmitted as the last 8 bytes of metadata.
-	ProtectionInformationLocation bool
-	// ProtectionInformation specifies the type of T10 DIF Protection
-	// Information to use.
-	ProtectionInformation ProtectionInformationType
-	// MetadataInline selects whether metadata is transferred as part of an
-	// extended data LBA. If false, metadata is returned in a separate buffer.
-	// If true, metadata is appended to the data buffer.
-	MetadataInline bool
-	// LBAFormat specifies the LBA format to use. This needs to be selected
-	// from the list of supported LBA formats in the Identify response.
-	LBAFormat uint8
-}
-
-// Format performs a low-level format of the NVM media. This is used for
-// changing the block and/or metadata size. This command causes all data
-// on the specified namespace to be lost. By setting SecureEraseSettings
-// to the appropriate value it can also be used to securely erase data.
-// See also the Sanitize command for just wiping the device.
-func (d *Device) Format(req *FormatRequest) error {
-	var cdw10 uint32
-	cdw10 |= uint32(req.SecureEraseSettings&0x7) << 9
-	cdw10 |= uint32(req.ProtectionInformation&0x7) << 5
-	cdw10 |= uint32(req.LBAFormat & 0x7)
-	if req.ProtectionInformationLocation {
-		cdw10 |= 1 << 8
-	}
-	if req.MetadataInline {
-		cdw10 |= 1 << 4
-	}
-	return d.RawCommand(&Command{
-		Opcode:      0x80,
-		NamespaceID: req.NamespaceID,
-		CDW10:       cdw10,
-	})
-}
diff --git a/metropolis/pkg/nvme/health.go b/metropolis/pkg/nvme/health.go
deleted file mode 100644
index 775742f..0000000
--- a/metropolis/pkg/nvme/health.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package nvme
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"math/big"
-	"time"
-)
-
-// healthPage represents the raw data from a NVMe Health/SMART page.
-// See Figure 93 in the spec.
-type healthPage struct {
-	CriticalWarning         uint8
-	CompositeTemperature    uint16
-	AvailableSpare          uint8
-	AvailableSpareThreshold uint8
-	PercentageUsed          uint8
-
-	_ [26]byte
-
-	DataUnitsRead               uint128le
-	DataUnitsWritten            uint128le
-	HostReadCommands            uint128le
-	HostWriteCommands           uint128le
-	ControllerBusyTime          uint128le
-	PowerCycles                 uint128le
-	PowerOnHours                uint128le
-	UnsafeSHutdowns             uint128le
-	MediaAndDataIntegrityErrors uint128le
-	ErrorInformationLogEntries  uint128le
-
-	WarningCompositeTemperatureTime  uint32
-	CriticalCompositeTemperatureTime uint32
-
-	TemperatureSensors [8]uint16
-
-	ThermalMgmtTemperature1TransitionCount uint32
-	ThermalMgmtTemperature2TransitionCount uint32
-
-	_ [8]byte
-
-	TotalTimeForThermalMgmtTemperature1 uint32
-	TotalTimeForThermalMgmtTemperature2 uint32
-}
-
-// HealthInfo contains information related to the health of the NVMe device.
-//
-// Note that some values might be clamped under highly abnormal circumstances
-// as they are reported as 128-bit integers which Go doesn't support.
-// For easier handling values which are very unlikely to exceed 64 bits are
-// exposed as 64 bit integers.
-type HealthInfo struct {
-	// AvailableSpareSpaceCritical is set if the avilable spare threshold has
-	// fallen below the critical threshold.
-	AvailableSpareSpaceCritical bool
-	// TemperatureCritical is set if a temperature is outside the acceptable
-	// operating thresholds.
-	TemperatureCritical bool
-	// MediaCritical is set if significant media or internal issues affect the
-	// operation of the device.
-	MediaCritical bool
-	// ForcedReadOnly is set if the device is forced into read-only mode due
-	// to an error.
-	ForcedReadOnly bool
-	// VolatileMemoryBackupFailed is set if the volatile memory backup device
-	// has failed.
-	VolatileMemoryBackupFailed bool
-	// CompositeTemperatureKelvin contains a derived value representing the
-	// composite state of controller and namespace/flash temperature.
-	// The exact mechanism used to derive it is vendor-specific.
-	CompositeTemperatureKelvin uint16
-	// AvailableSpare represents the relative amount (0-1) of spare capacity
-	// still unnused.
-	AvailableSpare float32
-	// AvailableSpareThreshold represents the vendor-defined threshold which
-	// AvailableSpare shuld not fall under.
-	AvailableSpareThreshold float32
-	// LifeUsed represents vendor-defined relative estimate of the life of
-	// the device which has been used up. It is allowed to exceed 1 and will
-	// be clamped by the device somewhere between 1.0 and 2.55.
-	LifeUsed float32
-	// BytesRead contains the number of bytes read from the device.
-	// This value is only updated in 512KiB increments.
-	BytesRead *big.Int
-	// BytesWritten contains the number of bytes written to the device.
-	// This value is only updated in 512KiB increments.
-	BytesWritten *big.Int
-	// HostReadCommands contains the number of read commands completed by the
-	// controller.
-	HostReadCommands *big.Int
-	// HostWriteCommands contains the number of write commands completed by the
-	// controller.
-	HostWriteCommands *big.Int
-	// ControllerBusyTime contains the cumulative amount of time the controller
-	// has spent being busy (i.e. having at least one command outstanding on an
-	// I/O queue). This value is only updated in 1m increments.
-	ControllerBusyTime time.Duration
-	// PowerCycles contains the number of power cycles.
-	PowerCycles uint64
-	// PowerOnHours contains the number of hours the controller has been
-	// powered on. Depending on the vendor implementation it may or may
-	// not contain time spent in a non-operational power state.
-	PowerOnHours uint64
-	// UnsafeShutdown contains the number of power loss events without
-	// a prior shutdown notification from the host.
-	UnsafeShutdowns uint64
-	// MediaAndDataIntegrityErrors contains the number of occurrences where the
-	// controller detecte an unrecovered data integrity error.
-	MediaAndDataIntegrityErrors uint64
-	// ErrorInformationLogEntriesCount contains the number of Error
-	// Information log entries over the life of the controller.
-	ErrorInformationLogEntriesCount uint64
-	// WarningCompositeTemperatureTime contains the amount of time the
-	// controller is operational while the composite temperature is greater
-	// than the warning composite threshold.
-	WarningCompositeTemperatureTime time.Duration
-	// CriticalCompositeTemperatureTime contains the amount of time the
-	// controller is operational while the composite temperature is greater
-	// than the critical composite threshold.
-	CriticalCompositeTemperatureTime time.Duration
-	// TemperatureSensorValues contains the current temperature in Kelvin as
-	// reported by up to 8 sensors on the device. A value of zero means that
-	// the given sensor is not available.
-	TemperatureSensorValues [8]uint16
-	// ThermalMgmtTemperature1TransitionCount contains the number of times the
-	// controller transitioned to lower power active power states or performed
-	// vendor-specific thermal management actions to reduce temperature.
-	ThermalMgmtTemperature1TransitionCount uint32
-	// ThermalMgmtTemperature2TransitionCount is the same as above, but
-	// for "heavier" thermal management actions including heavy throttling.
-	// The actual difference is vendor-specific.
-	ThermalMgmtTemperature2TransitionCount uint32
-	// TotalTimeForThermalMgmtTemperature1 contains the total time the
-	// controller spent under "light" thermal management.
-	TotalTimeForThermalMgmtTemperature1 time.Duration
-	// TotalTimeForThermalMgmtTemperature2 contains the total time the
-	// controller spent under "heavy" thermal management.
-	TotalTimeForThermalMgmtTemperature2 time.Duration
-}
-
-// HasCriticalWarning returns true if any of the critical warnings
-// (AvailableSpareSpaceCritical, TemperatureCritical, MediaCritical,
-// ForcedReadOnly, VolatileMemoryBackupFailed) are active.
-// If this returns true the NVMe medium has reason to believe that
-// data availability or integrity is endangered.
-func (h *HealthInfo) HasCriticalWarning() bool {
-	return h.AvailableSpareSpaceCritical || h.TemperatureCritical || h.MediaCritical || h.ForcedReadOnly || h.VolatileMemoryBackupFailed
-}
-
-// See Figure 93 Data Units Read
-var dataUnit = big.NewInt(512 * 1000)
-
-const (
-	healthLogPage = 0x02
-)
-
-// GetHealthInfo gets health information from the NVMe device's health log page.
-func (d *Device) GetHealthInfo() (*HealthInfo, error) {
-	var buf [512]byte
-
-	if err := d.GetLogPage(GlobalNamespace, healthLogPage, 0, 0, buf[:]); err != nil {
-		return nil, fmt.Errorf("unable to get health log page: %w", err)
-	}
-
-	var page healthPage
-	binary.Read(bytes.NewReader(buf[:]), binary.LittleEndian, &page)
-	var res HealthInfo
-	res.AvailableSpareSpaceCritical = page.CriticalWarning&(1<<0) != 0
-	res.TemperatureCritical = page.CriticalWarning&(1<<1) != 0
-	res.MediaCritical = page.CriticalWarning&(1<<2) != 0
-	res.ForcedReadOnly = page.CriticalWarning&(1<<3) != 0
-	res.VolatileMemoryBackupFailed = page.CriticalWarning&(1<<4) != 0
-	res.CompositeTemperatureKelvin = page.CompositeTemperature
-	res.AvailableSpare = float32(page.AvailableSpare) / 100.
-	res.AvailableSpareThreshold = float32(page.AvailableSpareThreshold) / 100.
-	res.LifeUsed = float32(page.PercentageUsed) / 100.
-	res.BytesRead = new(big.Int).Mul(page.DataUnitsRead.BigInt(), dataUnit)
-	res.BytesWritten = new(big.Int).Mul(page.DataUnitsWritten.BigInt(), dataUnit)
-	res.HostReadCommands = page.HostReadCommands.BigInt()
-	res.HostWriteCommands = page.HostWriteCommands.BigInt()
-	res.ControllerBusyTime = time.Duration(page.ControllerBusyTime.Uint64()) * time.Minute
-	res.PowerCycles = page.PowerCycles.Uint64()
-	res.PowerOnHours = page.PowerOnHours.Uint64()
-	res.UnsafeShutdowns = page.UnsafeSHutdowns.Uint64()
-	res.MediaAndDataIntegrityErrors = page.MediaAndDataIntegrityErrors.Uint64()
-	res.ErrorInformationLogEntriesCount = page.ErrorInformationLogEntries.Uint64()
-	res.WarningCompositeTemperatureTime = time.Duration(page.WarningCompositeTemperatureTime) * time.Minute
-	res.CriticalCompositeTemperatureTime = time.Duration(page.CriticalCompositeTemperatureTime) * time.Minute
-	res.TemperatureSensorValues = page.TemperatureSensors
-	res.ThermalMgmtTemperature1TransitionCount = page.ThermalMgmtTemperature1TransitionCount
-	res.ThermalMgmtTemperature2TransitionCount = page.ThermalMgmtTemperature2TransitionCount
-	res.TotalTimeForThermalMgmtTemperature1 = time.Duration(page.TotalTimeForThermalMgmtTemperature1) * time.Second
-	res.TotalTimeForThermalMgmtTemperature2 = time.Duration(page.TotalTimeForThermalMgmtTemperature2) * time.Second
-	return &res, nil
-}
diff --git a/metropolis/pkg/nvme/identify.go b/metropolis/pkg/nvme/identify.go
deleted file mode 100644
index 218d089..0000000
--- a/metropolis/pkg/nvme/identify.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package nvme
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"math/big"
-)
-
-// Figure 109
-type identifyData struct {
-	// Controller Capabilities and Features
-	PCIVendorID                 uint16
-	PCISubsystemVendorID        uint16
-	SerialNumber                [20]byte
-	ModelNumber                 [40]byte
-	FirmwareRevision            [8]byte
-	RecommendedArbitrationBurst uint8
-	IEEEOUI                     [3]byte
-	CMIC                        uint8
-	MaximumDataTransferSize     uint8
-	ControllerID                uint16
-	Version                     uint32
-	RuntimeD3ResumeLatency      uint32
-	RuntimeD3EntryLatency       uint32
-	OAES                        uint32
-	CTRATT                      uint32
-	_                           [12]byte
-	FRUGUID                     [16]byte
-	_                           [128]byte
-	// Admin Command Set Attributes & Optional Controller Capabilities
-	OACS                                uint16
-	AbortCommandLimit                   uint8
-	AsynchronousEventRequestLimit       uint8
-	FRMW                                uint8
-	LPA                                 uint8
-	ErrorLogPageEntries                 uint8
-	NumberOfPowerStatesSupport          uint8
-	AdminVendorSpecificCmdConfig        uint8
-	AutonomousPowerStateTransitionAttrs uint8
-	WarningCompositeTempThreshold       uint16
-	CriticalCompositeTempThreshold      uint16
-	MaximumTimeForFirmwareActivation    uint16
-	HostMemoryBufferPreferredSize       uint32
-	HostMemoryBufferMinimumSize         uint32
-	TotalNVMCapacity                    uint128le
-	UnallocatedNVMCapacity              uint128le
-	ReplyProtectedMemoryBlockSupport    uint32
-	ExtendedDeviceSelfTestTime          uint16
-	DeviceSelfTestOptions               uint8
-	FirmwareUpdateGranularity           uint8
-	KeepAliveSupport                    uint16
-	HostControlledThermalMgmtAttrs      uint16
-	MinimumThermalMgmntTemp             uint16
-	MaximumThermalMgmntTemp             uint16
-	SanitizeCapabilities                uint32
-	_                                   [180]byte
-	// NVM Command Set Attributes
-	SubmissionQueueEntrySize       uint8
-	CompletionQueueEntrySize       uint8
-	MaximumOutstandingCommands     uint16
-	NumberOfNamespaces             uint32
-	OptionalNVMCommandSupport      uint16
-	FusedOperationSupport          uint16
-	FormatNVMAttributes            uint8
-	VolatileWriteCache             uint8
-	AtomicWriteUnitNormal          uint16
-	AtomicWriteUnitPowerFail       uint16
-	NVMVendorSepcificCommandConfig uint8
-	AtomicCompareAndWriteUnit      uint16
-	_                              [2]byte
-	SGLSupport                     uint32
-	_                              [228]byte
-	NVMSubsystemNVMeQualifiedName  [256]byte
-	_                              [1024]byte
-	// Power State Descriptors
-	PowerStateDescriptors [32][32]byte
-}
-
-// IdentifyData contains various identifying information about a NVMe
-// controller. Because the actual data structure is very large, currently not
-// all fields are exposed as properly-typed individual fields. If you need
-// a new field, please add it to this structure.
-type IdentifyData struct {
-	// PCIVendorID contains the company vendor identifier assigned by the PCI
-	// SIG.
-	PCIVendorID uint16
-	// PCISubsystemVendorID contains the company vendor identifier that is
-	// assigned by the PCI SIG for the subsystem.
-	PCISubsystemVendorID uint16
-	// SerialNumber contains the serial number for the NVM subsystem that is
-	// assigned by the vendor.
-	SerialNumber string
-	// ModelNumber contains the model number for the NVM subsystem that is
-	// assigned by the vendor.
-	ModelNumber string
-	// FirmwareRevision contains the currently active firmware revision for the
-	// NVM subsystem.
-	FirmwareRevision string
-	// IEEEOUI contains the Organization Unique Identifier for the controller
-	// vendor as assigned by the IEEE.
-	IEEEOUI [3]byte
-
-	// IsPCIVirtualFunction indicates if the controller is a virtual controller
-	// as part of a PCI virtual function.
-	IsPCIVirtualFunction bool
-
-	// SpecVersionMajor/Minor contain the version of the NVMe specification the
-	// controller supports. Only mandatory from spec version 1.2 onwards.
-	SpecVersionMajor uint16
-	SpecVersionMinor uint8
-
-	// FRUGloballyUniqueIdentifier contains a 128-bit value that is globally
-	// unique for a given Field Replaceable Unit (FRU). Contains all-zeroes if
-	// unavailable.
-	FRUGloballyUniqueIdentifier [16]byte
-	// VirtualizationManagementSupported indicates if the controller
-	// supports the Virtualization Management command.
-	VirtualizationManagementSupported bool
-	// NVMeMISupported indicates if the controller supports the NVMe-MI
-	// Send and Receive commands.
-	NVMeMISupported bool
-	// DirectivesSupported indicates if the controller supports the
-	// Directive Send and Receive commands.
-	DirectivesSupported bool
-	// SelfTestSupported indicates if the controller supports the Device Self-
-	// test command.
-	SelfTestSupported bool
-	// NamespaceManagementSupported indicates if the controller supports the
-	// Namespace Management and Attachment commands.
-	NamespaceManagementSupported bool
-	// FirmwareUpdateSupported indicates if the controller supports the
-	// Firmware Commit and Image Download commands.
-	FirmwareUpdateSupported bool
-	// FormattingSupported indicates if the controller supports the Format
-	// command.
-	FormattingSupported bool
-	// SecuritySupported indicates if the controller supports the Security Send
-	// and Receive commands.
-	SecuritySupported bool
-
-	// TotalNVMCapacity contains the total NVM capacity in bytes in the NVM
-	// subsystem. This can be 0 on devices without NamespaceManagementSupported.
-	TotalNVMCapacity *big.Int
-	// UnallocatedNVMCapacity contains the unallocated NVM capacity in bytes in
-	// the NVM subsystem. This can be 0 on devices without
-	// NamespaceManagementSupported.
-	UnallocatedNVMCapacity *big.Int
-
-	// MaximumNumberOfNamespace defines the maximum number of namespaces
-	// supported by the controller.
-	MaximumNumberOfNamespaces uint32
-}
-
-func (d *Device) Identify() (*IdentifyData, error) {
-	var resp [4096]byte
-
-	if err := d.RawCommand(&Command{
-		Opcode: 0x06,
-		Data:   resp[:],
-		CDW10:  1,
-	}); err != nil {
-		return nil, fmt.Errorf("Identify command failed: %w", err)
-	}
-	var raw identifyData
-	binary.Read(bytes.NewReader(resp[:]), binary.LittleEndian, &raw)
-
-	var res IdentifyData
-	res.PCIVendorID = raw.PCIVendorID
-	res.PCISubsystemVendorID = raw.PCISubsystemVendorID
-	res.SerialNumber = string(bytes.TrimRight(raw.SerialNumber[:], " "))
-	res.ModelNumber = string(bytes.TrimRight(raw.ModelNumber[:], " "))
-	res.FirmwareRevision = string(bytes.TrimRight(raw.FirmwareRevision[:], " "))
-	// OUIs are traditionally big-endian, but NVMe exposes them in little-endian
-	res.IEEEOUI[0], res.IEEEOUI[1], res.IEEEOUI[2] = raw.IEEEOUI[2], raw.IEEEOUI[1], raw.IEEEOUI[0]
-	res.IsPCIVirtualFunction = raw.CMIC&(1<<2) != 0
-	res.SpecVersionMajor = uint16(raw.Version >> 16)
-	res.SpecVersionMinor = uint8((raw.Version >> 8) & 0xFF)
-	res.FRUGloballyUniqueIdentifier = raw.FRUGUID
-	res.VirtualizationManagementSupported = raw.OACS&(1<<7) != 0
-	res.NVMeMISupported = raw.OACS&(1<<6) != 0
-	res.DirectivesSupported = raw.OACS&(1<<5) != 0
-	res.SelfTestSupported = raw.OACS&(1<<4) != 0
-	res.NamespaceManagementSupported = raw.OACS&(1<<3) != 0
-	res.FirmwareUpdateSupported = raw.OACS&(1<<2) != 0
-	res.FormattingSupported = raw.OACS&(1<<1) != 0
-	res.SecuritySupported = raw.OACS&(1<<0) != 0
-
-	res.TotalNVMCapacity = raw.TotalNVMCapacity.BigInt()
-	res.UnallocatedNVMCapacity = raw.UnallocatedNVMCapacity.BigInt()
-	res.MaximumNumberOfNamespaces = raw.NumberOfNamespaces
-	return &res, nil
-}
diff --git a/metropolis/pkg/nvme/nvme.go b/metropolis/pkg/nvme/nvme.go
deleted file mode 100644
index f46546d..0000000
--- a/metropolis/pkg/nvme/nvme.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Package nvme provides methods and data structures for issuing commands to
-// device speaking the NVMe protocol.
-// This package is written against the NVMe Specification Revision 1.3 and
-// all references to figures or other parts of the spec refer to this version.
-package nvme
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"syscall"
-	"time"
-)
-
-// Device is a handle for a NVMe device.
-type Device struct {
-	fd syscall.Conn
-}
-
-// NewFromFd creates a new NVMe device handle from a system handle.
-func NewFromFd(fd syscall.Conn) (*Device, error) {
-	d := &Device{fd: fd}
-	// There is no good way to validate that a file descriptor indeed points to
-	// a NVMe device. For future compatibility let this return an error so that
-	// code is already prepared to handle it.
-	return d, nil
-}
-
-// Open opens a new NVMe device handle from a device path (like /dev/nvme0).
-func Open(path string) (*Device, error) {
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, fmt.Errorf("unable to open path: %w", err)
-	}
-	return NewFromFd(f)
-}
-
-// Close closes the NVMe device handle. It returns an error if the handle was
-// not created by Open. Please close the handle passed to NewFromFd yourself
-// in that case.
-func (d *Device) Close() error {
-	if f, ok := d.fd.(*os.File); ok {
-		return f.Close()
-	} else {
-		return errors.New("unable to close device not opened via Open, please close it yourself")
-	}
-}
-
-const (
-	// GlobalNamespace is the namespace ID for operations not on a specific
-	// namespace.
-	GlobalNamespace = 0xffffffff
-)
-
-// Command represents a generic NVMe command. Only use this if the command
-// you need is not already wrapped by this library.
-type Command struct {
-	Opcode                                   uint8
-	Flags                                    uint8
-	NamespaceID                              uint32
-	CDW2, CDW3                               uint32
-	Metadata                                 []byte
-	Data                                     []byte
-	CDW10, CDW11, CDW12, CDW13, CDW14, CDW15 uint32
-	Timeout                                  time.Duration
-}
-
-func (d *Device) GetLogPage(ns uint32, logPageIdentifier uint8, logSpecificField uint8, logPageOffset uint64, pageBuf []byte) error {
-	numberOfDwords := len(pageBuf) / 4
-	return d.RawCommand(&Command{
-		Opcode:      0x02,
-		NamespaceID: ns,
-		Data:        pageBuf,
-		CDW10:       uint32(logPageIdentifier) | uint32(logSpecificField&0xF)<<8 | uint32(numberOfDwords)<<16, // TODO: RAE
-		CDW11:       uint32(numberOfDwords >> 16 & 0xffff),
-		CDW12:       uint32(logPageOffset & 0xffffffff),
-		CDW13:       uint32(logPageOffset >> 32),
-	})
-}
diff --git a/metropolis/pkg/nvme/selftest.go b/metropolis/pkg/nvme/selftest.go
deleted file mode 100644
index 8f46995..0000000
--- a/metropolis/pkg/nvme/selftest.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package nvme
-
-import (
-	"bytes"
-	"encoding/binary"
-)
-
-type SelfTestOp uint8
-
-const (
-	SelfTestNone     SelfTestOp = 0x0
-	SelfTestShort    SelfTestOp = 0x1
-	SelfTestExtended SelfTestOp = 0x2
-	SelfTestAbort    SelfTestOp = 0xF
-)
-
-func (d *Device) StartSelfTest(ns uint32, action SelfTestOp) error {
-	return d.RawCommand(&Command{
-		Opcode:      0x14,
-		NamespaceID: ns,
-		CDW10:       uint32(action & 0xF),
-	})
-}
-
-// Figure 99
-type selfTestResult struct {
-	SelfTestStatus             uint8
-	SegmentNumber              uint8
-	ValidDiagnosticInformation uint8
-	_                          byte
-	PowerOnHours               uint64
-	NamespaceID                uint32
-	FailingLBA                 uint64
-	StatusCodeType             uint8
-	StatusCode                 uint8
-	VendorSpecific             [2]byte
-}
-
-// Figure 98
-type selfTestLogPage struct {
-	CurrentSelfTestOp         uint8
-	CurrentSelfTestCompletion uint8
-	_                         [2]byte
-	SelfTestResults           [20]selfTestResult
-}
-
-type SelfTestResult struct {
-	// Op contains the self test type
-	Op            SelfTestOp
-	Result        uint8
-	SegmentNumber uint8
-	PowerOnHours  uint64
-	NamespaceID   uint32
-	FailingLBA    uint64
-	Error         Error
-}
-
-type SelfTestResults struct {
-	// CurrentOp contains the currently in-progress self test type (or
-	// SelfTestTypeNone if no self test is in progress).
-	CurrentOp SelfTestOp
-	// CurrentCompletion contains the progress from 0 to 1 of the currently
-	// in-progress self-test. Only valid if CurrentOp is not SelfTestTypeNone.
-	CurrentSelfTestCompletion float32
-	// PastResults contains a list of up to 20 previous self test results,
-	// sorted from the most recent to the oldest.
-	PastResults []SelfTestResult
-}
-
-func (d *Device) GetSelfTestResults(ns uint32) (*SelfTestResults, error) {
-	var buf [564]byte
-	if err := d.GetLogPage(ns, 0x06, 0, 0, buf[:]); err != nil {
-		return nil, err
-	}
-	var page selfTestLogPage
-	binary.Read(bytes.NewReader(buf[:]), binary.LittleEndian, &page)
-	var res SelfTestResults
-	res.CurrentOp = SelfTestOp(page.CurrentSelfTestOp & 0xF)
-	res.CurrentSelfTestCompletion = float32(page.CurrentSelfTestCompletion&0x7F) / 100.
-	for _, r := range page.SelfTestResults {
-		var t SelfTestResult
-		t.Op = SelfTestOp((r.SelfTestStatus >> 4) & 0xF)
-		t.Result = r.SelfTestStatus & 0xF
-		if t.Result == 0xF {
-			continue
-		}
-		t.SegmentNumber = r.SegmentNumber
-		t.PowerOnHours = r.PowerOnHours
-		t.NamespaceID = r.NamespaceID
-		t.FailingLBA = r.FailingLBA
-		t.Error.StatusCode = r.StatusCode
-		t.Error.StatusCodeType = r.StatusCodeType
-		res.PastResults = append(res.PastResults, t)
-	}
-	return &res, nil
-}
diff --git a/metropolis/pkg/nvme/struct_test.go b/metropolis/pkg/nvme/struct_test.go
deleted file mode 100644
index b26a48e..0000000
--- a/metropolis/pkg/nvme/struct_test.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package nvme
-
-import (
-	"encoding/binary"
-	"testing"
-)
-
-// TestStruct tests if the struct passed to Linux's ioctl has the ABI-specified
-// size.
-func TestStruct(t *testing.T) {
-	passthruCmdSize := binary.Size(passthruCmd{})
-	if passthruCmdSize != 72 {
-		t.Errorf("passthroughCmd is %d bytes, expected 72", passthruCmdSize)
-	}
-}
diff --git a/metropolis/pkg/nvme/uint128le.go b/metropolis/pkg/nvme/uint128le.go
deleted file mode 100644
index a25adb7..0000000
--- a/metropolis/pkg/nvme/uint128le.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package nvme
-
-import (
-	"math"
-	"math/big"
-)
-
-// uint128 little endian composed of two uint64s, readable by binary.Read.
-// Auxiliary type to simplify structures with uint128s (of which NVMe has
-// quite a few).
-type uint128le struct {
-	Lo, Hi uint64
-}
-
-// BigInt returns u as a bigint
-func (u uint128le) BigInt() *big.Int {
-	v := new(big.Int).SetUint64(u.Hi)
-	v = v.Lsh(v, 64)
-	v = v.Or(v, new(big.Int).SetUint64(u.Lo))
-	return v
-}
-
-// Uint64 returns u as a clamped uint64
-func (u uint128le) Uint64() uint64 {
-	if u.Hi > 0 {
-		return math.MaxUint64
-	}
-	return u.Lo
-}
diff --git a/metropolis/pkg/pki/BUILD.bazel b/metropolis/pkg/pki/BUILD.bazel
deleted file mode 100644
index d122ef4..0000000
--- a/metropolis/pkg/pki/BUILD.bazel
+++ /dev/null
@@ -1,34 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "pki",
-    srcs = [
-        "ca.go",
-        "certificate.go",
-        "crl.go",
-        "x509.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/pki",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//metropolis/pkg/event",
-        "//metropolis/pkg/event/etcd",
-        "//metropolis/pkg/fileargs",
-        "@io_etcd_go_etcd_client_v3//:client",
-    ],
-)
-
-go_test(
-    name = "pki_test",
-    srcs = [
-        "certificate_test.go",
-        "crl_test.go",
-    ],
-    embed = [":pki"],
-    deps = [
-        "//metropolis/pkg/logtree",
-        "@io_etcd_go_etcd_client_pkg_v3//testutil",
-        "@io_etcd_go_etcd_tests_v3//integration",
-        "@org_uber_go_zap//:zap",
-    ],
-)
diff --git a/metropolis/pkg/pki/ca.go b/metropolis/pkg/pki/ca.go
deleted file mode 100644
index b8c2aed..0000000
--- a/metropolis/pkg/pki/ca.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pki
-
-import (
-	"context"
-	"crypto/ed25519"
-	"crypto/rand"
-	"crypto/x509"
-	"fmt"
-	"math/big"
-	"time"
-
-	clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-// Issuer is an entity that can issue certificates. This interface is
-// implemented by SelfSigned, which is an issuer that emits self-signed
-// certificates, and any other Certificate that has been created with CA(),
-// which makes this Certificate act as a CA and issue (sign) ceritficates.
-type Issuer interface {
-	// CACertificate returns the DER-encoded x509 certificate of the CA that
-	// will sign certificates when Issue is called, or nil if this is
-	// self-signing issuer.
-	CACertificate(ctx context.Context, kv clientv3.KV) ([]byte, error)
-	// Issue will generate a certificate signed by the Issuer. The returned
-	// certificate is x509 DER-encoded.
-	Issue(ctx context.Context, req *Certificate, kv clientv3.KV) (cert []byte, err error)
-}
-
-// issueCertificate is a generic low level certificate-and-key issuance
-// function. If ca is null, the certificate will be self-signed. The returned
-// certificate is DER-encoded
-func issueCertificate(req *Certificate, ca *x509.Certificate, caKey ed25519.PrivateKey) (cert []byte, err error) {
-	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 127)
-	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
-	if err != nil {
-		err = fmt.Errorf("failed to generate serial number: %w", err)
-		return
-	}
-
-	skid, err := calculateSKID(req.PublicKey)
-	if err != nil {
-		return nil, err
-	}
-
-	req.Template.SerialNumber = serialNumber
-	req.Template.NotBefore = time.Now()
-	req.Template.NotAfter = UnknownNotAfter
-	req.Template.BasicConstraintsValid = true
-	req.Template.SubjectKeyId = skid
-
-	// Set the AuthorityKeyID to the SKID of the signing certificate (or self,
-	// if self-signing).
-	if ca != nil {
-		req.Template.AuthorityKeyId = ca.SubjectKeyId
-	} else {
-		req.Template.AuthorityKeyId = req.Template.SubjectKeyId
-		ca = &req.Template
-	}
-
-	return x509.CreateCertificate(rand.Reader, &req.Template, ca, req.PublicKey, caKey)
-}
-
-type selfSigned struct{}
-
-var (
-	// SelfSigned is an Issuer that generates self-signed certificates.
-	SelfSigned = &selfSigned{}
-)
-
-// Issue will generate a key and certificate that is self-signed.
-func (s *selfSigned) Issue(ctx context.Context, req *Certificate, kv clientv3.KV) (cert []byte, err error) {
-	if err := req.ensureKey(ctx, kv); err != nil {
-		return nil, err
-	}
-	if req.PrivateKey == nil {
-		return nil, fmt.Errorf("cannot issue self-signed certificate without a private key")
-	}
-	return issueCertificate(req, nil, req.PrivateKey)
-}
-
-// CACertificate returns nil for self-signed issuers.
-func (s *selfSigned) CACertificate(ctx context.Context, kv clientv3.KV) ([]byte, error) {
-	return nil, nil
-}
-
-// Issue will generate a key and certificate that is signed by this
-// Certificate, if the Certificate is a CA.
-func (c *Certificate) Issue(ctx context.Context, req *Certificate, kv clientv3.KV) (cert []byte, err error) {
-	if err := c.ensureKey(ctx, kv); err != nil {
-		return nil, fmt.Errorf("could not ensure CA %q key exists: %w", c.Name, err)
-	}
-	if err := req.ensureKey(ctx, kv); err != nil {
-		return nil, fmt.Errorf("could not subject %q key exists: %w", req.Name, err)
-	}
-	if c.PrivateKey == nil {
-		return nil, fmt.Errorf("cannot use certificate without private key as CA")
-	}
-
-	caCert, err := c.ensure(ctx, kv)
-	if err != nil {
-		return nil, fmt.Errorf("could not ensure CA %q certificate exists: %w", c.Name, err)
-	}
-
-	ca, err := x509.ParseCertificate(caCert)
-	if err != nil {
-		return nil, fmt.Errorf("could not parse CA certificate: %w", err)
-	}
-	// Ensure only one level of CAs exist, and that they are created explicitly.
-	req.Template.IsCA = false
-	return issueCertificate(req, ca, c.PrivateKey)
-}
-
-// CACertificate returns the DER encoded x509 form of this Certificate that
-// will be the used to issue child certificates.
-func (c *Certificate) CACertificate(ctx context.Context, kv clientv3.KV) ([]byte, error) {
-	return c.ensure(ctx, kv)
-}
diff --git a/metropolis/pkg/pki/certificate.go b/metropolis/pkg/pki/certificate.go
deleted file mode 100644
index a423680..0000000
--- a/metropolis/pkg/pki/certificate.go
+++ /dev/null
@@ -1,413 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// package pki implements an x509 PKI (Public Key Infrastructure) system backed
-// on etcd.
-package pki
-
-import (
-	"bytes"
-	"context"
-	"crypto/ed25519"
-	"crypto/rand"
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"encoding/pem"
-	"fmt"
-	"net"
-
-	clientv3 "go.etcd.io/etcd/client/v3"
-
-	"source.monogon.dev/metropolis/pkg/fileargs"
-)
-
-// Namespace represents some path in etcd where certificate/CA data will be
-// stored. Creating a namespace via Namespaced then permits the consumer of
-// this library to start creating certificates within this namespace.
-type Namespace struct {
-	prefix string
-}
-
-// Namespaced creates a namespace for storing certificate data in etcd at a
-// given 'path' prefix.
-func Namespaced(prefix string) Namespace {
-	return Namespace{
-		prefix: prefix,
-	}
-}
-
-type CertificateMode int
-
-const (
-	// CertificateManaged is a certificate whose key material is fully managed by
-	// the Certificate code. When set, PublicKey and PrivateKey must not be set by
-	// the user, and instead will be populated by the Ensure call. Name must be set,
-	// and will be used to store this Certificate and its keys within etcd. After
-	// the initial generation during Ensure, other Certificates with the same Name
-	// will be retrieved (including key material) from etcd.
-	CertificateManaged CertificateMode = iota
-
-	// CertificateExternal is a certificate whose key material is not managed by
-	// Certificate or stored in etcd, but the X509 certificate itself is. PublicKey
-	// must be set while PrivateKey must not be set. Name must be set, and will be
-	// used to store the emitted X509 certificate in etcd on Ensure. After the
-	// initial generation during Ensure, other Certificates with the same Name will
-	// be retrieved (without key material) from etcd.
-	CertificateExternal
-
-	// CertificateEphemeral is a certificate whose data (X509 certificate and
-	// possibly key material) is generated on demand each time Ensure is called.
-	// Nothing is stored in etcd or loaded from etcd. PrivateKey or PublicKey can be
-	// set, if both are nil then a new keypair will be generated. Name is ignored.
-	CertificateEphemeral
-)
-
-// Certificate is the promise of a Certificate being available to the caller.
-// In this case, Certificate refers to a pair of x509 certificate and
-// corresponding private key.  Certificates can be stored in etcd, and their
-// issuers might also be store on etcd. As such, this type's methods contain
-// references to an etcd KV client.
-type Certificate struct {
-	Namespace *Namespace
-
-	// Issuer is the Issuer that will generate this certificate if one doesn't
-	// yet exist or etcd, or the requested certificate is ephemeral (not to be
-	// stored on etcd).
-	Issuer Issuer
-	// Name is a unique key for storing the certificate in etcd (if the requested
-	// certificate is not ephemeral).
-	Name string
-	// Template is an x509 certificate definition that will be used to generate
-	// the certificate when issuing it.
-	Template x509.Certificate
-
-	// Mode in which this Certificate will operate. This influences the behaviour of
-	// the Ensure call.
-	Mode CertificateMode
-
-	// PrivateKey is the private key for this Certificate. It should never be set by
-	// the user, and instead will be populated by the Ensure call for Managed
-	// Certificates.
-	PrivateKey ed25519.PrivateKey
-
-	// PublicKey is the public key for this Certificate. It should only be set by
-	// the user for External or Ephemeral certificates, and will be populated by the
-	// next Ensure call if missing.
-	PublicKey ed25519.PublicKey
-}
-
-func (n *Namespace) etcdPath(f string, args ...interface{}) string {
-	return n.prefix + fmt.Sprintf(f, args...)
-}
-
-// Client makes a Kubernetes PKI-compatible client certificate template.
-// Directly derived from Kubernetes PKI requirements documented at
-//   https://kubernetes.io/docs/setup/best-practices/certificates/#configure-certificates-manually
-func Client(identity string, groups []string) x509.Certificate {
-	return x509.Certificate{
-		Subject: pkix.Name{
-			CommonName:   identity,
-			Organization: groups,
-		},
-		KeyUsage:    x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
-		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
-	}
-}
-
-// Server makes a Kubernetes PKI-compatible server certificate template.
-func Server(dnsNames []string, ips []net.IP) x509.Certificate {
-	return x509.Certificate{
-		Subject:     pkix.Name{},
-		KeyUsage:    x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
-		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-		DNSNames:    dnsNames,
-		IPAddresses: ips,
-	}
-}
-
-// CA makes a Certificate that can sign other certificates.
-func CA(cn string) x509.Certificate {
-	return x509.Certificate{
-		Subject: pkix.Name{
-			CommonName: cn,
-		},
-		IsCA:        true,
-		KeyUsage:    x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature,
-		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageOCSPSigning},
-	}
-}
-
-// ensure returns a DER-encoded x509 certificate and internally encoded bare
-// ed25519 key for a given Certificate, in memory (if ephemeral), loading it
-// from etcd, or creating and saving it on etcd if needed.
-// This function is safe to call in parallel from multiple etcd clients
-// (including across machines), but it will error in case a concurrent
-// certificate generation happens. These errors are, however, safe to retry -
-// as long as all the certificate creators (ie., Metropolis nodes) run the same
-// version of this code.
-func (c *Certificate) ensure(ctx context.Context, kv clientv3.KV) (cert []byte, err error) {
-	// Ensure key is available.
-	if err := c.ensureKey(ctx, kv); err != nil {
-		return nil, err
-	}
-
-	switch c.Mode {
-	case CertificateEphemeral:
-		// TODO(q3k): cache internally?
-		cert, err = c.Issuer.Issue(ctx, c, kv)
-		if err != nil {
-			return nil, fmt.Errorf("failed to issue: %w", err)
-		}
-		return cert, nil
-	case CertificateManaged, CertificateExternal:
-	default:
-		return nil, fmt.Errorf("invalid certificate mode %v", c.Mode)
-	}
-
-	if c.Name == "" {
-		if c.Mode == CertificateExternal {
-			return nil, fmt.Errorf("external certificate must have name set")
-		} else {
-			return nil, fmt.Errorf("managed certificate must have name set")
-		}
-	}
-
-	certPath := c.Namespace.etcdPath("issued/%s-cert.der", c.Name)
-
-	// Try loading certificate from etcd.
-	certRes, err := kv.Get(ctx, certPath)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get certificate from etcd: %w", err)
-	}
-
-	if len(certRes.Kvs) == 1 {
-		certBytes := certRes.Kvs[0].Value
-		cert, err := x509.ParseCertificate(certBytes)
-		if err != nil {
-			return nil, fmt.Errorf("failed to parse certificate retrieved from etcd: %w", err)
-		}
-		pk, ok := cert.PublicKey.(ed25519.PublicKey)
-		if !ok {
-			return nil, fmt.Errorf("unexpected non-ed25519 certificate found in etcd")
-		}
-		if !bytes.Equal(pk, c.PublicKey) {
-			return nil, fmt.Errorf("certificate stored in etcd emitted for different public key")
-		}
-		// TODO(q3k): ensure issuer and template haven't changed
-		return certBytes, nil
-	}
-
-	// No certificate found - issue one and save to etcd.
-	cert, err = c.Issuer.Issue(ctx, c, kv)
-	if err != nil {
-		return nil, fmt.Errorf("failed to issue: %w", err)
-	}
-
-	res, err := kv.Txn(ctx).
-		If(
-			clientv3.Compare(clientv3.CreateRevision(certPath), "=", 0),
-		).
-		Then(
-			clientv3.OpPut(certPath, string(cert)),
-		).Commit()
-	if err != nil {
-		err = fmt.Errorf("failed to write newly issued certificate: %w", err)
-	} else if !res.Succeeded {
-		err = fmt.Errorf("certificate issuance transaction failed: concurrent write")
-	}
-
-	return
-}
-
-// ensureKey retrieves or creates PublicKey as needed based on the Certificate
-// Mode. For Managed Certificates and Ephemeral Certificates with no PrivateKey
-// it will also populate PrivateKay.
-func (c *Certificate) ensureKey(ctx context.Context, kv clientv3.KV) error {
-	// If we have a public key then we're all set.
-	if c.PublicKey != nil {
-		return nil
-	}
-
-	// For ephemeral keys, we just generate them.
-	// For external keys, we can't do anything - not having the keys set means
-	// a programming error.
-
-	switch c.Mode {
-	case CertificateEphemeral:
-		pub, priv, err := ed25519.GenerateKey(rand.Reader)
-		if err != nil {
-			return fmt.Errorf("when generating ephemeral key: %w", err)
-		}
-		c.PublicKey = pub
-		c.PrivateKey = priv
-		return nil
-	case CertificateExternal:
-		if c.PrivateKey != nil {
-			// We prohibit having PrivateKey set in External Certificates to simplify the
-			// different logic paths this library implements. Being able to assume External
-			// == PublicKey only makes things easier elsewhere.
-			return fmt.Errorf("external certificate must not have PrivateKey set")
-		}
-		return fmt.Errorf("external certificate must have PublicKey set")
-	case CertificateManaged:
-	default:
-		return fmt.Errorf("invalid certificate mode %v", c.Mode)
-	}
-
-	// For managed keys, synchronize with etcd.
-	if c.Name == "" {
-		return fmt.Errorf("managed certificate must have Name set")
-	}
-
-	// First, try loading.
-	privPath := c.Namespace.etcdPath("keys/%s-privkey.bin", c.Name)
-	privRes, err := kv.Get(ctx, privPath)
-	if err != nil {
-		return fmt.Errorf("failed to get private key from etcd: %w", err)
-	}
-	if len(privRes.Kvs) == 1 {
-		privBytes := privRes.Kvs[0].Value
-		if len(privBytes) != ed25519.PrivateKeySize {
-			return fmt.Errorf("stored private key has invalid size")
-		}
-		c.PrivateKey = privBytes
-		c.PublicKey = c.PrivateKey.Public().(ed25519.PublicKey)
-		return nil
-	}
-
-	// No key in etcd? Generate and save.
-	pub, priv, err := ed25519.GenerateKey(rand.Reader)
-	if err != nil {
-		return fmt.Errorf("while generating keypair: %w", err)
-	}
-
-	res, err := kv.Txn(ctx).
-		If(
-			clientv3.Compare(clientv3.CreateRevision(privPath), "=", 0),
-		).
-		Then(
-			clientv3.OpPut(privPath, string(priv)),
-		).Commit()
-	if err != nil {
-		return fmt.Errorf("failed to write newly generated keypair: %w", err)
-	} else if !res.Succeeded {
-		return fmt.Errorf("key generation transaction failed: concurrent write")
-	}
-
-	crlPath := c.crlPath()
-	emptyCRL, err := c.makeCRL(ctx, kv, nil)
-	if err != nil {
-		return fmt.Errorf("failed to generate empty CRL: %w", err)
-	}
-
-	// Also attempt to emit an empty CRL if one doesn't exist yet.
-	_, err = kv.Txn(ctx).
-		If(
-			clientv3.Compare(clientv3.CreateRevision(crlPath), "=", 0),
-		).
-		Then(
-			clientv3.OpPut(crlPath, string(emptyCRL)),
-		).Commit()
-	if err != nil {
-		return fmt.Errorf("failed to upsert empty CRL")
-	}
-
-	c.PrivateKey = priv
-	c.PublicKey = pub
-	return nil
-}
-
-// Ensure returns an x509 DER-encoded (but not PEM-encoded) certificate for a
-// given Certificate.
-//
-// If the Certificate is ephemeral, each call to Ensure will cause a new
-// certificate to be generated. Otherwise, it will be retrieved from etcd, or
-// generated and stored there if needed.
-func (c *Certificate) Ensure(ctx context.Context, kv clientv3.KV) (cert []byte, err error) {
-	return c.ensure(ctx, kv)
-}
-
-func (c *Certificate) PrivateKeyX509() ([]byte, error) {
-	if c.PrivateKey == nil {
-		return nil, fmt.Errorf("certificate has no private key")
-	}
-	key, err := x509.MarshalPKCS8PrivateKey(c.PrivateKey)
-	if err != nil {
-		return nil, fmt.Errorf("could not marshal private key (data corruption?): %w", err)
-	}
-	return key, nil
-}
-
-// FilesystemCertificate is a fileargs.FileArgs wrapper which will contain PEM
-// encoded certificate material when Mounted. This construct is useful when
-// dealing with services that want to access etcd-backed certificates as files
-// available locally.
-// Paths to the available files are considered opaque and should not be leaked
-// outside of the struct. Further restrictions on access to these files might
-// be imposed in the future.
-type FilesystemCertificate struct {
-	*fileargs.FileArgs
-	// CACertPath is the full path at which the CA certificate is available.
-	// Read only.
-	CACertPath string
-	// CertPath is the full path at which the certificate is available. Read
-	// only.
-	CertPath string
-	// KeyPath is the full path at which the private key is available, or an empty
-	// string if the Certificate was created without a private key. Read only.
-	KeyPath string
-}
-
-// Mount returns a locally mounted FilesystemCertificate for this Certificate,
-// which allows services to access this Certificate via local filesystem
-// access.
-// The embeded fileargs.FileArgs can also be used to add additional file-backed
-// data under the same mount by calling ArgPath.
-// The returned FilesystemCertificate must be Closed in order to prevent a
-// system mount leak.
-func (c *Certificate) Mount(ctx context.Context, kv clientv3.KV) (*FilesystemCertificate, error) {
-	fa, err := fileargs.New()
-	if err != nil {
-		return nil, fmt.Errorf("when creating fileargs mount: %w", err)
-	}
-	fs := &FilesystemCertificate{FileArgs: fa}
-
-	cert, err := c.Ensure(ctx, kv)
-	if err != nil {
-		return nil, fmt.Errorf("when issuing certificate: %w", err)
-	}
-
-	cacert, err := c.Issuer.CACertificate(ctx, kv)
-	if err != nil {
-		return nil, fmt.Errorf("when getting issuer CA: %w", err)
-	}
-	// cacert will be null if this is a self-signed certificate.
-	if cacert == nil {
-		cacert = cert
-	}
-
-	fs.CACertPath = fs.ArgPath("ca.crt", pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cacert}))
-	fs.CertPath = fs.ArgPath("tls.crt", pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert}))
-	if c.PrivateKey != nil {
-		key, err := c.PrivateKeyX509()
-		if err != nil {
-			return nil, err
-		}
-		fs.KeyPath = fs.ArgPath("tls.key", pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: key}))
-	}
-
-	return fs, nil
-}
diff --git a/metropolis/pkg/pki/certificate_test.go b/metropolis/pkg/pki/certificate_test.go
deleted file mode 100644
index 19baf94..0000000
--- a/metropolis/pkg/pki/certificate_test.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package pki
-
-import (
-	"bytes"
-	"context"
-	"crypto/ed25519"
-	"crypto/rand"
-	"crypto/x509"
-	"testing"
-
-	"go.etcd.io/etcd/client/pkg/v3/testutil"
-	"go.etcd.io/etcd/tests/v3/integration"
-	"go.uber.org/zap"
-
-	"source.monogon.dev/metropolis/pkg/logtree"
-)
-
-// TestManaged ensures Managed Certificates work, including re-ensuring
-// certificates with the same data and issuing subordinate certificates.
-func TestManaged(t *testing.T) {
-	lt := logtree.New()
-	logtree.PipeAllToTest(t, lt)
-	tb, cancel := testutil.NewTestingTBProthesis("pki-managed")
-	defer cancel()
-	cluster := integration.NewClusterV3(tb, &integration.ClusterConfig{
-		Size: 1,
-		LoggerBuilder: func(memberName string) *zap.Logger {
-			dn := logtree.DN("etcd." + memberName)
-			return logtree.Zapify(lt.MustLeveledFor(dn), zap.WarnLevel)
-		},
-	})
-	cl := cluster.Client(0)
-	defer cluster.Terminate(tb)
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-	ns := Namespaced("/test-managed/")
-
-	// Test CA certificate issuance.
-	ca := &Certificate{
-		Namespace: &ns,
-		Issuer:    SelfSigned,
-		Name:      "ca",
-		Template:  CA("Test CA"),
-	}
-	caBytes, err := ca.Ensure(ctx, cl)
-	if err != nil {
-		t.Fatalf("Failed to Ensure CA: %v", err)
-	}
-	caCert, err := x509.ParseCertificate(caBytes)
-	if err != nil {
-		t.Fatalf("Failed to parse newly emited CA cert: %v", err)
-	}
-	if !caCert.IsCA {
-		t.Errorf("Newly emitted CA cert is not CA")
-	}
-	if ca.PublicKey == nil {
-		t.Errorf("Newly emitted CA cert has no public key")
-	}
-	if ca.PrivateKey == nil {
-		t.Errorf("Newly emitted CA cert has no public key")
-	}
-
-	// Re-emitting CA certificate with same parameters should return exact same
-	// data.
-	ca2 := &Certificate{
-		Namespace: &ns,
-		Issuer:    SelfSigned,
-		Name:      "ca",
-		Template:  CA("Test CA"),
-	}
-	caBytes2, err := ca2.Ensure(ctx, cl)
-	if err != nil {
-		t.Fatalf("Failed to re-Ensure CA: %v", err)
-	}
-	if !bytes.Equal(caBytes, caBytes2) {
-		t.Errorf("New CA has different x509 certificate")
-	}
-	if !bytes.Equal(ca.PublicKey, ca2.PublicKey) {
-		t.Errorf("New CA has different public key")
-	}
-	if !bytes.Equal(ca.PrivateKey, ca2.PrivateKey) {
-		t.Errorf("New CA has different private key")
-	}
-
-	// Emitting a subordinate certificate should work.
-	client := &Certificate{
-		Namespace: &ns,
-		Issuer:    ca2,
-		Name:      "client",
-		Template:  Client("foo", nil),
-	}
-	clientBytes, err := client.Ensure(ctx, cl)
-	if err != nil {
-		t.Fatalf("Failed to ensure client certificate: %v", err)
-	}
-	clientCert, err := x509.ParseCertificate(clientBytes)
-	if err != nil {
-		t.Fatalf("Failed to parse newly emitted client certificate: %v", err)
-	}
-	if clientCert.IsCA {
-		t.Errorf("New client cert is CA")
-	}
-	if want, got := "foo", clientCert.Subject.CommonName; want != got {
-		t.Errorf("New client CN should be %q, got %q", want, got)
-	}
-	if want, got := caCert.Subject.String(), clientCert.Issuer.String(); want != got {
-		t.Errorf("New client issuer should be %q, got %q", want, got)
-	}
-}
-
-// TestExternal ensures External certificates work correctly, including
-// re-Ensuring certificates with the same public key, and attempting to re-issue
-// the same certificate with a different public key (which should fail).
-func TestExternal(t *testing.T) {
-	lt := logtree.New()
-	logtree.PipeAllToTest(t, lt)
-	tb, cancel := testutil.NewTestingTBProthesis("pki-managed")
-	defer cancel()
-	cluster := integration.NewClusterV3(tb, &integration.ClusterConfig{
-		Size: 1,
-		LoggerBuilder: func(memberName string) *zap.Logger {
-			dn := logtree.DN("etcd." + memberName)
-			return logtree.Zapify(lt.MustLeveledFor(dn), zap.WarnLevel)
-		},
-	})
-	cl := cluster.Client(0)
-	defer cluster.Terminate(tb)
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-	ns := Namespaced("/test-external/")
-
-	ca := &Certificate{
-		Namespace: &ns,
-		Issuer:    SelfSigned,
-		Name:      "ca",
-		Template:  CA("Test CA"),
-	}
-
-	// Issuing an external certificate should work.
-	pk, _, err := ed25519.GenerateKey(rand.Reader)
-	if err != nil {
-		t.Fatalf("GenerateKey: %v", err)
-	}
-	server := &Certificate{
-		Namespace: &ns,
-		Issuer:    ca,
-		Name:      "server",
-		Template:  Server([]string{"server"}, nil),
-		Mode:      CertificateExternal,
-		PublicKey: pk,
-	}
-	serverBytes, err := server.Ensure(ctx, cl)
-	if err != nil {
-		t.Fatalf("Failed to Ensure server certificate: %v", err)
-	}
-
-	// Issuing an external certificate with the same name but different public key
-	// should fail.
-	pk2, _, err := ed25519.GenerateKey(rand.Reader)
-	if err != nil {
-		t.Fatalf("GenerateKey: %v", err)
-	}
-	server2 := &Certificate{
-		Namespace: &ns,
-		Issuer:    ca,
-		Name:      "server",
-		Template:  Server([]string{"server"}, nil),
-		Mode:      CertificateExternal,
-		PublicKey: pk2,
-	}
-	if _, err := server2.Ensure(ctx, cl); err == nil {
-		t.Fatalf("Issuing server certificate with different public key should have failed")
-	}
-
-	// Issuing the external certificate with the same name and same public key
-	// should work and yield the same x509 bytes.
-	server3 := &Certificate{
-		Namespace: &ns,
-		Issuer:    ca,
-		Name:      "server",
-		Template:  Server([]string{"server"}, nil),
-		Mode:      CertificateExternal,
-		PublicKey: pk,
-	}
-	serverBytes3, err := server3.Ensure(ctx, cl)
-	if err != nil {
-		t.Fatalf("Failed to re-Ensure server certificate: %v", err)
-	}
-	if !bytes.Equal(serverBytes, serverBytes3) {
-		t.Errorf("New server certificate has different x509 certificate")
-	}
-}
diff --git a/metropolis/pkg/pki/crl.go b/metropolis/pkg/pki/crl.go
deleted file mode 100644
index f72fb2d..0000000
--- a/metropolis/pkg/pki/crl.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package pki
-
-import (
-	"context"
-	"crypto/rand"
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"fmt"
-	"math/big"
-	"time"
-
-	clientv3 "go.etcd.io/etcd/client/v3"
-
-	"source.monogon.dev/metropolis/pkg/event"
-	"source.monogon.dev/metropolis/pkg/event/etcd"
-)
-
-// crlPath returns the etcd path under which the marshaled X.509 Certificate
-// Revocation List is stored.
-//
-// TODO(q3k): use etcd keyspace API from
-func (c *Certificate) crlPath() string {
-	return c.Namespace.etcdPath("%s-crl.der", c.Name)
-}
-
-// Revoke performs a CRL-based revocation of a given certificate by this CA,
-// looking it up by DNS name. The revocation is immediately written to the
-// backing etcd store and will be available to consumers through the WatchCRL
-// API.
-//
-// An error is returned if the CRL could not be emitted (eg. due to an etcd
-// communication error, a conflicting CRL write) or if the given hostname
-// matches no emitted certificate.
-//
-// Only Managed and External certificates can be revoked.
-func (c Certificate) Revoke(ctx context.Context, kv clientv3.KV, hostname string) error {
-	crlPath := c.crlPath()
-	issuedCerts := c.Namespace.etcdPath("issued/")
-
-	res, err := kv.Txn(ctx).Then(
-		clientv3.OpGet(crlPath),
-		clientv3.OpGet(issuedCerts, clientv3.WithPrefix())).Commit()
-	if err != nil {
-		return fmt.Errorf("failed to retrieve certificates and CRL from etcd: %w", err)
-	}
-
-	// Parse certs, CRL and CRL revision from state.
-	var certs []*x509.Certificate
-	var crlRevision int64
-	var crl *pkix.CertificateList
-	for _, el := range res.Responses {
-		for _, kv := range el.GetResponseRange().GetKvs() {
-			if string(kv.Key) == crlPath {
-				crl, err = x509.ParseCRL(kv.Value)
-				if err != nil {
-					return fmt.Errorf("could not parse CRL from etcd: %w", err)
-				}
-				crlRevision = kv.CreateRevision
-			} else {
-				cert, err := x509.ParseCertificate(kv.Value)
-				if err != nil {
-					return fmt.Errorf("could not parse certificate %q from etcd: %w", string(kv.Key), err)
-				}
-				certs = append(certs, cert)
-			}
-		}
-	}
-	if crl == nil {
-		return fmt.Errorf("could not find CRL in etcd")
-	}
-	revoked := crl.TBSCertList.RevokedCertificates
-
-	// Find requested hostname in issued certificates.
-	var serial *big.Int
-	for _, cert := range certs {
-		for _, dnsName := range cert.DNSNames {
-			if dnsName == hostname {
-				serial = cert.SerialNumber
-				break
-			}
-		}
-		if serial != nil {
-			break
-		}
-	}
-	if serial == nil {
-		return fmt.Errorf("could not find requested hostname")
-	}
-
-	// Check if certificate has already been revoked.
-	for _, revokedCert := range revoked {
-		if revokedCert.SerialNumber.Cmp(serial) == 0 {
-			return nil // Already revoked
-		}
-	}
-
-	// Otherwise, revoke and save new CRL.
-	revoked = append(revoked, pkix.RevokedCertificate{
-		SerialNumber:   serial,
-		RevocationTime: time.Now(),
-	})
-
-	crlRaw, err := c.makeCRL(ctx, kv, revoked)
-	if err != nil {
-		return fmt.Errorf("when generating new CRL for revocation: %w", err)
-	}
-
-	res, err = kv.Txn(ctx).If(
-		clientv3.Compare(clientv3.CreateRevision(crlPath), "=", crlRevision),
-	).Then(
-		clientv3.OpPut(crlPath, string(crlRaw)),
-	).Commit()
-	if err != nil {
-		return fmt.Errorf("when saving new CRL: %w", err)
-	}
-	if !res.Succeeded {
-		return fmt.Errorf("CRL save transaction failed, retry possible")
-	}
-
-	return nil
-}
-
-// makeCRL returns a valid CRL for a given list of certificates to be revoked.
-// The given etcd client is used to ensure this CA certificate exists in etcd,
-// but is not used to write any CRL to etcd.
-func (c *Certificate) makeCRL(ctx context.Context, kv clientv3.KV, revoked []pkix.RevokedCertificate) ([]byte, error) {
-	if c.Mode != CertificateManaged {
-		return nil, fmt.Errorf("only managed certificates can issue CRLs")
-	}
-	certBytes, err := c.ensure(ctx, kv)
-	if err != nil {
-		return nil, fmt.Errorf("when ensuring certificate: %w", err)
-	}
-	cert, err := x509.ParseCertificate(certBytes)
-	if err != nil {
-		return nil, fmt.Errorf("when parsing issuing certificate: %w", err)
-	}
-	crl, err := cert.CreateCRL(rand.Reader, c.PrivateKey, revoked, time.Now(), UnknownNotAfter)
-	if err != nil {
-		return nil, fmt.Errorf("failed to generate CRL: %w", err)
-	}
-	return crl, nil
-}
-
-// WatchCRL returns and Event Value compatible CRLWatcher which can be used to
-// retrieve and watch for the newest CRL available from this CA certificate.
-func (c *Certificate) WatchCRL(cl etcd.ThinClient) event.Watcher[*CRL] {
-	value := etcd.NewValue(cl, c.crlPath(), func(_, data []byte) (*CRL, error) {
-		crl, err := x509.ParseCRL(data)
-		if err != nil {
-			return nil, fmt.Errorf("could not parse CRL from etcd: %w", err)
-		}
-		return &CRL{
-			Raw:  data,
-			List: crl,
-		}, nil
-	})
-	return value.Watch()
-}
-
-type CRL struct {
-	Raw  []byte
-	List *pkix.CertificateList
-}
diff --git a/metropolis/pkg/pki/crl_test.go b/metropolis/pkg/pki/crl_test.go
deleted file mode 100644
index e47eab9..0000000
--- a/metropolis/pkg/pki/crl_test.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package pki
-
-import (
-	"context"
-	"crypto/x509"
-	"testing"
-
-	"go.etcd.io/etcd/client/pkg/v3/testutil"
-	"go.etcd.io/etcd/tests/v3/integration"
-)
-
-// TestRevoke exercises the CRL revocation and watching functionality of a CA
-// certificate.
-func TestRevoke(t *testing.T) {
-	tb, cancel := testutil.NewTestingTBProthesis("pki-revoke")
-	defer cancel()
-	cluster := integration.NewClusterV3(tb, &integration.ClusterConfig{
-		Size: 1,
-	})
-	cl := cluster.Client(0)
-	defer cluster.Terminate(tb)
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-	ns := Namespaced("/test-managed/")
-
-	ca := &Certificate{
-		Namespace: &ns,
-		Issuer:    SelfSigned,
-		Name:      "ca",
-		Template:  CA("Test CA"),
-	}
-	sub := &Certificate{
-		Namespace: &ns,
-		Issuer:    ca,
-		Name:      "sub",
-		Template:  Server([]string{"server"}, nil),
-	}
-
-	caCertBytes, err := ca.Ensure(ctx, cl)
-	if err != nil {
-		t.Fatalf("Ensuring ca certificate failed: %v", err)
-	}
-	caCert, err := x509.ParseCertificate(caCertBytes)
-	if err != nil {
-		t.Fatalf("Loading newly emitted CA certificate failed: %v", err)
-	}
-
-	subCertBytes, err := sub.Ensure(ctx, cl)
-	if err != nil {
-		t.Fatalf("Ensuring sub certificate failed: %v", err)
-	}
-	subCert, err := x509.ParseCertificate(subCertBytes)
-	if err != nil {
-		t.Fatalf("Loading newly emitted sub certificate failed: %v", err)
-	}
-
-	// Ensure CRL is correctly signed and that subCert is not yet on it.
-	crlW := ca.WatchCRL(cl)
-	crl, err := crlW.Get(ctx)
-	if err != nil {
-		t.Fatalf("Retrieving initial CRL failed: %v", err)
-	}
-	if err := caCert.CheckCRLSignature(crl.List); err != nil {
-		t.Fatalf("Initial CRL not signed by CA: %v", err)
-	}
-	for _, el := range crl.List.TBSCertList.RevokedCertificates {
-		if el.SerialNumber.Cmp(subCert.SerialNumber) == 0 {
-			t.Fatalf("Newly emitted certificate is already on CRL.")
-		}
-	}
-
-	// Emit yet another certificate. Also shouldn't be on CRL.
-	bad := &Certificate{
-		Namespace: &ns,
-		Issuer:    ca,
-		Name:      "bad",
-		Template:  Server([]string{"badserver"}, nil),
-	}
-	badCertBytes, err := bad.Ensure(ctx, cl)
-	if err != nil {
-		t.Fatalf("Ensuring bad certificate failed: %v", err)
-	}
-	badCert, err := x509.ParseCertificate(badCertBytes)
-	if err != nil {
-		t.Fatalf("Loading newly emitted bad certificate failed: %v", err)
-	}
-	for _, el := range crl.List.TBSCertList.RevokedCertificates {
-		if el.SerialNumber.Cmp(badCert.SerialNumber) == 0 {
-			t.Fatalf("Newly emitted bad certificate is already on CRL.")
-		}
-	}
-
-	// Revoke bad certificate. Should now be present in CRL.
-	if err := ca.Revoke(ctx, cl, "badserver"); err != nil {
-		t.Fatalf("Revoke failed: %v", err)
-	}
-	// Get in a loop until found.
-	for {
-		crl, err = crlW.Get(ctx)
-		if err != nil {
-			t.Fatalf("Get failed: %v", err)
-		}
-		found := false
-		for _, el := range crl.List.TBSCertList.RevokedCertificates {
-			if el.SerialNumber.Cmp(badCert.SerialNumber) == 0 {
-				found = true
-			}
-			if el.SerialNumber.Cmp(subCert.SerialNumber) == 0 {
-				t.Errorf("Found non-revoked cert in CRL")
-			}
-		}
-		if found {
-			break
-		}
-	}
-	// Now revoke first certificate. Both should be now present in CRL.
-	if err := ca.Revoke(ctx, cl, "server"); err != nil {
-		t.Fatalf("Revoke failed: %v", err)
-	}
-	// Get in a loop until found.
-	for {
-		crl, err = crlW.Get(ctx)
-		if err != nil {
-			t.Fatalf("Get failed: %v", err)
-		}
-		foundSub := false
-		foundBad := false
-		for _, el := range crl.List.TBSCertList.RevokedCertificates {
-			if el.SerialNumber.Cmp(badCert.SerialNumber) == 0 {
-				foundBad = true
-			}
-			if el.SerialNumber.Cmp(subCert.SerialNumber) == 0 {
-				foundSub = true
-			}
-		}
-		if foundBad && foundSub {
-			break
-		}
-	}
-}
diff --git a/metropolis/pkg/pki/x509.go b/metropolis/pkg/pki/x509.go
deleted file mode 100644
index 40e7a08..0000000
--- a/metropolis/pkg/pki/x509.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pki
-
-import (
-	"crypto"
-	"crypto/sha1"
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"encoding/asn1"
-	"time"
-)
-
-var (
-	// From RFC 5280 Section 4.1.2.5
-	UnknownNotAfter = time.Unix(253402300799, 0)
-)
-
-// Workaround for https://github.com/golang/go/issues/26676 in Go's
-// crypto/x509. Specifically Go violates Section 4.2.1.2 of RFC 5280 without
-// this. Fixed for 1.15 in https://go-review.googlesource.com/c/go/+/227098/.
-//
-// Taken from https://github.com/FiloSottile/mkcert/blob/master/cert.go#L295
-// Written by one of Go's crypto engineers
-//
-// TODO(lorenz): remove this once we migrate to Go 1.15.
-func calculateSKID(pubKey crypto.PublicKey) ([]byte, error) {
-	spkiASN1, err := x509.MarshalPKIXPublicKey(pubKey)
-	if err != nil {
-		return nil, err
-	}
-
-	var spki struct {
-		Algorithm        pkix.AlgorithmIdentifier
-		SubjectPublicKey asn1.BitString
-	}
-	_, err = asn1.Unmarshal(spkiASN1, &spki)
-	if err != nil {
-		return nil, err
-	}
-	skid := sha1.Sum(spki.SubjectPublicKey.Bytes)
-	return skid[:], nil
-}
diff --git a/metropolis/pkg/pstore/BUILD.bazel b/metropolis/pkg/pstore/BUILD.bazel
deleted file mode 100644
index ac84095..0000000
--- a/metropolis/pkg/pstore/BUILD.bazel
+++ /dev/null
@@ -1,14 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "pstore",
-    srcs = ["pstore.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/pstore",
-    visibility = ["//visibility:public"],
-)
-
-go_test(
-    name = "pstore_test",
-    srcs = ["pstore_test.go"],
-    embed = [":pstore"],
-)
diff --git a/metropolis/pkg/pstore/pstore.go b/metropolis/pkg/pstore/pstore.go
deleted file mode 100644
index 610f565..0000000
--- a/metropolis/pkg/pstore/pstore.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// The pstore package provides functions for interfacing with the Linux kernel's
-// pstore (persistent storage) system.
-// Documentation for pstore itself can be found at
-// https://docs.kernel.org/admin-guide/abi-testing.html#abi-sys-fs-pstore.
-package pstore
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"io/fs"
-	"os"
-	"path/filepath"
-	"regexp"
-	"sort"
-	"strconv"
-	"time"
-)
-
-// CanonicalMountPath contains the canonical mount path of the pstore filesystem
-const CanonicalMountPath = "/sys/fs/pstore"
-
-// pstoreDmesgHeader contains parsed header data from a pstore header.
-type pstoreDmesgHeader struct {
-	Reason  string
-	Counter uint64
-	Part    uint64
-}
-
-var headerRegexp = regexp.MustCompile("^([^#]+)#([0-9]+) Part([0-9]+)$")
-
-// parseDmesgHeader parses textual pstore entry headers as assembled by
-// @linux//fs/pstore/platform.c:pstore_dump back into a structured format.
-// The input must be the first line of a file with the terminating \n stripped.
-func parseDmesgHeader(hdr string) (*pstoreDmesgHeader, error) {
-	parts := headerRegexp.FindStringSubmatch(hdr)
-	if parts == nil {
-		return nil, errors.New("unable to parse pstore entry header")
-	}
-	counter, err := strconv.ParseUint(parts[2], 10, 64)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse pstore header count: %w", err)
-	}
-	part, err := strconv.ParseUint(parts[3], 10, 64)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse pstore header part: %w", err)
-	}
-	return &pstoreDmesgHeader{
-		Reason:  parts[1],
-		Counter: counter,
-		Part:    part,
-	}, nil
-}
-
-// KmsgDump reassembled a kernel message buffer dump from pstore.
-type KmsgDump struct {
-	// The reason why the dump was created. Common values include "Panic" and
-	// "Oops", but depending on the setting `printk.always_kmsg_dump` and
-	// potential future reasons this is likely unbounded.
-	Reason string
-	// The CLOCK_REALTIME value of the first entry in the dump (which is the
-	// closest to the actual time the dump happened). This can be zero or
-	// garbage if the RTC hasn't been initialized or the system has no working
-	// clock source.
-	OccurredAt time.Time
-	// A counter counting up for every dump created. Can be used to order dumps
-	// when the OccurredAt value is not usable due to system issues.
-	Counter uint64
-	// A list of kernel log lines in oldest-to-newest order, i.e. the oldest
-	// message comes first. The actual cause is generally reported last.
-	Lines []string
-}
-
-var dmesgFileRegexp = regexp.MustCompile("^dmesg-.*-([0-9]+)")
-
-var pmsgFileRegexp = regexp.MustCompile("^pmsg-.*-([0-9]+)")
-
-type pstoreDmesgFile struct {
-	hdr   pstoreDmesgHeader
-	ctime time.Time
-	lines []string
-}
-
-// GetKmsgDumps returns a list of events where the kernel has dumped its kmsg
-// (kernel log) buffer into pstore because of a kernel oops or panic.
-func GetKmsgDumps() ([]KmsgDump, error) {
-	return getKmsgDumpsFromFS(os.DirFS(CanonicalMountPath))
-}
-
-// GetPmsgDump returns lines written into /dev/pmsg0
-func GetPmsgDump() ([]string, error) {
-	var lines []string
-	pstoreEntries, err := os.ReadDir(CanonicalMountPath)
-	if err != nil {
-		return nil, fmt.Errorf("failed to list files in pstore: %w", err)
-	}
-	for _, entry := range pstoreEntries {
-		if !pmsgFileRegexp.MatchString(entry.Name()) {
-			continue
-		}
-		f, err := os.Open(filepath.Join(CanonicalMountPath, entry.Name()))
-		if err != nil {
-			return lines, fmt.Errorf("failed to open pstore entry file: %w", err)
-		}
-		// This only closes after all files have been read, but the number of
-		// files is heavily bound by very small amounts of pstore space.
-		defer f.Close()
-		s := bufio.NewScanner(f)
-		for s.Scan() {
-			lines = append(lines, s.Text())
-		}
-	}
-	return lines, nil
-}
-
-// f is injected here for testing
-func getKmsgDumpsFromFS(f fs.FS) ([]KmsgDump, error) {
-	var events []KmsgDump
-	eventMap := make(map[string][]pstoreDmesgFile)
-	pstoreEntries, err := fs.ReadDir(f, ".")
-	if err != nil {
-		return events, fmt.Errorf("failed to list files in pstore: %w", err)
-	}
-	for _, entry := range pstoreEntries {
-		if !dmesgFileRegexp.MatchString(entry.Name()) {
-			continue
-		}
-		f, err := f.Open(entry.Name())
-		if err != nil {
-			return events, fmt.Errorf("failed to open pstore entry file: %w", err)
-		}
-		// This only closes after all files have been read, but the number of
-		// files is heavily bound by very small amounts of pstore space.
-		defer f.Close()
-		finfo, err := f.Stat()
-		if err != nil {
-			return events, fmt.Errorf("failed to stat pstore entry file: %w", err)
-		}
-		s := bufio.NewScanner(f)
-		if !s.Scan() {
-			return events, fmt.Errorf("cannot read first line header of pstore entry %q: %w", entry.Name(), s.Err())
-		}
-		hdr, err := parseDmesgHeader(s.Text())
-		if err != nil {
-			return events, fmt.Errorf("failed to parse header of file %q: %w", entry.Name(), err)
-		}
-		var lines []string
-		for s.Scan() {
-			lines = append(lines, s.Text())
-		}
-		// Same textual encoding is used in the header itself, so this
-		// is as unique as it gets.
-		key := fmt.Sprintf("%v#%d", hdr.Reason, hdr.Counter)
-		eventMap[key] = append(eventMap[key], pstoreDmesgFile{hdr: *hdr, ctime: finfo.ModTime(), lines: lines})
-	}
-
-	for _, event := range eventMap {
-		sort.Slice(event, func(i, j int) bool {
-			return event[i].hdr.Part > event[j].hdr.Part
-		})
-		ev := KmsgDump{
-			Counter: event[len(event)-1].hdr.Counter,
-			Reason:  event[len(event)-1].hdr.Reason,
-			// Entries get created in reverse order, so the most accurate
-			// timestamp is the first one.
-			OccurredAt: event[len(event)-1].ctime,
-		}
-		for _, entry := range event {
-			ev.Lines = append(ev.Lines, entry.lines...)
-		}
-		events = append(events, ev)
-	}
-	sort.Slice(events, func(i, j int) bool {
-		return !events[i].OccurredAt.Before(events[j].OccurredAt)
-	})
-	return events, nil
-}
-
-// ClearAll clears out all existing entries from the pstore. This should be done
-// after every start (after the relevant data has been read out) to ensure that
-// there is always space to store new pstore entries and to minimize the risk
-// of breaking badly-programmed firmware.
-func ClearAll() error {
-	pstoreEntries, err := os.ReadDir(CanonicalMountPath)
-	if err != nil {
-		return fmt.Errorf("failed to list files in pstore: %w", err)
-	}
-	for _, entry := range pstoreEntries {
-		if err := os.Remove(filepath.Join(CanonicalMountPath, entry.Name())); err != nil {
-			return fmt.Errorf("failed to clear pstore entry: %w", err)
-		}
-	}
-	return nil
-}
diff --git a/metropolis/pkg/pstore/pstore_test.go b/metropolis/pkg/pstore/pstore_test.go
deleted file mode 100644
index 0190f04..0000000
--- a/metropolis/pkg/pstore/pstore_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package pstore
-
-import (
-	"fmt"
-	"testing"
-	"testing/fstest"
-	"time"
-)
-
-func TestParseHeader(t *testing.T) {
-	var cases = []struct {
-		input       string
-		expectedOut *pstoreDmesgHeader
-	}{
-		{"Panic#2 Part30", &pstoreDmesgHeader{"Panic", 2, 30}},
-		{"Oops#1 Part5", &pstoreDmesgHeader{"Oops", 1, 5}},
-		// Random kernel output that is similar, but definitely not a dump header
-		{"<4>[2501503.489317] Oops: 0010 [#1] SMP NOPTI", nil},
-	}
-	for i, c := range cases {
-		t.Run(fmt.Sprintf("Test#%d", i+1), func(t *testing.T) {
-			out, err := parseDmesgHeader(c.input)
-			switch {
-			case err != nil && c.expectedOut != nil:
-				t.Errorf("Failed parsing %q: %v", c.input, err)
-			case err == nil && c.expectedOut == nil:
-				t.Errorf("Successfully parsed %q, expected error", c.input)
-			case err != nil && c.expectedOut == nil:
-			case err == nil && c.expectedOut != nil:
-				if out.Part != c.expectedOut.Part {
-					t.Errorf("Expected part to be %d, got %d", c.expectedOut.Part, out.Part)
-				}
-				if out.Counter != c.expectedOut.Counter {
-					t.Errorf("Expected counter to be %d, got %d", c.expectedOut.Counter, out.Counter)
-				}
-				if out.Reason != c.expectedOut.Reason {
-					t.Errorf("Expected reason to be %q, got %q", c.expectedOut.Reason, out.Reason)
-				}
-			}
-		})
-	}
-}
-
-func TestGetKmsgDumps(t *testing.T) {
-	testTime1 := time.Date(2022, 06, 13, 1, 2, 3, 4, time.UTC)
-	testTime2 := time.Date(2020, 06, 13, 1, 2, 3, 4, time.UTC)
-	testTime3 := time.Date(2010, 06, 13, 1, 2, 3, 4, time.UTC)
-	cases := []struct {
-		name          string
-		inputFS       fstest.MapFS
-		expectErr     bool
-		expectedDumps []KmsgDump
-	}{
-		{"EmptyPstore", map[string]*fstest.MapFile{}, false, []KmsgDump{}},
-		{"SingleDumpSingleFile", map[string]*fstest.MapFile{
-			"dmesg-efi-165467917816002": {ModTime: testTime1, Data: []byte("Panic#2 Part1\ntest1\ntest2")},
-			"yolo-efi-165467917816002":  {ModTime: testTime1, Data: []byte("something totally unrelated")},
-		}, false, []KmsgDump{{
-			Reason:     "Panic",
-			OccurredAt: testTime1,
-			Counter:    2,
-			Lines: []string{
-				"test1",
-				"test2",
-			},
-		}}},
-		{"SingleDumpMultipleFiles", map[string]*fstest.MapFile{
-			"dmesg-efi-165467917816002": {ModTime: testTime1, Data: []byte("Panic#2 Part1\ntest2\ntest3")},
-			"dmesg-efi-165467917817002": {ModTime: testTime2, Data: []byte("Panic#2 Part2\ntest1")},
-		}, false, []KmsgDump{{
-			Reason:     "Panic",
-			OccurredAt: testTime1,
-			Counter:    2,
-			Lines: []string{
-				"test1",
-				"test2",
-				"test3",
-			},
-		}}},
-		{"MultipleDumpsMultipleFiles", map[string]*fstest.MapFile{
-			"dmesg-efi-165467917816002": {ModTime: testTime1, Data: []byte("Panic#2 Part1\ntest2\ntest3")},
-			"dmesg-efi-165467917817002": {ModTime: testTime2, Data: []byte("Panic#2 Part2\ntest1")},
-			"dmesg-efi-265467917816002": {ModTime: testTime3, Data: []byte("Oops#1 Part1\noops3")},
-			"dmesg-efi-265467917817002": {ModTime: testTime2, Data: []byte("Oops#1 Part2\noops1\noops2")},
-		}, false, []KmsgDump{{
-			Reason:     "Panic",
-			OccurredAt: testTime1,
-			Counter:    2,
-			Lines: []string{
-				"test1",
-				"test2",
-				"test3",
-			},
-		}, {
-			Reason:     "Oops",
-			OccurredAt: testTime3,
-			Counter:    1,
-			Lines: []string{
-				"oops1",
-				"oops2",
-				"oops3",
-			},
-		}}},
-	}
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			dumps, err := getKmsgDumpsFromFS(c.inputFS)
-			switch {
-			case err == nil && c.expectErr:
-				t.Error("Expected error, but got none")
-				return
-			case err != nil && !c.expectErr:
-				t.Errorf("Got unexpected error: %v", err)
-				return
-			case err != nil && c.expectErr:
-				// Got expected error
-				return
-			case err == nil && !c.expectErr:
-				if len(dumps) != len(c.expectedDumps) {
-					t.Fatalf("Expected %d dumps, got %d", len(c.expectedDumps), len(dumps))
-				}
-				for i, dump := range dumps {
-					if dump.OccurredAt != c.expectedDumps[i].OccurredAt {
-						t.Errorf("Dump %d expected to have occurred at %v, got %v", i, c.expectedDumps[i].OccurredAt, dump.OccurredAt)
-					}
-					if dump.Reason != c.expectedDumps[i].Reason {
-						t.Errorf("Expected reason in dump %d to be %v, got %v", i, c.expectedDumps[i].Reason, dump.Reason)
-					}
-					if dump.Counter != c.expectedDumps[i].Counter {
-						t.Errorf("Expected counter in dump %d to be %d, got %d", i, c.expectedDumps[i].Counter, dump.Counter)
-					}
-					if len(dump.Lines) != len(c.expectedDumps[i].Lines) {
-						t.Errorf("Expected number of lines in dump %d to be %d, got %d", i, len(c.expectedDumps[i].Lines), len(dump.Lines))
-					}
-					for j := range dump.Lines {
-						if dump.Lines[j] != c.expectedDumps[i].Lines[j] {
-							t.Errorf("Expected line %d in dump %d to be %q, got %q", i, j, c.expectedDumps[i].Lines[j], dump.Lines[j])
-						}
-					}
-				}
-			}
-		})
-	}
-}
diff --git a/metropolis/pkg/scsi/BUILD.bazel b/metropolis/pkg/scsi/BUILD.bazel
deleted file mode 100644
index 9c00b26..0000000
--- a/metropolis/pkg/scsi/BUILD.bazel
+++ /dev/null
@@ -1,27 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "scsi",
-    srcs = [
-        "dev_block.go",
-        "health.go",
-        "inquiry.go",
-        "log.go",
-        "scsi.go",
-        "scsi_linux.go",
-        "scsi_linux_defs.go",
-        "sensekeydata.go",
-    ],
-    cgo = True,
-    importpath = "source.monogon.dev/metropolis/pkg/scsi",
-    visibility = ["//visibility:public"],
-    deps = select({
-        "@io_bazel_rules_go//go/platform:android": [
-            "@org_golang_x_sys//unix",
-        ],
-        "@io_bazel_rules_go//go/platform:linux": [
-            "@org_golang_x_sys//unix",
-        ],
-        "//conditions:default": [],
-    }),
-)
diff --git a/metropolis/pkg/scsi/dev_block.go b/metropolis/pkg/scsi/dev_block.go
deleted file mode 100644
index fba0512..0000000
--- a/metropolis/pkg/scsi/dev_block.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package scsi
-
-// Written against SBC-4
-// Contains SCSI block device specific commands.
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-)
-
-// ReadDefectDataLBA reads the primary (manufacturer) and/or grown defect list
-// in LBA format. This is commonly used on SSDs and generally returns an error
-// on spinning drives.
-func (d *Device) ReadDefectDataLBA(plist, glist bool) ([]uint64, error) {
-	data := make([]byte, 4096)
-	var req [8]byte
-	if plist {
-		req[1] |= 1 << 4
-	}
-	if glist {
-		req[1] |= 1 << 3
-	}
-	defectListFormat := 0b011
-	req[1] |= byte(defectListFormat)
-	binary.BigEndian.PutUint16(req[6:8], uint16(len(data)))
-	if err := d.RawCommand(&CommandDataBuffer{
-		OperationCode:         ReadDefectDataOp,
-		Request:               req[:],
-		Data:                  data,
-		DataTransferDirection: DataTransferFromDevice,
-	}); err != nil {
-		var fixedErr *FixedError
-		if errors.As(err, &fixedErr) && fixedErr.SenseKey == RecoveredError && fixedErr.AdditionalSenseCode == DefectListNotFound {
-			return nil, fmt.Errorf("error during LOG SENSE: unsupported defect list format, device returned %03bb", data[1]&0b111)
-		}
-		return nil, fmt.Errorf("error during LOG SENSE: %w", err)
-	}
-	if data[1]&0b111 != byte(defectListFormat) {
-		return nil, fmt.Errorf("device returned wrong defect list format, requested %03bb, got %03bb", defectListFormat, data[1]&0b111)
-	}
-	defectListLength := binary.BigEndian.Uint16(data[2:4])
-	if defectListLength%8 != 0 {
-		return nil, errors.New("returned defect list not divisible by array item size")
-	}
-	if len(data) < int(defectListLength)+4 {
-		return nil, errors.New("returned defect list longer than buffer")
-	}
-	res := make([]uint64, defectListLength/8)
-	if err := binary.Read(bytes.NewReader(data[4:]), binary.BigEndian, &res); err != nil {
-		panic(err)
-	}
-	return res, nil
-}
-
-const (
-	// AllSectors is a magic sector number indicating that it applies to all
-	// sectors on the track.
-	AllSectors = math.MaxUint16
-)
-
-// PhysicalSectorFormatAddress represents a physical sector (or the the whole
-// track if SectorNumber == AllSectors) on a spinning hard drive.
-type PhysicalSectorFormatAddress struct {
-	CylinderNumber              uint32
-	HeadNumber                  uint8
-	SectorNumber                uint32
-	MultiAddressDescriptorStart bool
-}
-
-func parseExtendedPhysicalSectorFormatAddress(buf []byte) (p PhysicalSectorFormatAddress) {
-	p.CylinderNumber = uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])
-	p.HeadNumber = buf[3]
-	p.MultiAddressDescriptorStart = buf[4]&(1<<7) != 0
-	p.SectorNumber = uint32(buf[4]&0b1111)<<24 | uint32(buf[5])<<16 | uint32(buf[6])<<8 | uint32(buf[7])
-	return
-}
-
-func parsePhysicalSectorFormatAddress(buf []byte) (p PhysicalSectorFormatAddress) {
-	p.CylinderNumber = uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])
-	p.HeadNumber = buf[3]
-	p.SectorNumber = binary.BigEndian.Uint32(buf[4:8])
-	return
-}
-
-// ReadDefectDataPhysical reads the primary (manufacturer) and/or grown defect
-// list in physical format.
-// This is only defined for spinning drives, returning an error on SSDs.
-func (d *Device) ReadDefectDataPhysical(plist, glist bool) ([]PhysicalSectorFormatAddress, error) {
-	data := make([]byte, 4096)
-	var req [8]byte
-	if plist {
-		req[1] |= 1 << 4
-	}
-	if glist {
-		req[1] |= 1 << 3
-	}
-	defectListFormat := 0b101
-	req[1] |= byte(defectListFormat)
-	binary.BigEndian.PutUint16(req[6:8], uint16(len(data)))
-	if err := d.RawCommand(&CommandDataBuffer{
-		OperationCode:         ReadDefectDataOp,
-		Request:               req[:],
-		Data:                  data,
-		DataTransferDirection: DataTransferFromDevice,
-	}); err != nil {
-		var fixedErr *FixedError
-		if errors.As(err, &fixedErr) && fixedErr.SenseKey == RecoveredError && fixedErr.AdditionalSenseCode == DefectListNotFound {
-			return nil, fmt.Errorf("error during LOG SENSE: unsupported defect list format, device returned %03bb", data[1]&0b111)
-		}
-		return nil, fmt.Errorf("error during LOG SENSE: %w", err)
-	}
-	if data[1]&0b111 != byte(defectListFormat) {
-		return nil, fmt.Errorf("device returned wrong defect list format, requested %03bb, got %03bb", defectListFormat, data[1]&0b111)
-	}
-	defectListLength := binary.BigEndian.Uint16(data[2:4])
-	if defectListLength%8 != 0 {
-		return nil, errors.New("returned defect list not divisible by array item size")
-	}
-	if len(data) < int(defectListLength)+4 {
-		return nil, errors.New("returned defect list longer than buffer")
-	}
-	res := make([]PhysicalSectorFormatAddress, defectListLength/8)
-	data = data[4:]
-	for i := 0; i < int(defectListLength)/8; i++ {
-		res[i] = parsePhysicalSectorFormatAddress(data[i*8 : (i+1)*8])
-	}
-	return res, nil
-}
-
-type SolidStateMediaHealth struct {
-	// PercentageUsedEnduranceIndicator is a value which represents a
-	// vendor-specific wear estimate of the solid state medium.
-	// A new device starts at 0, at 100 the device is considered end-of-life.
-	// Values up to 255 are possible.
-	PercentageUsedEnduranceIndicator uint8
-}
-
-// SolidStateMediaHealth reports parameters about the health of the solid-state
-// media of a SCSI block device.
-func (d *Device) SolidStateMediaHealth() (*SolidStateMediaHealth, error) {
-	raw, err := d.LogSenseParameters(LogSenseRequest{PageCode: 0x11})
-	if err != nil {
-		return nil, err
-	}
-	if len(raw[0x1]) == 0 {
-		return nil, errors.New("mandatory parameter 0001h missing")
-	}
-	param1 := raw[0x01][0]
-	if len(param1.Data) < 4 {
-		return nil, errors.New("parameter 0001h too short")
-	}
-	return &SolidStateMediaHealth{
-		PercentageUsedEnduranceIndicator: param1.Data[3],
-	}, nil
-}
diff --git a/metropolis/pkg/scsi/health.go b/metropolis/pkg/scsi/health.go
deleted file mode 100644
index 724fd75..0000000
--- a/metropolis/pkg/scsi/health.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package scsi
-
-import "errors"
-
-type InformationalExceptions struct {
-	InformationalSenseCode AdditionalSenseCode
-	Temperature            uint8
-}
-
-func (d *Device) GetInformationalExceptions() (*InformationalExceptions, error) {
-	raw, err := d.LogSenseParameters(LogSenseRequest{PageCode: 0x0b})
-	if err != nil {
-		return nil, err
-	}
-	if len(raw[0x1]) == 0 {
-		return nil, errors.New("mandatory parameter 0001h missing")
-	}
-	param1 := raw[0x01][0]
-	if len(param1.Data) < 3 {
-		return nil, errors.New("parameter 0001h too short")
-	}
-	return &InformationalExceptions{
-		InformationalSenseCode: AdditionalSenseCode(uint16(param1.Data[0])<<8 | uint16(param1.Data[1])),
-		Temperature:            param1.Data[2],
-	}, nil
-}
diff --git a/metropolis/pkg/scsi/inquiry.go b/metropolis/pkg/scsi/inquiry.go
deleted file mode 100644
index 819b011..0000000
--- a/metropolis/pkg/scsi/inquiry.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package scsi
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-)
-
-// Inquiry queries the device for various metadata about its identity and
-// supported features.
-func (d Device) Inquiry() (*InquiryData, error) {
-	data := make([]byte, 96)
-	var req [4]byte
-	binary.BigEndian.PutUint16(req[2:4], uint16(len(data)))
-	if err := d.RawCommand(&CommandDataBuffer{
-		OperationCode:         InquiryOp,
-		Request:               req[:],
-		Data:                  data,
-		DataTransferDirection: DataTransferFromDevice,
-	}); err != nil {
-		return nil, fmt.Errorf("error during INQUIRY: %w", err)
-	}
-	resLen := int64(data[4]) + 5
-	// Use LimitReader to not have to deal with out-of-bounds slices
-	rawReader := io.LimitReader(bytes.NewReader(data), resLen)
-	var raw inquiryDataRaw
-	if err := binary.Read(rawReader, binary.BigEndian, &raw); err != nil {
-		if errors.Is(err, io.ErrUnexpectedEOF) {
-			return nil, fmt.Errorf("response to INQUIRY is smaller than %d bytes, very old or broken device", binary.Size(raw))
-		}
-		panic(err) // Read from memory, shouldn't be possible to hit
-	}
-
-	var res InquiryData
-	res.PeriperalQualifier = (raw.PeripheralData >> 5) & 0b111
-	res.PeripheralDeviceType = DeviceType(raw.PeripheralData & 0b11111)
-	res.RemovableMedium = (raw.Flags1 & 1 << 0) != 0
-	res.LogicalUnitConglomerate = (raw.Flags1 & 1 << 1) != 0
-	res.CommandSetVersion = Version(raw.Version)
-	res.NormalACASupported = (raw.Flags2 & 1 << 5) != 0
-	res.HistoricalSupport = (raw.Flags2 & 1 << 4) != 0
-	res.ResponseDataFormat = raw.Flags2 & 0b1111
-	res.SCCSupported = (raw.Flags3 & 1 << 7) != 0
-	res.TargetPortGroupSupport = (raw.Flags3 >> 4) & 0b11
-	res.ThirdPartyCopySupport = (raw.Flags3 & 1 << 3) != 0
-	res.HasProtectionInfo = (raw.Flags3 & 1 << 0) != 0
-	res.HasEnclosureServices = (raw.Flags4 & 1 << 6) != 0
-	res.VendorFeature1 = (raw.Flags4 & 1 << 5) != 0
-	res.HasMultipleSCSIPorts = (raw.Flags4 & 1 << 4) != 0
-	res.CmdQueue = (raw.Flags5 & 1 << 1) != 0
-	res.VendorFeature2 = (raw.Flags5 & 1 << 0) != 0
-	res.Vendor = string(bytes.TrimRight(raw.Vendor[:], " "))
-	res.Product = string(bytes.TrimRight(raw.Product[:], " "))
-	res.ProductRevisionLevel = string(bytes.TrimRight(raw.ProductRevisionLevel[:], " "))
-
-	// Read rest conditionally, as it might not be present on every device
-	var vendorSpecific bytes.Buffer
-	_, err := io.CopyN(&vendorSpecific, rawReader, 20)
-	res.VendorSpecific = vendorSpecific.Bytes()
-	if err == io.EOF {
-		return &res, nil
-	}
-	if err != nil {
-		panic(err) // Mem2Mem copy, can't really happen
-	}
-	var padding [2]byte
-	if _, err := io.ReadFull(rawReader, padding[:]); err != nil {
-		if errors.Is(err, io.ErrUnexpectedEOF) {
-			return &res, nil
-		}
-	}
-	for i := 0; i < 8; i++ {
-		var versionDesc uint16
-		if err := binary.Read(rawReader, binary.BigEndian, &versionDesc); err != nil {
-			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
-				return &res, nil
-			}
-		}
-		res.VersionDescriptors = append(res.VersionDescriptors, versionDesc)
-	}
-
-	return &res, nil
-}
-
-// Table 148, only first 36 mandatory bytes
-type inquiryDataRaw struct {
-	PeripheralData       uint8
-	Flags1               uint8
-	Version              uint8
-	Flags2               uint8
-	AdditionalLength     uint8 // n-4
-	Flags3               uint8
-	Flags4               uint8
-	Flags5               uint8
-	Vendor               [8]byte
-	Product              [16]byte
-	ProductRevisionLevel [4]byte
-}
-
-// DeviceType represents a SCSI peripheral device type, which
-// can be used to determine the command set to use to control
-// the device. See Table 150 in the standard.
-type DeviceType uint8
-
-const (
-	TypeBlockDevice              DeviceType = 0x00
-	TypeSequentialAccessDevice   DeviceType = 0x01
-	TypeProcessor                DeviceType = 0x03
-	TypeOpticalDrive             DeviceType = 0x05
-	TypeOpticalMemory            DeviceType = 0x07
-	TypeMediaChanger             DeviceType = 0x08
-	TypeArrayController          DeviceType = 0x0c
-	TypeEncloseServices          DeviceType = 0x0d
-	TypeOpticalCardRWDevice      DeviceType = 0x0f
-	TypeObjectStorageDevice      DeviceType = 0x11
-	TypeAutomationDriveInterface DeviceType = 0x12
-	TypeZonedBlockDevice         DeviceType = 0x14
-	TypeUnknownDevice            DeviceType = 0x1f
-)
-
-var deviceTypeDesc = map[DeviceType]string{
-	TypeBlockDevice:              "Block Device",
-	TypeSequentialAccessDevice:   "Sequential Access Device",
-	TypeProcessor:                "Processor",
-	TypeOpticalDrive:             "Optical Drive",
-	TypeOpticalMemory:            "Optical Memory",
-	TypeMediaChanger:             "Media Changer",
-	TypeArrayController:          "Array Controller",
-	TypeEncloseServices:          "Enclosure Services",
-	TypeOpticalCardRWDevice:      "Optical Card reader/writer device",
-	TypeObjectStorageDevice:      "Object-based Storage Device",
-	TypeAutomationDriveInterface: "Automation/Drive Interface",
-	TypeZonedBlockDevice:         "Zoned Block Device",
-	TypeUnknownDevice:            "Unknown or no device",
-}
-
-func (d DeviceType) String() string {
-	if str, ok := deviceTypeDesc[d]; ok {
-		return str
-	}
-	return fmt.Sprintf("unknown device type %xh", uint8(d))
-}
-
-// Version represents a specific standardized version of the SCSI
-// primary command set (SPC). The enum values are sorted, so
-// for example version >= SPC3 is true for SPC-3 and all later
-// standards. See table 151 in the standard.
-type Version uint8
-
-const (
-	SPC1 = 0x03
-	SPC2 = 0x04
-	SPC3 = 0x05
-	SPC4 = 0x06
-	SPC5 = 0x07
-)
-
-var versionDesc = map[Version]string{
-	SPC1: "SPC-1 (INCITS 301-1997)",
-	SPC2: "SPC-2 (INCITS 351-2001)",
-	SPC3: "SPC-3 (INCITS 408-2005)",
-	SPC4: "SPC-4 (INCITS 513-2015)",
-	SPC5: "SPC-5 (INCITS 502-2019)",
-}
-
-func (v Version) String() string {
-	if str, ok := versionDesc[v]; ok {
-		return str
-	}
-	return fmt.Sprintf("unknown version %xh", uint8(v))
-}
-
-// InquiryData contains data returned by the INQUIRY command.
-type InquiryData struct {
-	PeriperalQualifier      uint8
-	PeripheralDeviceType    DeviceType
-	RemovableMedium         bool
-	LogicalUnitConglomerate bool
-	CommandSetVersion       Version
-	NormalACASupported      bool
-	HistoricalSupport       bool
-	ResponseDataFormat      uint8
-	SCCSupported            bool
-	TargetPortGroupSupport  uint8
-	ThirdPartyCopySupport   bool
-	HasProtectionInfo       bool
-	HasEnclosureServices    bool
-	VendorFeature1          bool
-	HasMultipleSCSIPorts    bool
-	CmdQueue                bool
-	VendorFeature2          bool
-	Vendor                  string
-	Product                 string
-	ProductRevisionLevel    string
-	VendorSpecific          []byte
-	VersionDescriptors      []uint16
-}
-
-// VPDPageCode see Table 498
-type VPDPageCode uint8
-
-const (
-	SupportedVPDs                      VPDPageCode = 0x00
-	UnitSerialNumberVPD                VPDPageCode = 0x80
-	DeviceIdentificationVPD            VPDPageCode = 0x83
-	SoftwareInterfaceIdentificationVPD VPDPageCode = 0x84
-	ManagementNetworkAddressesVPD      VPDPageCode = 0x85
-	ExtendedINQUIRYDataVPD             VPDPageCode = 0x86
-	ModePagePolicyVPD                  VPDPageCode = 0x87
-	SCSIPortsVPD                       VPDPageCode = 0x88
-	ATAInformationVPD                  VPDPageCode = 0x89
-	PowerConditionVPD                  VPDPageCode = 0x8a
-	DeviceConstituentsVPD              VPDPageCode = 0x8b
-)
-
-var vpdPageCodeDesc = map[VPDPageCode]string{
-	SupportedVPDs:                      "Supported VPD Pages",
-	UnitSerialNumberVPD:                "Unit Serial Number",
-	DeviceIdentificationVPD:            "Device Identification",
-	SoftwareInterfaceIdentificationVPD: "Software Interface Identification",
-	ManagementNetworkAddressesVPD:      "Management Network Addresses",
-	ExtendedINQUIRYDataVPD:             "Extended INQUIRY Data",
-	ModePagePolicyVPD:                  "Mode Page Policy",
-	SCSIPortsVPD:                       "SCSI Ports",
-	ATAInformationVPD:                  "ATA Information",
-	PowerConditionVPD:                  "Power Condition",
-	DeviceConstituentsVPD:              "Device Constituents",
-}
-
-func (v VPDPageCode) String() string {
-	if str, ok := vpdPageCodeDesc[v]; ok {
-		return str
-	}
-	return fmt.Sprintf("Page %xh", uint8(v))
-}
-
-// InquiryVPD requests a specified Vital Product Description Page from the
-// device. If the size of the page is known in advance, initialSize should be
-// set to a non-zero value to make the query more efficient.
-func (d *Device) InquiryVPD(pageCode VPDPageCode, initialSize uint16) ([]byte, error) {
-	var bufferSize uint16 = 254
-	if initialSize > 0 {
-		bufferSize = initialSize
-	}
-	for {
-		data := make([]byte, bufferSize)
-		var req [4]byte
-		req[0] = 0b1 // Enable Vital Product Data
-		req[1] = uint8(pageCode)
-		binary.BigEndian.PutUint16(req[2:4], uint16(len(data)))
-		if err := d.RawCommand(&CommandDataBuffer{
-			OperationCode:         InquiryOp,
-			Request:               req[:],
-			Data:                  data,
-			DataTransferDirection: DataTransferFromDevice,
-		}); err != nil {
-			return nil, fmt.Errorf("error during INQUIRY VPD: %w", err)
-		}
-		if data[1] != uint8(pageCode) {
-			return nil, fmt.Errorf("requested VPD page %x, got %x", pageCode, data[1])
-		}
-		pageLength := binary.BigEndian.Uint16(data[2:4])
-		if pageLength > math.MaxUint16-4 {
-			// Guard against uint16 overflows, this cannot be requested anyway
-			return nil, fmt.Errorf("device VPD page is too long (%d bytes)", pageLength)
-		}
-		if pageLength > uint16(len(data)-4) {
-			bufferSize = pageLength + 4
-			continue
-		}
-		return data[4 : pageLength+4], nil
-	}
-}
-
-// SupportedVPDPages returns the list of supported vital product data pages
-// supported by the device.
-func (d *Device) SupportedVPDPages() (map[VPDPageCode]bool, error) {
-	res, err := d.InquiryVPD(SupportedVPDs, 0)
-	if err != nil {
-		return nil, err
-	}
-	supportedPages := make(map[VPDPageCode]bool)
-	for _, p := range res {
-		supportedPages[VPDPageCode(p)] = true
-	}
-	return supportedPages, nil
-}
-
-// UnitSerialNumber returns the serial number of the device. Only available if
-// UnitSerialNumberVPD is a supported VPD page.
-func (d *Device) UnitSerialNumber() (string, error) {
-	serial, err := d.InquiryVPD(UnitSerialNumberVPD, 0)
-	if err != nil {
-		return "", err
-	}
-	return string(bytes.Trim(serial, " \x00")), nil
-}
diff --git a/metropolis/pkg/scsi/log.go b/metropolis/pkg/scsi/log.go
deleted file mode 100644
index 3aecd5e..0000000
--- a/metropolis/pkg/scsi/log.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package scsi
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-)
-
-type LogSenseRequest struct {
-	// PageCode contains the identifier of the requested page
-	PageCode uint8
-	// SubpageCode contains the identifier of the requested subpage
-	// or the zero value if no subpage is requested.
-	SubpageCode uint8
-	// PageControl specifies what type of values should be returned for bounded
-	// and unbounded log parameters. See also Table 156 in the standard.
-	PageControl uint8
-	// ParameterPointer allows requesting parameter data beginning from a
-	// specific parameter code. The zero value starts from the beginning.
-	ParameterPointer uint16
-	// SaveParameters requests the device to save all parameters without
-	// DisableUpdate set to non-volatile storage.
-	SaveParameters bool
-	// InitialSize is an optional hint how big the buffer for the log page
-	// should be for the initial request. The zero value sets this to 4096.
-	InitialSize uint16
-}
-
-// LogSenseRaw requests a raw log page. For log pages with parameters
-// LogSenseParameters is better-suited.
-func (d *Device) LogSenseRaw(r LogSenseRequest) ([]byte, error) {
-	var bufferSize uint16 = 4096
-	for {
-		data := make([]byte, bufferSize)
-		var req [8]byte
-		if r.SaveParameters {
-			req[0] = 0b1
-		}
-		req[1] = r.PageControl<<6 | r.PageCode
-		req[2] = r.SubpageCode
-		binary.BigEndian.PutUint16(req[4:6], r.ParameterPointer)
-		binary.BigEndian.PutUint16(req[6:8], uint16(len(data)))
-		if err := d.RawCommand(&CommandDataBuffer{
-			OperationCode:         LogSenseOp,
-			Request:               req[:],
-			Data:                  data,
-			DataTransferDirection: DataTransferFromDevice,
-		}); err != nil {
-			return nil, fmt.Errorf("error during LOG SENSE: %w", err)
-		}
-		if data[0]&0b111111 != r.PageCode {
-			return nil, fmt.Errorf("requested log page %x, got %x", r.PageCode, data[1])
-		}
-		if data[1] != r.SubpageCode {
-			return nil, fmt.Errorf("requested log subpage %x, got %x", r.SubpageCode, data[1])
-		}
-		pageLength := binary.BigEndian.Uint16(data[2:4])
-		if pageLength > math.MaxUint16-4 {
-			// Guard against uint16 overflows, this cannot be requested anyways
-			return nil, fmt.Errorf("device log page is too long (%d bytes)", pageLength)
-		}
-		if pageLength > uint16(len(data)-4) {
-			bufferSize = pageLength + 4
-			continue
-		}
-		return data[4 : pageLength+4], nil
-	}
-}
-
-// SupportedLogPages returns a map with all supported log pages.
-// This can return an error if the device does not support logs at all.
-func (d *Device) SupportedLogPages() (map[uint8]bool, error) {
-	raw, err := d.LogSenseRaw(LogSenseRequest{PageCode: 0})
-	if err != nil {
-		return nil, err
-	}
-	res := make(map[uint8]bool)
-	for _, r := range raw {
-		res[r] = true
-	}
-	return res, nil
-}
-
-// PageAndSubpage identifies a log page uniquely.
-type PageAndSubpage uint16
-
-func NewPageAndSubpage(page, subpage uint8) PageAndSubpage {
-	return PageAndSubpage(uint16(page)<<8 | uint16(subpage))
-}
-
-func (p PageAndSubpage) Page() uint8 {
-	return uint8(p >> 8)
-}
-func (p PageAndSubpage) Subpage() uint8 {
-	return uint8(p & 0xFF)
-}
-
-func (p PageAndSubpage) String() string {
-	return fmt.Sprintf("Page %xh Subpage %xh", p.Page(), p.Subpage())
-}
-
-// SupportedLogPagesAndSubpages returns the list of supported pages and subpages.
-// This can return an error if the device does not support logs at all.
-func (d *Device) SupportedLogPagesAndSubpages() (map[PageAndSubpage]bool, error) {
-	raw, err := d.LogSenseRaw(LogSenseRequest{PageCode: 0x00, SubpageCode: 0xff})
-	if err != nil {
-		return nil, err
-	}
-	res := make(map[PageAndSubpage]bool)
-	for i := 0; i < len(raw)/2; i++ {
-		res[NewPageAndSubpage(raw[i*2], raw[(i*2)+1])] = true
-	}
-	return res, nil
-}
-
-// SupportedLogSubPages returns the list of subpages supported in a log page.
-func (d *Device) SupportedLogSubPages(pageCode uint8) (map[uint8]bool, error) {
-	raw, err := d.LogSenseRaw(LogSenseRequest{PageCode: pageCode, SubpageCode: 0xff})
-	if err != nil {
-		return nil, err
-	}
-	res := make(map[uint8]bool)
-	for _, r := range raw {
-		res[r] = true
-	}
-	return res, nil
-}
-
-type LogParameter struct {
-	// DisableUpdate indicates if the device is updating this parameter.
-	// If this is true the parameter has either overflown or updating has been
-	// manually disabled.
-	DisableUpdate bool
-	// TargetSaveDisable indicates if automatic saving of this parameter has
-	// been disabled.
-	TargetSaveDisable bool
-	// FormatAndLinking contains the format of the log parameter.
-	FormatAndLinking uint8
-	// Data contains the payload of the log parameter.
-	Data []byte
-}
-
-// LogSenseParameters returns the parameters of a log page. The returned map
-// contains one entry per parameter ID in the result. The order of parameters
-// of the same ID is kept.
-func (d *Device) LogSenseParameters(r LogSenseRequest) (map[uint16][]LogParameter, error) {
-	raw, err := d.LogSenseRaw(r)
-	if err != nil {
-		return nil, err
-	}
-	res := make(map[uint16][]LogParameter)
-	for {
-		if len(raw) == 0 {
-			break
-		}
-		if len(raw) < 4 {
-			return nil, errors.New("not enough data left to read full parameter metadata")
-		}
-		var param LogParameter
-		parameterCode := binary.BigEndian.Uint16(raw[0:2])
-		param.DisableUpdate = raw[2]&(1<<7) != 0
-		param.TargetSaveDisable = raw[2]&(1<<5) != 0
-		param.FormatAndLinking = raw[2] & 0b11
-		if int(raw[3]) > len(raw)-4 {
-			fmt.Println(raw[3], len(raw))
-			return nil, errors.New("unable to read parameter, not enough data for indicated length")
-		}
-		param.Data = raw[4 : int(raw[3])+4]
-		raw = raw[int(raw[3])+4:]
-		res[parameterCode] = append(res[parameterCode], param)
-	}
-	return res, nil
-}
diff --git a/metropolis/pkg/scsi/scsi.go b/metropolis/pkg/scsi/scsi.go
deleted file mode 100644
index f09bf44..0000000
--- a/metropolis/pkg/scsi/scsi.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// INCITS 502 Revision 19 / SPC-5 R19
-package scsi
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"syscall"
-	"time"
-)
-
-// Device is a handle for a SCSI device
-type Device struct {
-	fd syscall.Conn
-}
-
-// NewFromFd creates a new SCSI device handle from a system handle.
-func NewFromFd(fd syscall.Conn) (*Device, error) {
-	d := &Device{fd: fd}
-	// There is no good way to validate that a file descriptor indeed points to
-	// a SCSI device. For future compatibility let this return an error so that
-	// code is already prepared to handle it.
-	return d, nil
-}
-
-// Open creates a new SCSI device handle from a device path (like /dev/sda).
-func Open(path string) (*Device, error) {
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, fmt.Errorf("unable to open path: %w", err)
-	}
-	return NewFromFd(f)
-}
-
-// Close closes the SCSI device handle if opened by Open()
-func (d *Device) Close() error {
-	if f, ok := d.fd.(*os.File); ok {
-		return f.Close()
-	} else {
-		return errors.New("unable to close device not opened via Open, please close it yourself")
-	}
-}
-
-type DataTransferDirection uint8
-
-const (
-	DataTransferNone DataTransferDirection = iota
-	DataTransferToDevice
-	DataTransferFromDevice
-	DataTransferBidirectional
-)
-
-type OperationCode uint8
-
-const (
-	InquiryOp        OperationCode = 0x12
-	ReadDefectDataOp OperationCode = 0x37
-	LogSenseOp       OperationCode = 0x4d
-)
-
-// CommandDataBuffer represents a command
-type CommandDataBuffer struct {
-	// OperationCode contains the code of the command to be called
-	OperationCode OperationCode
-	// Request contains the OperationCode-specific request parameters
-	Request []byte
-	// ServiceAction can (for certain CDB encodings) contain an additional
-	// qualification for the OperationCode.
-	ServiceAction *uint8
-	// Control contains common CDB metadata
-	Control uint8
-	// DataTransferDirection contains the direction(s) of the data transfer(s)
-	// to be made.
-	DataTransferDirection DataTransferDirection
-	// Data contains the data to be transferred. If data needs to be received
-	// from the device, a buffer needs to be provided here.
-	Data []byte
-	// Timeout can contain an optional timeout (0 = no timeout) for the command
-	Timeout time.Duration
-}
-
-// Bytes returns the raw CDB to be sent to the device
-func (c *CommandDataBuffer) Bytes() ([]byte, error) {
-	// Table 24
-	switch {
-	case c.OperationCode < 0x20:
-		// Use CDB6 as defined in Table 3
-		if c.ServiceAction != nil {
-			return nil, errors.New("ServiceAction field not available in CDB6")
-		}
-		if len(c.Request) != 4 {
-			return nil, fmt.Errorf("CDB6 request size is %d bytes, needs to be 4 bytes without LengthField", len(c.Request))
-		}
-
-		outBuf := make([]byte, 6)
-		outBuf[0] = uint8(c.OperationCode)
-
-		copy(outBuf[1:5], c.Request)
-		outBuf[5] = c.Control
-		return outBuf, nil
-	case c.OperationCode < 0x60:
-		// Use CDB10 as defined in Table 5
-		if len(c.Request) != 8 {
-			return nil, fmt.Errorf("CDB10 request size is %d bytes, needs to be 4 bytes", len(c.Request))
-		}
-
-		outBuf := make([]byte, 10)
-		outBuf[0] = uint8(c.OperationCode)
-		copy(outBuf[1:9], c.Request)
-		if c.ServiceAction != nil {
-			outBuf[1] |= *c.ServiceAction & 0b11111
-		}
-		outBuf[9] = c.Control
-		return outBuf, nil
-	case c.OperationCode < 0x7e:
-		return nil, errors.New("OperationCode is reserved")
-	case c.OperationCode == 0x7e:
-		// Use variable extended
-		return nil, errors.New("variable extended CDBs are unimplemented")
-	case c.OperationCode == 0x7f:
-		// Use variable
-		return nil, errors.New("variable CDBs are unimplemented")
-	case c.OperationCode < 0xa0:
-		// Use CDB16 as defined in Table 13
-		if len(c.Request) != 14 {
-			return nil, fmt.Errorf("CDB16 request size is %d bytes, needs to be 14 bytes", len(c.Request))
-		}
-
-		outBuf := make([]byte, 16)
-		outBuf[0] = uint8(c.OperationCode)
-		copy(outBuf[1:15], c.Request)
-		if c.ServiceAction != nil {
-			outBuf[1] |= *c.ServiceAction & 0b11111
-		}
-		outBuf[15] = c.Control
-		return outBuf, nil
-	case c.OperationCode < 0xc0:
-		// Use CDB12 as defined in Table 7
-		if len(c.Request) != 10 {
-			return nil, fmt.Errorf("CDB12 request size is %d bytes, needs to be 10 bytes", len(c.Request))
-		}
-
-		outBuf := make([]byte, 12)
-		outBuf[0] = uint8(c.OperationCode)
-		copy(outBuf[1:11], c.Request)
-		if c.ServiceAction != nil {
-			outBuf[1] |= *c.ServiceAction & 0b11111
-		}
-		outBuf[11] = c.Control
-		return outBuf, nil
-	default:
-		return nil, errors.New("unable to encode CDB for given OperationCode")
-	}
-}
-
-// SenseKey represents the top-level status code of a SCSI sense response.
-type SenseKey uint8
-
-const (
-	NoSense        SenseKey = 0x0
-	RecoveredError SenseKey = 0x1
-	NotReady       SenseKey = 0x2
-	MediumError    SenseKey = 0x3
-	HardwareError  SenseKey = 0x4
-	IllegalRequest SenseKey = 0x5
-	UnitAttention  SenseKey = 0x6
-	DataProtect    SenseKey = 0x7
-	BlankCheck     SenseKey = 0x8
-	VendorSpecific SenseKey = 0x9
-	CopyAborted    SenseKey = 0xa
-	AbortedCommand SenseKey = 0xb
-	VolumeOverflow SenseKey = 0xd
-	Miscompare     SenseKey = 0xe
-	Completed      SenseKey = 0xf
-)
-
-var senseKeyDesc = map[SenseKey]string{
-	NoSense:        "no sense information",
-	RecoveredError: "recovered error",
-	NotReady:       "not ready",
-	MediumError:    "medium error",
-	HardwareError:  "hardware error",
-	IllegalRequest: "illegal request",
-	UnitAttention:  "unit attention",
-	DataProtect:    "data protected",
-	BlankCheck:     "blank check failed",
-	VendorSpecific: "vendor-specific error",
-	CopyAborted:    "third-party copy aborted",
-	AbortedCommand: "command aborted",
-	VolumeOverflow: "volume overflow",
-	Miscompare:     "miscompare",
-	Completed:      "completed",
-}
-
-func (s SenseKey) String() string {
-	if str, ok := senseKeyDesc[s]; ok {
-		return str
-	}
-	return fmt.Sprintf("sense key %xh", uint8(s))
-}
-
-// AdditionalSenseCode contains the additional sense key and qualifier in one
-// 16-bit value. The high 8 bits are the sense key, the bottom 8 bits the
-// qualifier.
-type AdditionalSenseCode uint16
-
-// ASK returns the raw Additional Sense Key
-func (a AdditionalSenseCode) ASK() uint8 {
-	return uint8(a >> 8)
-}
-
-// ASKQ returns the raw Additional Sense Key Qualifier
-func (a AdditionalSenseCode) ASKQ() uint8 {
-	return uint8(a & 0xFF)
-}
-
-// IsKey checks if the ASK portion of a is the same as the ASK portion of b.
-func (a AdditionalSenseCode) IsKey(b AdditionalSenseCode) bool {
-	return a.ASK() == b.ASK()
-}
-
-// String returns the textual representation of this ASK
-func (a AdditionalSenseCode) String() string {
-	if str, ok := additionalSenseCodeDesc[a]; ok {
-		return str
-	}
-	return fmt.Sprintf("unknown additional sense code %xh %xh", a.ASK(), a.ASKQ())
-}
-
-// FixedError is one type of error returned by a SCSI CHECK_CONDITION.
-// See also Table 48 in the standard.
-type FixedError struct {
-	Deferred                   bool
-	SenseKey                   SenseKey
-	Information                uint32
-	CommandSpecificInformation uint32
-	AdditionalSenseCode        AdditionalSenseCode
-}
-
-func (e FixedError) Error() string {
-	if e.AdditionalSenseCode == 0 {
-		return fmt.Sprintf("%v", e.SenseKey)
-	}
-	return fmt.Sprintf("%v: %v", e.SenseKey, e.AdditionalSenseCode)
-
-}
-
-// UnknownError is a type of error returned by SCSI which is not understood by this
-// library. This can be a vendor-specific or future error.
-type UnknownError struct {
-	RawSenseData []byte
-}
-
-func (e *UnknownError) Error() string {
-	return fmt.Sprintf("unknown SCSI error, raw sense data follows: %x", e.RawSenseData)
-}
diff --git a/metropolis/pkg/scsi/scsi_linux.go b/metropolis/pkg/scsi/scsi_linux.go
deleted file mode 100644
index 21af01d..0000000
--- a/metropolis/pkg/scsi/scsi_linux.go
+++ /dev/null
@@ -1,103 +0,0 @@
-//go:build linux
-
-package scsi
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-	"runtime"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// RawCommand issues a raw command against the device.
-func (d *Device) RawCommand(c *CommandDataBuffer) error {
-	cdb, err := c.Bytes()
-	if err != nil {
-		return fmt.Errorf("error encoding CDB: %w", err)
-	}
-	conn, err := d.fd.SyscallConn()
-	if err != nil {
-		return fmt.Errorf("unable to get RawConn: %w", err)
-	}
-	var dxferDir int32
-	switch c.DataTransferDirection {
-	case DataTransferNone:
-		dxferDir = SG_DXFER_NONE
-	case DataTransferFromDevice:
-		dxferDir = SG_DXFER_FROM_DEV
-	case DataTransferToDevice:
-		dxferDir = SG_DXFER_TO_DEV
-	case DataTransferBidirectional:
-		dxferDir = SG_DXFER_TO_FROM_DEV
-	default:
-		return errors.New("invalid DataTransferDirection")
-	}
-	var timeout uint32
-	if c.Timeout.Milliseconds() > math.MaxUint32 {
-		timeout = math.MaxUint32
-	}
-	if len(c.Data) > math.MaxUint32 {
-		return errors.New("payload larger than 2^32 bytes, unable to issue")
-	}
-	if len(cdb) > math.MaxUint8 {
-		return errors.New("CDB larger than 2^8 bytes, unable to issue")
-	}
-	var senseBuf [32]byte
-
-	var ioctlPins runtime.Pinner
-	ioctlPins.Pin(&c.Data[0])
-	ioctlPins.Pin(&cdb[0])
-	ioctlPins.Pin(&senseBuf[0])
-	defer ioctlPins.Unpin()
-
-	cmdRaw := sgIOHdr{
-		Interface_id:    'S',
-		Dxfer_direction: dxferDir,
-		Dxfer_len:       uint32(len(c.Data)),
-		Dxferp:          uintptr(unsafe.Pointer(&c.Data[0])),
-		Cmd_len:         uint8(len(cdb)),
-		Cmdp:            uintptr(unsafe.Pointer(&cdb[0])),
-		Mx_sb_len:       uint8(len(senseBuf)),
-		Sbp:             uintptr(unsafe.Pointer(&senseBuf[0])),
-		Timeout:         timeout,
-	}
-	var errno unix.Errno
-	err = conn.Control(func(fd uintptr) {
-		_, _, errno = unix.Syscall(unix.SYS_IOCTL, fd, SG_IO, uintptr(unsafe.Pointer(&cmdRaw)))
-	})
-	runtime.KeepAlive(cmdRaw)
-	runtime.KeepAlive(c.Data)
-	runtime.KeepAlive(senseBuf)
-	runtime.KeepAlive(cdb)
-	if err != nil {
-		return fmt.Errorf("unable to get fd: %w", err)
-	}
-	if errno != 0 {
-		return errno
-	}
-	if cmdRaw.Masked_status != 0 {
-		if senseBuf[0] == 0x70 || senseBuf[0] == 0x71 {
-			err := &FixedError{
-				Deferred:    senseBuf[0] == 0x71,
-				SenseKey:    SenseKey(senseBuf[2] & 0b1111),
-				Information: binary.BigEndian.Uint32(senseBuf[3:7]),
-			}
-			length := int(senseBuf[7])
-			if length >= 4 {
-				err.CommandSpecificInformation = binary.BigEndian.Uint32(senseBuf[8:12])
-				if length >= 6 {
-					err.AdditionalSenseCode = AdditionalSenseCode(uint16(senseBuf[12])<<8 | uint16(senseBuf[13]))
-				}
-			}
-			return err
-		}
-		return &UnknownError{
-			RawSenseData: senseBuf[:],
-		}
-	}
-	return nil
-}
diff --git a/metropolis/pkg/scsi/scsi_linux_defs.go b/metropolis/pkg/scsi/scsi_linux_defs.go
deleted file mode 100644
index 2d84e91..0000000
--- a/metropolis/pkg/scsi/scsi_linux_defs.go
+++ /dev/null
@@ -1,40 +0,0 @@
-//go:build linux
-
-// Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs scsi_linux_defs1.go
-
-package scsi
-
-const (
-	SG_IO                = 0x2285
-	SG_DXFER_NONE        = -0x1
-	SG_DXFER_TO_DEV      = -0x2
-	SG_DXFER_FROM_DEV    = -0x3
-	SG_DXFER_TO_FROM_DEV = -0x4
-)
-
-type sgIOHdr struct {
-	Interface_id    int32
-	Dxfer_direction int32
-	Cmd_len         uint8
-	Mx_sb_len       uint8
-	Iovec_count     uint16
-	Dxfer_len       uint32
-	Dxferp          uintptr
-	Cmdp            uintptr
-	Sbp             uintptr
-	Timeout         uint32
-	Flags           uint32
-	Pack_id         int32
-	Usr_ptr         uintptr
-	Status          uint8
-	Masked_status   uint8
-	Msg_status      uint8
-	Sb_len_wr       uint8
-	Host_status     uint16
-	Driver_status   uint16
-	Resid           int32
-	Duration        uint32
-	Info            uint32
-	Pad_cgo_0       [4]byte
-}
diff --git a/metropolis/pkg/scsi/scsi_linux_defs1.go b/metropolis/pkg/scsi/scsi_linux_defs1.go
deleted file mode 100644
index e2e410d..0000000
--- a/metropolis/pkg/scsi/scsi_linux_defs1.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build ignore
-
-package scsi
-
-// #include <scsi/sg.h>
-import "C"
-
-const (
-	SG_IO                = C.SG_IO
-	SG_DXFER_NONE        = C.SG_DXFER_NONE
-	SG_DXFER_TO_DEV      = C.SG_DXFER_TO_DEV
-	SG_DXFER_FROM_DEV    = C.SG_DXFER_FROM_DEV
-	SG_DXFER_TO_FROM_DEV = C.SG_DXFER_TO_FROM_DEV
-)
-
-type sgIOHdr C.sg_io_hdr_t
diff --git a/metropolis/pkg/scsi/sensekeydata.go b/metropolis/pkg/scsi/sensekeydata.go
deleted file mode 100644
index 37a31c2..0000000
--- a/metropolis/pkg/scsi/sensekeydata.go
+++ /dev/null
@@ -1,1506 +0,0 @@
-package scsi
-
-// Generated from Table F.1
-const (
-	NoAdditionalSenseInformation                            AdditionalSenseCode = 0x0000
-	FilemarkDetected                                        AdditionalSenseCode = 0x0001
-	EndOfPartitionmediumDetected                            AdditionalSenseCode = 0x0002
-	SetmarkDetected                                         AdditionalSenseCode = 0x0003
-	BeginningOfPartitionmediumDetected                      AdditionalSenseCode = 0x0004
-	EndOfDataDetected                                       AdditionalSenseCode = 0x0005
-	IoProcessTerminated                                     AdditionalSenseCode = 0x0006
-	ProgrammableEarlyWarningDetected                        AdditionalSenseCode = 0x0007
-	AudioPlayOperationInProgress                            AdditionalSenseCode = 0x0011
-	AudioPlayOperationPaused                                AdditionalSenseCode = 0x0012
-	AudioPlayOperationSuccessfullyCompleted                 AdditionalSenseCode = 0x0013
-	AudioPlayOperationStoppedDueToError                     AdditionalSenseCode = 0x0014
-	NoCurrentAudioStatusToReturn                            AdditionalSenseCode = 0x0015
-	OperationInProgress                                     AdditionalSenseCode = 0x0016
-	CleaningRequested                                       AdditionalSenseCode = 0x0017
-	EraseOperationInProgress                                AdditionalSenseCode = 0x0018
-	LocateOperationInProgress                               AdditionalSenseCode = 0x0019
-	RewindOperationInProgress                               AdditionalSenseCode = 0x001a
-	SetCapacityOperationInProgress                          AdditionalSenseCode = 0x001b
-	VerifyOperationInProgress                               AdditionalSenseCode = 0x001c
-	AtaPassThroughInformationAvailable                      AdditionalSenseCode = 0x001d
-	ConflictingSaCreationRequest                            AdditionalSenseCode = 0x001e
-	LogicalUnitTransitioningToAnotherPowerCondition         AdditionalSenseCode = 0x001f
-	ExtendedCopyInformationAvailable                        AdditionalSenseCode = 0x0020
-	AtomicCommandAbortedDueToAca                            AdditionalSenseCode = 0x0021
-	DeferredMicrocodeIsPending                              AdditionalSenseCode = 0x0022
-	NoIndexsectorSignal                                     AdditionalSenseCode = 0x0100
-	NoSeekComplete                                          AdditionalSenseCode = 0x0200
-	PeripheralDeviceWriteFault                              AdditionalSenseCode = 0x0300
-	NoWriteCurrent                                          AdditionalSenseCode = 0x0301
-	ExcessiveWriteErrors                                    AdditionalSenseCode = 0x0302
-	LogicalUnitNotReadyCauseNotReportable                   AdditionalSenseCode = 0x0400
-	LogicalUnitIsInProcessOfBecomingReady                   AdditionalSenseCode = 0x0401
-	LogicalUnitNotReadyInitializingCommandRequired          AdditionalSenseCode = 0x0402
-	LogicalUnitNotReadyManualInterventionRequired           AdditionalSenseCode = 0x0403
-	LogicalUnitNotReadyFormatInProgress                     AdditionalSenseCode = 0x0404
-	LogicalUnitNotReadyRebuildInProgress                    AdditionalSenseCode = 0x0405
-	LogicalUnitNotReadyRecalculationInProgress              AdditionalSenseCode = 0x0406
-	LogicalUnitNotReadyOperationInProgress                  AdditionalSenseCode = 0x0407
-	LogicalUnitNotReadyLongWriteInProgress                  AdditionalSenseCode = 0x0408
-	LogicalUnitNotReadySelfTestInProgress                   AdditionalSenseCode = 0x0409
-	LogicalUnitNotAccessibleAsymmetricAccessStateTransition AdditionalSenseCode = 0x040a
-	LogicalUnitNotAccessibleTargetPortInStandbyState        AdditionalSenseCode = 0x040b
-	LogicalUnitNotAccessibleTargetPortInUnavailableState    AdditionalSenseCode = 0x040c
-	LogicalUnitNotReadyStructureCheckRequired               AdditionalSenseCode = 0x040d
-	LogicalUnitNotReadySecuritySessionInProgress            AdditionalSenseCode = 0x040e
-	LogicalUnitNotReadyAuxiliaryMemoryNotAccessible         AdditionalSenseCode = 0x0410
-	LogicalUnitNotReadyNotifyenableSpinupRequired           AdditionalSenseCode = 0x0411
-	LogicalUnitNotReadyOffline                              AdditionalSenseCode = 0x0412
-	LogicalUnitNotReadySaCreationInProgress                 AdditionalSenseCode = 0x0413
-	LogicalUnitNotReadySpaceAllocationInProgress            AdditionalSenseCode = 0x0414
-	LogicalUnitNotReadyRoboticsDisabled                     AdditionalSenseCode = 0x0415
-	LogicalUnitNotReadyConfigurationRequired                AdditionalSenseCode = 0x0416
-	LogicalUnitNotReadyCalibrationRequired                  AdditionalSenseCode = 0x0417
-	LogicalUnitNotReadyADoorIsOpen                          AdditionalSenseCode = 0x0418
-	LogicalUnitNotReadyOperatingInSequentialMode            AdditionalSenseCode = 0x0419
-	LogicalUnitNotReadyStartStopUnitCommandInProgress       AdditionalSenseCode = 0x041a
-	LogicalUnitNotReadySanitizeInProgress                   AdditionalSenseCode = 0x041b
-	LogicalUnitNotReadyAdditionalPowerUseNotYetGranted      AdditionalSenseCode = 0x041c
-	LogicalUnitNotReadyConfigurationInProgress              AdditionalSenseCode = 0x041d
-	LogicalUnitNotReadyMicrocodeActivationRequired          AdditionalSenseCode = 0x041e
-	LogicalUnitNotReadyMicrocodeDownloadRequired            AdditionalSenseCode = 0x041f
-	LogicalUnitNotReadyLogicalUnitResetRequired             AdditionalSenseCode = 0x0420
-	LogicalUnitNotReadyHardResetRequired                    AdditionalSenseCode = 0x0421
-	LogicalUnitNotReadyPowerCycleRequired                   AdditionalSenseCode = 0x0422
-	LogicalUnitNotReadyAffiliationRequired                  AdditionalSenseCode = 0x0423
-	DepopulationInProgress                                  AdditionalSenseCode = 0x0424
-	LogicalUnitDoesNotRespondToSelection                    AdditionalSenseCode = 0x0500
-	NoReferencePositionFound                                AdditionalSenseCode = 0x0600
-	MultiplePeripheralDevicesSelected                       AdditionalSenseCode = 0x0700
-	LogicalUnitCommunicationFailure                         AdditionalSenseCode = 0x0800
-	LogicalUnitCommunicationTimeOut                         AdditionalSenseCode = 0x0801
-	LogicalUnitCommunicationParityError                     AdditionalSenseCode = 0x0802
-	LogicalUnitCommunicationCrcErrorultraDma32              AdditionalSenseCode = 0x0803
-	UnreachableCopyTarget                                   AdditionalSenseCode = 0x0804
-	TrackFollowingError                                     AdditionalSenseCode = 0x0900
-	TrackingServoFailure                                    AdditionalSenseCode = 0x0901
-	FocusServoFailure                                       AdditionalSenseCode = 0x0902
-	SpindleServoFailure                                     AdditionalSenseCode = 0x0903
-	HeadSelectFault                                         AdditionalSenseCode = 0x0904
-	VibrationInducedTrackingError                           AdditionalSenseCode = 0x0905
-	ErrorLogOverflow                                        AdditionalSenseCode = 0x0a00
-	Warning                                                 AdditionalSenseCode = 0x0b00
-	WarningSpecifiedTemperatureExceeded                     AdditionalSenseCode = 0x0b01
-	WarningEnclosureDegraded                                AdditionalSenseCode = 0x0b02
-	WarningBackgroundSelfTestFailed                         AdditionalSenseCode = 0x0b03
-	WarningBackgroundPreScanDetectedMediumError             AdditionalSenseCode = 0x0b04
-	WarningBackgroundMediumScanDetectedMediumError          AdditionalSenseCode = 0x0b05
-	WarningNonVolatileCacheNowVolatile                      AdditionalSenseCode = 0x0b06
-	WarningDegradedPowerToNonVolatileCache                  AdditionalSenseCode = 0x0b07
-	WarningPowerLossExpected                                AdditionalSenseCode = 0x0b08
-	WarningDeviceStatisticsNotificationActive               AdditionalSenseCode = 0x0b09
-	WarningHighCriticalTemperatureLimitExceeded             AdditionalSenseCode = 0x0b0a
-	WarningLowCriticalTemperatureLimitExceeded              AdditionalSenseCode = 0x0b0b
-	WarningHighOperatingTemperatureLimitExceeded            AdditionalSenseCode = 0x0b0c
-	WarningLowOperatingTemperatureLimitExceeded             AdditionalSenseCode = 0x0b0d
-	WarningHighCriticalHumidityLimitExceeded                AdditionalSenseCode = 0x0b0e
-	WarningLowCriticalHumidityLimitExceeded                 AdditionalSenseCode = 0x0b0f
-	WarningHighOperatingHumidityLimitExceeded               AdditionalSenseCode = 0x0b10
-	WarningLowOperatingHumidityLimitExceeded                AdditionalSenseCode = 0x0b11
-	WarningMicrocodeSecurityAtRisk                          AdditionalSenseCode = 0x0b12
-	WarningMicrocodeDigitalSignatureValidationFailure       AdditionalSenseCode = 0x0b13
-	WarningPhysicalElementStatusChange                      AdditionalSenseCode = 0x0b14
-	WriteError                                              AdditionalSenseCode = 0x0c00
-	WriteErrorRecoveredWithAutoReallocation                 AdditionalSenseCode = 0x0c01
-	WriteErrorAutoReallocationFailed                        AdditionalSenseCode = 0x0c02
-	WriteErrorRecommendReassignment                         AdditionalSenseCode = 0x0c03
-	CompressionCheckMiscompareError                         AdditionalSenseCode = 0x0c04
-	DataExpansionOccurredDuringCompression                  AdditionalSenseCode = 0x0c05
-	BlockNotCompressible                                    AdditionalSenseCode = 0x0c06
-	WriteErrorRecoveryNeeded                                AdditionalSenseCode = 0x0c07
-	WriteErrorRecoveryFailed                                AdditionalSenseCode = 0x0c08
-	WriteErrorLossOfStreaming                               AdditionalSenseCode = 0x0c09
-	WriteErrorPaddingBlocksAdded                            AdditionalSenseCode = 0x0c0a
-	AuxiliaryMemoryWriteError                               AdditionalSenseCode = 0x0c0b
-	WriteErrorUnexpectedUnsolicitedData                     AdditionalSenseCode = 0x0c0c
-	WriteErrorNotEnoughUnsolicitedData                      AdditionalSenseCode = 0x0c0d
-	MultipleWriteErrors                                     AdditionalSenseCode = 0x0c0e
-	DefectsInErrorWindow                                    AdditionalSenseCode = 0x0c0f
-	IncompleteMultipleAtomicWriteOperations                 AdditionalSenseCode = 0x0c10
-	WriteErrorRecoveryScanNeeded                            AdditionalSenseCode = 0x0c11
-	WriteErrorInsufficientZoneResources                     AdditionalSenseCode = 0x0c12
-	ErrorDetectedByThirdPartyTemporaryInitiator             AdditionalSenseCode = 0x0d00
-	ThirdPartyDeviceFailure                                 AdditionalSenseCode = 0x0d01
-	CopyTargetDeviceNotReachable                            AdditionalSenseCode = 0x0d02
-	IncorrectCopyTargetDeviceType                           AdditionalSenseCode = 0x0d03
-	CopyTargetDeviceDataUnderrun                            AdditionalSenseCode = 0x0d04
-	CopyTargetDeviceDataOverrun                             AdditionalSenseCode = 0x0d05
-	InvalidInformationUnit                                  AdditionalSenseCode = 0x0e00
-	InformationUnitTooShort                                 AdditionalSenseCode = 0x0e01
-	InformationUnitTooLong                                  AdditionalSenseCode = 0x0e02
-	InvalidFieldInCommandInformationUnit                    AdditionalSenseCode = 0x0e03
-	IdCrcOrEccError                                         AdditionalSenseCode = 0x1000
-	LogicalBlockGuardCheckFailed                            AdditionalSenseCode = 0x1001
-	LogicalBlockApplicationTagCheckFailed                   AdditionalSenseCode = 0x1002
-	LogicalBlockReferenceTagCheckFailed                     AdditionalSenseCode = 0x1003
-	LogicalBlockProtectionErrorOnRecoverBufferedData        AdditionalSenseCode = 0x1004
-	LogicalBlockProtectionMethodError                       AdditionalSenseCode = 0x1005
-	UnrecoveredReadError                                    AdditionalSenseCode = 0x1100
-	ReadRetriesExhausted                                    AdditionalSenseCode = 0x1101
-	ErrorTooLongToCorrect                                   AdditionalSenseCode = 0x1102
-	MultipleReadErrors                                      AdditionalSenseCode = 0x1103
-	UnrecoveredReadErrorAutoReallocateFailed                AdditionalSenseCode = 0x1104
-	LEcUncorrectableError                                   AdditionalSenseCode = 0x1105
-	CircUnrecoveredError                                    AdditionalSenseCode = 0x1106
-	DataReSynchronizationError                              AdditionalSenseCode = 0x1107
-	IncompleteBlockRead                                     AdditionalSenseCode = 0x1108
-	NoGapFound                                              AdditionalSenseCode = 0x1109
-	MiscorrectedError                                       AdditionalSenseCode = 0x110a
-	UnrecoveredReadErrorRecommendReassignment               AdditionalSenseCode = 0x110b
-	UnrecoveredReadErrorRecommendRewriteTheData             AdditionalSenseCode = 0x110c
-	DeCompressionCrcError                                   AdditionalSenseCode = 0x110d
-	CannotDecompressUsingDeclaredAlgorithm                  AdditionalSenseCode = 0x110e
-	ErrorReadingUpceanNumber                                AdditionalSenseCode = 0x110f
-	ErrorReadingIsrcNumber                                  AdditionalSenseCode = 0x1110
-	ReadErrorLossOfStreaming                                AdditionalSenseCode = 0x1111
-	AuxiliaryMemoryReadError                                AdditionalSenseCode = 0x1112
-	ReadErrorFailedRetransmissionRequest                    AdditionalSenseCode = 0x1113
-	ReadErrorLbaMarkedBadByApplicationClient                AdditionalSenseCode = 0x1114
-	WriteAfterSanitizeRequired                              AdditionalSenseCode = 0x1115
-	AddressMarkNotFoundForIdField                           AdditionalSenseCode = 0x1200
-	AddressMarkNotFoundForDataField                         AdditionalSenseCode = 0x1300
-	RecordedEntityNotFound                                  AdditionalSenseCode = 0x1400
-	RecordNotFound                                          AdditionalSenseCode = 0x1401
-	FilemarkOrSetmarkNotFound                               AdditionalSenseCode = 0x1402
-	EndOfDataNotFound                                       AdditionalSenseCode = 0x1403
-	BlockSequenceError                                      AdditionalSenseCode = 0x1404
-	RecordNotFoundRecommendReassignment                     AdditionalSenseCode = 0x1405
-	RecordNotFoundDataAutoReallocated                       AdditionalSenseCode = 0x1406
-	LocateOperationFailure                                  AdditionalSenseCode = 0x1407
-	RandomPositioningError                                  AdditionalSenseCode = 0x1500
-	MechanicalPositioningError                              AdditionalSenseCode = 0x1501
-	PositioningErrorDetectedByReadOfMedium                  AdditionalSenseCode = 0x1502
-	DataSynchronizationMarkError                            AdditionalSenseCode = 0x1600
-	DataSyncErrorDataRewritten                              AdditionalSenseCode = 0x1601
-	DataSyncErrorRecommendRewrite                           AdditionalSenseCode = 0x1602
-	DataSyncErrorDataAutoReallocated                        AdditionalSenseCode = 0x1603
-	DataSyncErrorRecommendReassignment                      AdditionalSenseCode = 0x1604
-	RecoveredDataWithNoErrorCorrectionApplied               AdditionalSenseCode = 0x1700
-	RecoveredDataWithRetries                                AdditionalSenseCode = 0x1701
-	RecoveredDataWithPositiveHeadOffset                     AdditionalSenseCode = 0x1702
-	RecoveredDataWithNegativeHeadOffset                     AdditionalSenseCode = 0x1703
-	RecoveredDataWithRetriesAndorCircApplied                AdditionalSenseCode = 0x1704
-	RecoveredDataUsingPreviousSectorId                      AdditionalSenseCode = 0x1705
-	RecoveredDataWithoutEccDataAutoReallocated              AdditionalSenseCode = 0x1706
-	RecoveredDataWithoutEccRecommendReassignment            AdditionalSenseCode = 0x1707
-	RecoveredDataWithoutEccRecommendRewrite                 AdditionalSenseCode = 0x1708
-	RecoveredDataWithoutEccDataRewritten                    AdditionalSenseCode = 0x1709
-	RecoveredDataWithErrorCorrectionApplied                 AdditionalSenseCode = 0x1800
-	RecoveredDataWithErrorCorrRetriesApplied                AdditionalSenseCode = 0x1801
-	RecoveredDataDataAutoReallocated                        AdditionalSenseCode = 0x1802
-	RecoveredDataWithCirc                                   AdditionalSenseCode = 0x1803
-	RecoveredDataWithLEc                                    AdditionalSenseCode = 0x1804
-	RecoveredDataRecommendReassignment                      AdditionalSenseCode = 0x1805
-	RecoveredDataRecommendRewrite                           AdditionalSenseCode = 0x1806
-	RecoveredDataWithEccDataRewritten                       AdditionalSenseCode = 0x1807
-	RecoveredDataWithLinking                                AdditionalSenseCode = 0x1808
-	DefectListError                                         AdditionalSenseCode = 0x1900
-	DefectListNotAvailable                                  AdditionalSenseCode = 0x1901
-	DefectListErrorInPrimaryList                            AdditionalSenseCode = 0x1902
-	DefectListErrorInGrownList                              AdditionalSenseCode = 0x1903
-	ParameterListLengthError                                AdditionalSenseCode = 0x1a00
-	SynchronousDataTransferError                            AdditionalSenseCode = 0x1b00
-	DefectListNotFound                                      AdditionalSenseCode = 0x1c00
-	PrimaryDefectListNotFound                               AdditionalSenseCode = 0x1c01
-	GrownDefectListNotFound                                 AdditionalSenseCode = 0x1c02
-	MiscompareDuringVerifyOperation                         AdditionalSenseCode = 0x1d00
-	MiscompareVerifyOfUnmappedLba                           AdditionalSenseCode = 0x1d01
-	RecoveredIdWithEccCorrection                            AdditionalSenseCode = 0x1e00
-	PartialDefectListTransfer                               AdditionalSenseCode = 0x1f00
-	InvalidCommandOperationCode                             AdditionalSenseCode = 0x2000
-	AccessDeniedInitiatorPendingEnrolled                    AdditionalSenseCode = 0x2001
-	AccessDeniedNoAccessRights                              AdditionalSenseCode = 0x2002
-	AccessDeniedInvalidMgmtIdKey                            AdditionalSenseCode = 0x2003
-	IllegalCommandWhileInWriteCapableState                  AdditionalSenseCode = 0x2004
-	IllegalCommandWhileInExplicitAddressMode                AdditionalSenseCode = 0x2006
-	IllegalCommandWhileInImplicitAddressMode                AdditionalSenseCode = 0x2007
-	AccessDeniedEnrollmentConflict                          AdditionalSenseCode = 0x2008
-	AccessDeniedInvalidLuIdentifier                         AdditionalSenseCode = 0x2009
-	AccessDeniedInvalidProxyToken                           AdditionalSenseCode = 0x200a
-	AccessDeniedAclLunConflict                              AdditionalSenseCode = 0x200b
-	IllegalCommandWhenNotInAppendOnlyMode                   AdditionalSenseCode = 0x200c
-	NotAnAdministrativeLogicalUnit                          AdditionalSenseCode = 0x200d
-	NotASubsidiaryLogicalUnit                               AdditionalSenseCode = 0x200e
-	NotAConglomerateLogicalUnit                             AdditionalSenseCode = 0x200f
-	LogicalBlockAddressOutOfRange                           AdditionalSenseCode = 0x2100
-	InvalidElementAddress                                   AdditionalSenseCode = 0x2101
-	InvalidAddressForWrite                                  AdditionalSenseCode = 0x2102
-	InvalidWriteCrossingLayerJump                           AdditionalSenseCode = 0x2103
-	UnalignedWriteCommand                                   AdditionalSenseCode = 0x2104
-	WriteBoundaryViolation                                  AdditionalSenseCode = 0x2105
-	AttemptToReadInvalidData                                AdditionalSenseCode = 0x2106
-	ReadBoundaryViolation                                   AdditionalSenseCode = 0x2107
-	MisalignedWriteCommand                                  AdditionalSenseCode = 0x2108
-	IllegalFunctionuse20002400Or2600                        AdditionalSenseCode = 0x2200
-	InvalidTokenOperationCauseNotReportable                 AdditionalSenseCode = 0x2300
-	InvalidTokenOperationUnsupportedTokenType               AdditionalSenseCode = 0x2301
-	InvalidTokenOperationRemoteTokenUsageNotSupported       AdditionalSenseCode = 0x2302
-	InvalidTokenOperationRemoteRodTokenCreationNotSupported AdditionalSenseCode = 0x2303
-	InvalidTokenOperationTokenUnknown                       AdditionalSenseCode = 0x2304
-	InvalidTokenOperationTokenCorrupt                       AdditionalSenseCode = 0x2305
-	InvalidTokenOperationTokenRevoked                       AdditionalSenseCode = 0x2306
-	InvalidTokenOperationTokenExpired                       AdditionalSenseCode = 0x2307
-	InvalidTokenOperationTokenCancelled                     AdditionalSenseCode = 0x2308
-	InvalidTokenOperationTokenDeleted                       AdditionalSenseCode = 0x2309
-	InvalidTokenOperationInvalidTokenLength                 AdditionalSenseCode = 0x230a
-	InvalidFieldInCdb                                       AdditionalSenseCode = 0x2400
-	CdbDecryptionError                                      AdditionalSenseCode = 0x2401
-	SecurityAuditValueFrozen                                AdditionalSenseCode = 0x2404
-	SecurityWorkingKeyFrozen                                AdditionalSenseCode = 0x2405
-	NonceNotUnique                                          AdditionalSenseCode = 0x2406
-	NonceTimestampOutOfRange                                AdditionalSenseCode = 0x2407
-	InvalidXcdb                                             AdditionalSenseCode = 0x2408
-	InvalidFastFormat                                       AdditionalSenseCode = 0x2409
-	LogicalUnitNotSupported                                 AdditionalSenseCode = 0x2500
-	InvalidFieldInParameterList                             AdditionalSenseCode = 0x2600
-	ParameterNotSupported                                   AdditionalSenseCode = 0x2601
-	ParameterValueInvalid                                   AdditionalSenseCode = 0x2602
-	ThresholdParametersNotSupported                         AdditionalSenseCode = 0x2603
-	InvalidReleaseOfPersistentReservation                   AdditionalSenseCode = 0x2604
-	DataDecryptionError                                     AdditionalSenseCode = 0x2605
-	TooManyTargetDescriptors                                AdditionalSenseCode = 0x2606
-	UnsupportedTargetDescriptorTypeCode                     AdditionalSenseCode = 0x2607
-	TooManySegmentDescriptors                               AdditionalSenseCode = 0x2608
-	UnsupportedSegmentDescriptorTypeCode                    AdditionalSenseCode = 0x2609
-	UnexpectedInexactSegment                                AdditionalSenseCode = 0x260a
-	InlineDataLengthExceeded                                AdditionalSenseCode = 0x260b
-	InvalidOperationForCopySourceOrDestination              AdditionalSenseCode = 0x260c
-	CopySegmentGranularityViolation                         AdditionalSenseCode = 0x260d
-	InvalidParameterWhilePortIsEnabled                      AdditionalSenseCode = 0x260e
-	InvalidDataOutBufferIntegrityCheckValue                 AdditionalSenseCode = 0x260f
-	DataDecryptionKeyFailLimitReached                       AdditionalSenseCode = 0x2610
-	IncompleteKeyAssociatedDataSet                          AdditionalSenseCode = 0x2611
-	VendorSpecificKeyReferenceNotFound                      AdditionalSenseCode = 0x2612
-	ApplicationTagModePageIsInvalid                         AdditionalSenseCode = 0x2613
-	TapeStreamMirroringPrevented                            AdditionalSenseCode = 0x2614
-	CopySourceOrCopyDestinationNotAuthorized                AdditionalSenseCode = 0x2615
-	WriteProtected                                          AdditionalSenseCode = 0x2700
-	HardwareWriteProtected                                  AdditionalSenseCode = 0x2701
-	LogicalUnitSoftwareWriteProtected                       AdditionalSenseCode = 0x2702
-	AssociatedWriteProtect                                  AdditionalSenseCode = 0x2703
-	PersistentWriteProtect                                  AdditionalSenseCode = 0x2704
-	PermanentWriteProtect                                   AdditionalSenseCode = 0x2705
-	ConditionalWriteProtect                                 AdditionalSenseCode = 0x2706
-	SpaceAllocationFailedWriteProtect                       AdditionalSenseCode = 0x2707
-	ZoneIsReadOnly                                          AdditionalSenseCode = 0x2708
-	NotReadyToReadyChangeMediumMayHaveChanged               AdditionalSenseCode = 0x2800
-	ImportOrExportElementAccessed                           AdditionalSenseCode = 0x2801
-	FormatLayerMayHaveChanged                               AdditionalSenseCode = 0x2802
-	ImportexportElementAccessedMediumChanged                AdditionalSenseCode = 0x2803
-	PowerOnResetOrBusDeviceResetOccurred                    AdditionalSenseCode = 0x2900
-	PowerOnOccurred                                         AdditionalSenseCode = 0x2901
-	ScsiBusResetOccurred                                    AdditionalSenseCode = 0x2902
-	BusDeviceResetFunctionOccurred                          AdditionalSenseCode = 0x2903
-	DeviceInternalReset                                     AdditionalSenseCode = 0x2904
-	TransceiverModeChangedToSingleEnded                     AdditionalSenseCode = 0x2905
-	TransceiverModeChangedToLvd                             AdditionalSenseCode = 0x2906
-	ITNexusLossOccurred                                     AdditionalSenseCode = 0x2907
-	ParametersChanged                                       AdditionalSenseCode = 0x2a00
-	ModeParametersChanged                                   AdditionalSenseCode = 0x2a01
-	LogParametersChanged                                    AdditionalSenseCode = 0x2a02
-	ReservationsPreempted                                   AdditionalSenseCode = 0x2a03
-	ReservationsReleased                                    AdditionalSenseCode = 0x2a04
-	RegistrationsPreempted                                  AdditionalSenseCode = 0x2a05
-	AsymmetricAccessStateChanged                            AdditionalSenseCode = 0x2a06
-	ImplicitAsymmetricAccessStateTransitionFailed           AdditionalSenseCode = 0x2a07
-	PriorityChanged                                         AdditionalSenseCode = 0x2a08
-	CapacityDataHasChanged                                  AdditionalSenseCode = 0x2a09
-	ErrorHistoryITNexusCleared                              AdditionalSenseCode = 0x2a0a
-	ErrorHistorySnapshotReleased                            AdditionalSenseCode = 0x2a0b
-	ErrorRecoveryAttributesHaveChanged                      AdditionalSenseCode = 0x2a0c
-	DataEncryptionCapabilitiesChanged                       AdditionalSenseCode = 0x2a0d
-	TimestampChanged                                        AdditionalSenseCode = 0x2a10
-	DataEncryptionParametersChangedByAnotherITNexus         AdditionalSenseCode = 0x2a11
-	DataEncryptionParametersChangedByVendorSpecificEvent    AdditionalSenseCode = 0x2a12
-	DataEncryptionKeyInstanceCounterHasChanged              AdditionalSenseCode = 0x2a13
-	SaCreationCapabilitiesDataHasChanged                    AdditionalSenseCode = 0x2a14
-	MediumRemovalPreventionPreempted                        AdditionalSenseCode = 0x2a15
-	ZoneResetWritePointerRecommended                        AdditionalSenseCode = 0x2a16
-	CopyCannotExecuteSinceHostCannotDisconnect              AdditionalSenseCode = 0x2b00
-	CommandSequenceError                                    AdditionalSenseCode = 0x2c00
-	TooManyWindowsSpecified                                 AdditionalSenseCode = 0x2c01
-	InvalidCombinationOfWindowsSpecified                    AdditionalSenseCode = 0x2c02
-	CurrentProgramAreaIsNotEmpty                            AdditionalSenseCode = 0x2c03
-	CurrentProgramAreaIsEmpty                               AdditionalSenseCode = 0x2c04
-	IllegalPowerConditionRequest                            AdditionalSenseCode = 0x2c05
-	PersistentPreventConflict                               AdditionalSenseCode = 0x2c06
-	PreviousBusyStatus                                      AdditionalSenseCode = 0x2c07
-	PreviousTaskSetFullStatus                               AdditionalSenseCode = 0x2c08
-	PreviousReservationConflictStatus                       AdditionalSenseCode = 0x2c09
-	PartitionOrCollectionContainsUserObjects                AdditionalSenseCode = 0x2c0a
-	NotReserved                                             AdditionalSenseCode = 0x2c0b
-	OrwriteGenerationDoesNotMatch                           AdditionalSenseCode = 0x2c0c
-	ResetWritePointerNotAllowed                             AdditionalSenseCode = 0x2c0d
-	ZoneIsOffline                                           AdditionalSenseCode = 0x2c0e
-	StreamNotOpen                                           AdditionalSenseCode = 0x2c0f
-	UnwrittenDataInZone                                     AdditionalSenseCode = 0x2c10
-	DescriptorFormatSenseDataRequired                       AdditionalSenseCode = 0x2c11
-	OverwriteErrorOnUpdateInPlace                           AdditionalSenseCode = 0x2d00
-	InsufficientTimeForOperation                            AdditionalSenseCode = 0x2e00
-	CommandTimeoutBeforeProcessing                          AdditionalSenseCode = 0x2e01
-	CommandTimeoutDuringProcessing                          AdditionalSenseCode = 0x2e02
-	CommandTimeoutDuringProcessingDueToErrorRecovery        AdditionalSenseCode = 0x2e03
-	CommandsClearedByAnotherInitiator                       AdditionalSenseCode = 0x2f00
-	CommandsClearedByPowerLossNotification                  AdditionalSenseCode = 0x2f01
-	CommandsClearedByDeviceServer                           AdditionalSenseCode = 0x2f02
-	SomeCommandsClearedByQueuingLayerEvent                  AdditionalSenseCode = 0x2f03
-	IncompatibleMediumInstalled                             AdditionalSenseCode = 0x3000
-	CannotReadMediumUnknownFormat                           AdditionalSenseCode = 0x3001
-	CannotReadMediumIncompatibleFormat                      AdditionalSenseCode = 0x3002
-	CleaningCartridgeInstalled                              AdditionalSenseCode = 0x3003
-	CannotWriteMediumUnknownFormat                          AdditionalSenseCode = 0x3004
-	CannotWriteMediumIncompatibleFormat                     AdditionalSenseCode = 0x3005
-	CannotFormatMediumIncompatibleMedium                    AdditionalSenseCode = 0x3006
-	CleaningFailure                                         AdditionalSenseCode = 0x3007
-	CannotWriteApplicationCodeMismatch                      AdditionalSenseCode = 0x3008
-	CurrentSessionNotFixatedForAppend                       AdditionalSenseCode = 0x3009
-	CleaningRequestRejected                                 AdditionalSenseCode = 0x300a
-	WormMediumOverwriteAttempted                            AdditionalSenseCode = 0x300c
-	WormMediumIntegrityCheck                                AdditionalSenseCode = 0x300d
-	MediumNotFormatted                                      AdditionalSenseCode = 0x3010
-	IncompatibleVolumeType                                  AdditionalSenseCode = 0x3011
-	IncompatibleVolumeQualifier                             AdditionalSenseCode = 0x3012
-	CleaningVolumeExpired                                   AdditionalSenseCode = 0x3013
-	MediumFormatCorrupted                                   AdditionalSenseCode = 0x3100
-	FormatCommandFailed                                     AdditionalSenseCode = 0x3101
-	ZonedFormattingFailedDueToSpareLinking                  AdditionalSenseCode = 0x3102
-	SanitizeCommandFailed                                   AdditionalSenseCode = 0x3103
-	DepopulationFailed                                      AdditionalSenseCode = 0x3104
-	NoDefectSpareLocationAvailable                          AdditionalSenseCode = 0x3200
-	DefectListUpdateFailure                                 AdditionalSenseCode = 0x3201
-	TapeLengthError                                         AdditionalSenseCode = 0x3300
-	EnclosureFailure                                        AdditionalSenseCode = 0x3400
-	EnclosureServicesFailure                                AdditionalSenseCode = 0x3500
-	UnsupportedEnclosureFunction                            AdditionalSenseCode = 0x3501
-	EnclosureServicesUnavailable                            AdditionalSenseCode = 0x3502
-	EnclosureServicesTransferFailure                        AdditionalSenseCode = 0x3503
-	EnclosureServicesTransferRefused                        AdditionalSenseCode = 0x3504
-	EnclosureServicesChecksumError                          AdditionalSenseCode = 0x3505
-	RibbonInkOrTonerFailure                                 AdditionalSenseCode = 0x3600
-	RoundedParameter                                        AdditionalSenseCode = 0x3700
-	EventStatusNotification                                 AdditionalSenseCode = 0x3800
-	EsnPowerManagementClassEvent                            AdditionalSenseCode = 0x3802
-	EsnMediaClassEvent                                      AdditionalSenseCode = 0x3804
-	EsnDeviceBusyClassEvent                                 AdditionalSenseCode = 0x3806
-	ThinProvisioningSoftThresholdReached                    AdditionalSenseCode = 0x3807
-	SavingParametersNotSupported                            AdditionalSenseCode = 0x3900
-	MediumNotPresent                                        AdditionalSenseCode = 0x3a00
-	MediumNotPresentTrayClosed                              AdditionalSenseCode = 0x3a01
-	MediumNotPresentTrayOpen                                AdditionalSenseCode = 0x3a02
-	MediumNotPresentLoadable                                AdditionalSenseCode = 0x3a03
-	MediumNotPresentMediumAuxiliaryMemoryAccessible         AdditionalSenseCode = 0x3a04
-	SequentialPositioningError                              AdditionalSenseCode = 0x3b00
-	TapePositionErrorAtBeginningOfMedium                    AdditionalSenseCode = 0x3b01
-	TapePositionErrorAtEndOfMedium                          AdditionalSenseCode = 0x3b02
-	TapeOrElectronicVerticalFormsUnitNotReady               AdditionalSenseCode = 0x3b03
-	SlewFailure                                             AdditionalSenseCode = 0x3b04
-	PaperJam                                                AdditionalSenseCode = 0x3b05
-	FailedToSenseTopOfForm                                  AdditionalSenseCode = 0x3b06
-	FailedToSenseBottomOfForm                               AdditionalSenseCode = 0x3b07
-	RepositionError                                         AdditionalSenseCode = 0x3b08
-	ReadPastEndOfMedium                                     AdditionalSenseCode = 0x3b09
-	ReadPastBeginningOfMedium                               AdditionalSenseCode = 0x3b0a
-	PositionPastEndOfMedium                                 AdditionalSenseCode = 0x3b0b
-	PositionPastBeginningOfMedium                           AdditionalSenseCode = 0x3b0c
-	MediumDestinationElementFull                            AdditionalSenseCode = 0x3b0d
-	MediumSourceElementEmpty                                AdditionalSenseCode = 0x3b0e
-	EndOfMediumReached                                      AdditionalSenseCode = 0x3b0f
-	MediumMagazineNotAccessible                             AdditionalSenseCode = 0x3b11
-	MediumMagazineRemoved                                   AdditionalSenseCode = 0x3b12
-	MediumMagazineInserted                                  AdditionalSenseCode = 0x3b13
-	MediumMagazineLocked                                    AdditionalSenseCode = 0x3b14
-	MediumMagazineUnlocked                                  AdditionalSenseCode = 0x3b15
-	MechanicalPositioningOrChangerError                     AdditionalSenseCode = 0x3b16
-	ReadPastEndOfUserObject                                 AdditionalSenseCode = 0x3b17
-	ElementDisabled                                         AdditionalSenseCode = 0x3b18
-	ElementEnabled                                          AdditionalSenseCode = 0x3b19
-	DataTransferDeviceRemoved                               AdditionalSenseCode = 0x3b1a
-	DataTransferDeviceInserted                              AdditionalSenseCode = 0x3b1b
-	TooManyLogicalObjectsOnPartitionToSupportOperation      AdditionalSenseCode = 0x3b1c
-	InvalidBitsInIdentifyMessage                            AdditionalSenseCode = 0x3d00
-	LogicalUnitHasNotSelfConfiguredYet                      AdditionalSenseCode = 0x3e00
-	LogicalUnitFailure                                      AdditionalSenseCode = 0x3e01
-	TimeoutOnLogicalUnit                                    AdditionalSenseCode = 0x3e02
-	LogicalUnitFailedSelfTest                               AdditionalSenseCode = 0x3e03
-	LogicalUnitUnableToUpdateSelfTestLog                    AdditionalSenseCode = 0x3e04
-	TargetOperatingConditionsHaveChanged                    AdditionalSenseCode = 0x3f00
-	MicrocodeHasBeenChanged                                 AdditionalSenseCode = 0x3f01
-	ChangedOperatingDefinition                              AdditionalSenseCode = 0x3f02
-	InquiryDataHasChanged                                   AdditionalSenseCode = 0x3f03
-	ComponentDeviceAttached                                 AdditionalSenseCode = 0x3f04
-	DeviceIdentifierChanged                                 AdditionalSenseCode = 0x3f05
-	RedundancyGroupCreatedOrModified                        AdditionalSenseCode = 0x3f06
-	RedundancyGroupDeleted                                  AdditionalSenseCode = 0x3f07
-	SpareCreatedOrModified                                  AdditionalSenseCode = 0x3f08
-	SpareDeleted                                            AdditionalSenseCode = 0x3f09
-	VolumeSetCreatedOrModified                              AdditionalSenseCode = 0x3f0a
-	VolumeSetDeleted                                        AdditionalSenseCode = 0x3f0b
-	VolumeSetDeassigned                                     AdditionalSenseCode = 0x3f0c
-	VolumeSetReassigned                                     AdditionalSenseCode = 0x3f0d
-	ReportedLunsDataHasChanged                              AdditionalSenseCode = 0x3f0e
-	EchoBufferOverwritten                                   AdditionalSenseCode = 0x3f0f
-	MediumLoadable                                          AdditionalSenseCode = 0x3f10
-	MediumAuxiliaryMemoryAccessible                         AdditionalSenseCode = 0x3f11
-	IscsiIpAddressAdded                                     AdditionalSenseCode = 0x3f12
-	IscsiIpAddressRemoved                                   AdditionalSenseCode = 0x3f13
-	IscsiIpAddressChanged                                   AdditionalSenseCode = 0x3f14
-	InspectReferralsSenseDescriptors                        AdditionalSenseCode = 0x3f15
-	MicrocodeHasBeenChangedWithoutReset                     AdditionalSenseCode = 0x3f16
-	ZoneTransitionToFull                                    AdditionalSenseCode = 0x3f17
-	BindCompleted                                           AdditionalSenseCode = 0x3f18
-	BindRedirected                                          AdditionalSenseCode = 0x3f19
-	SubsidiaryBindingChanged                                AdditionalSenseCode = 0x3f1a
-	RamFailureshouldUse40Nn                                 AdditionalSenseCode = 0x4000
-	DataPathFailureshouldUse40Nn                            AdditionalSenseCode = 0x4100
-	PowerOnOrSelfTestFailureshouldUse40Nn                   AdditionalSenseCode = 0x4200
-	MessageError                                            AdditionalSenseCode = 0x4300
-	InternalTargetFailure                                   AdditionalSenseCode = 0x4400
-	PersistentReservationInformationLost                    AdditionalSenseCode = 0x4401
-	AtaDeviceFailedSetFeatures                              AdditionalSenseCode = 0x4471
-	SelectOrReselectFailure                                 AdditionalSenseCode = 0x4500
-	UnsuccessfulSoftReset                                   AdditionalSenseCode = 0x4600
-	ScsiParityError                                         AdditionalSenseCode = 0x4700
-	DataPhaseCrcErrorDetected                               AdditionalSenseCode = 0x4701
-	ScsiParityErrorDetectedDuringStDataPhase                AdditionalSenseCode = 0x4702
-	InformationUnitIucrcErrorDetected                       AdditionalSenseCode = 0x4703
-	AsynchronousInformationProtectionErrorDetected          AdditionalSenseCode = 0x4704
-	ProtocolServiceCrcError                                 AdditionalSenseCode = 0x4705
-	PhyTestFunctionInProgress                               AdditionalSenseCode = 0x4706
-	SomeCommandsClearedByIscsiProtocolEvent                 AdditionalSenseCode = 0x477f
-	InitiatorDetectedErrorMessageReceived                   AdditionalSenseCode = 0x4800
-	InvalidMessageError                                     AdditionalSenseCode = 0x4900
-	CommandPhaseError                                       AdditionalSenseCode = 0x4a00
-	DataPhaseError                                          AdditionalSenseCode = 0x4b00
-	InvalidTargetPortTransferTagReceived                    AdditionalSenseCode = 0x4b01
-	TooMuchWriteData                                        AdditionalSenseCode = 0x4b02
-	AcknakTimeout                                           AdditionalSenseCode = 0x4b03
-	NakReceived                                             AdditionalSenseCode = 0x4b04
-	DataOffsetError                                         AdditionalSenseCode = 0x4b05
-	InitiatorResponseTimeout                                AdditionalSenseCode = 0x4b06
-	ConnectionLost                                          AdditionalSenseCode = 0x4b07
-	DataInBufferOverflowDataBufferSize                      AdditionalSenseCode = 0x4b08
-	DataInBufferOverflowDataBufferDescriptorArea            AdditionalSenseCode = 0x4b09
-	DataInBufferError                                       AdditionalSenseCode = 0x4b0a
-	DataOutBufferOverflowDataBufferSize                     AdditionalSenseCode = 0x4b0b
-	DataOutBufferOverflowDataBufferDescriptorArea           AdditionalSenseCode = 0x4b0c
-	DataOutBufferError                                      AdditionalSenseCode = 0x4b0d
-	PcieFabricError                                         AdditionalSenseCode = 0x4b0e
-	PcieCompletionTimeout                                   AdditionalSenseCode = 0x4b0f
-	PcieCompleterAbort                                      AdditionalSenseCode = 0x4b10
-	PciePoisonedTlpReceived                                 AdditionalSenseCode = 0x4b11
-	PcieEcrcCheckFailed                                     AdditionalSenseCode = 0x4b12
-	PcieUnsupportedRequest                                  AdditionalSenseCode = 0x4b13
-	PcieAcsViolation                                        AdditionalSenseCode = 0x4b14
-	PcieTlpPrefixBlocked                                    AdditionalSenseCode = 0x4b15
-	LogicalUnitFailedSelfConfiguration                      AdditionalSenseCode = 0x4c00
-	OverlappedCommandsAttempted                             AdditionalSenseCode = 0x4e00
-	WriteAppendError                                        AdditionalSenseCode = 0x5000
-	WriteAppendPositionError                                AdditionalSenseCode = 0x5001
-	PositionErrorRelatedToTiming                            AdditionalSenseCode = 0x5002
-	EraseFailure                                            AdditionalSenseCode = 0x5100
-	EraseFailureIncompleteEraseOperationDetected            AdditionalSenseCode = 0x5101
-	CartridgeFault                                          AdditionalSenseCode = 0x5200
-	MediaLoadOrEjectFailed                                  AdditionalSenseCode = 0x5300
-	UnloadTapeFailure                                       AdditionalSenseCode = 0x5301
-	MediumRemovalPrevented                                  AdditionalSenseCode = 0x5302
-	MediumRemovalPreventedByDataTransferElement             AdditionalSenseCode = 0x5303
-	MediumThreadOrUnthreadFailure                           AdditionalSenseCode = 0x5304
-	VolumeIdentifierInvalid                                 AdditionalSenseCode = 0x5305
-	VolumeIdentifierMissing                                 AdditionalSenseCode = 0x5306
-	DuplicateVolumeIdentifier                               AdditionalSenseCode = 0x5307
-	ElementStatusUnknown                                    AdditionalSenseCode = 0x5308
-	DataTransferDeviceErrorLoadFailed                       AdditionalSenseCode = 0x5309
-	DataTransferDeviceErrorUnloadFailed                     AdditionalSenseCode = 0x530a
-	DataTransferDeviceErrorUnloadMissing                    AdditionalSenseCode = 0x530b
-	DataTransferDeviceErrorEjectFailed                      AdditionalSenseCode = 0x530c
-	DataTransferDeviceErrorLibraryCommunicationFailed       AdditionalSenseCode = 0x530d
-	ScsiToHostSystemInterfaceFailure                        AdditionalSenseCode = 0x5400
-	SystemResourceFailure                                   AdditionalSenseCode = 0x5500
-	SystemBufferFull                                        AdditionalSenseCode = 0x5501
-	InsufficientReservationResources                        AdditionalSenseCode = 0x5502
-	InsufficientResources                                   AdditionalSenseCode = 0x5503
-	InsufficientRegistrationResources                       AdditionalSenseCode = 0x5504
-	InsufficientAccessControlResources                      AdditionalSenseCode = 0x5505
-	AuxiliaryMemoryOutOfSpace                               AdditionalSenseCode = 0x5506
-	QuotaError                                              AdditionalSenseCode = 0x5507
-	MaximumNumberOfSupplementalDecryptionKeysExceeded       AdditionalSenseCode = 0x5508
-	MediumAuxiliaryMemoryNotAccessible                      AdditionalSenseCode = 0x5509
-	DataCurrentlyUnavailable                                AdditionalSenseCode = 0x550a
-	InsufficientPowerForOperation                           AdditionalSenseCode = 0x550b
-	InsufficientResourcesToCreateRod                        AdditionalSenseCode = 0x550c
-	InsufficientResourcesToCreateRodToken                   AdditionalSenseCode = 0x550d
-	InsufficientZoneResources                               AdditionalSenseCode = 0x550e
-	InsufficientZoneResourcesToCompleteWrite                AdditionalSenseCode = 0x550f
-	MaximumNumberOfStreamsOpen                              AdditionalSenseCode = 0x5510
-	InsufficientResourcesToBind                             AdditionalSenseCode = 0x5511
-	UnableToRecoverTableOfContents                          AdditionalSenseCode = 0x5700
-	GenerationDoesNotExist                                  AdditionalSenseCode = 0x5800
-	UpdatedBlockRead                                        AdditionalSenseCode = 0x5900
-	OperatorRequestOrStateChangeInput                       AdditionalSenseCode = 0x5a00
-	OperatorMediumRemovalRequest                            AdditionalSenseCode = 0x5a01
-	OperatorSelectedWriteProtect                            AdditionalSenseCode = 0x5a02
-	OperatorSelectedWritePermit                             AdditionalSenseCode = 0x5a03
-	LogException                                            AdditionalSenseCode = 0x5b00
-	ThresholdConditionMet                                   AdditionalSenseCode = 0x5b01
-	LogCounterAtMaximum                                     AdditionalSenseCode = 0x5b02
-	LogListCodesExhausted                                   AdditionalSenseCode = 0x5b03
-	RplStatusChange                                         AdditionalSenseCode = 0x5c00
-	SpindlesSynchronized                                    AdditionalSenseCode = 0x5c01
-	SpindlesNotSynchronized                                 AdditionalSenseCode = 0x5c02
-	FailurePredictionThresholdExceeded                      AdditionalSenseCode = 0x5d00
-	MediaFailurePredictionThresholdExceeded                 AdditionalSenseCode = 0x5d01
-	LogicalUnitFailurePredictionThresholdExceeded           AdditionalSenseCode = 0x5d02
-	SpareAreaExhaustionPredictionThresholdExceeded          AdditionalSenseCode = 0x5d03
-	HardwareImpendingFailureGeneralHardDriveFailure         AdditionalSenseCode = 0x5d10
-	HardwareImpendingFailureDriveErrorRateTooHigh           AdditionalSenseCode = 0x5d11
-	HardwareImpendingFailureDataErrorRateTooHigh            AdditionalSenseCode = 0x5d12
-	HardwareImpendingFailureSeekErrorRateTooHigh            AdditionalSenseCode = 0x5d13
-	HardwareImpendingFailureTooManyBlockReassigns           AdditionalSenseCode = 0x5d14
-	HardwareImpendingFailureAccessTimesTooHigh              AdditionalSenseCode = 0x5d15
-	HardwareImpendingFailureStartUnitTimesTooHigh           AdditionalSenseCode = 0x5d16
-	HardwareImpendingFailureChannelParametrics              AdditionalSenseCode = 0x5d17
-	HardwareImpendingFailureControllerDetected              AdditionalSenseCode = 0x5d18
-	HardwareImpendingFailureThroughputPerformance           AdditionalSenseCode = 0x5d19
-	HardwareImpendingFailureSeekTimePerformance             AdditionalSenseCode = 0x5d1a
-	HardwareImpendingFailureSpinUpRetryCount                AdditionalSenseCode = 0x5d1b
-	HardwareImpendingFailureDriveCalibrationRetryCount      AdditionalSenseCode = 0x5d1c
-	HardwareImpendingFailurePowerLossProtectionCircuit      AdditionalSenseCode = 0x5d1d
-	ControllerImpendingFailureGeneralHardDriveFailure       AdditionalSenseCode = 0x5d20
-	ControllerImpendingFailureDriveErrorRateTooHigh         AdditionalSenseCode = 0x5d21
-	ControllerImpendingFailureDataErrorRateTooHigh          AdditionalSenseCode = 0x5d22
-	ControllerImpendingFailureSeekErrorRateTooHigh          AdditionalSenseCode = 0x5d23
-	ControllerImpendingFailureTooManyBlockReassigns         AdditionalSenseCode = 0x5d24
-	ControllerImpendingFailureAccessTimesTooHigh            AdditionalSenseCode = 0x5d25
-	ControllerImpendingFailureStartUnitTimesTooHigh         AdditionalSenseCode = 0x5d26
-	ControllerImpendingFailureChannelParametrics            AdditionalSenseCode = 0x5d27
-	ControllerImpendingFailureControllerDetected            AdditionalSenseCode = 0x5d28
-	ControllerImpendingFailureThroughputPerformance         AdditionalSenseCode = 0x5d29
-	ControllerImpendingFailureSeekTimePerformance           AdditionalSenseCode = 0x5d2a
-	ControllerImpendingFailureSpinUpRetryCount              AdditionalSenseCode = 0x5d2b
-	ControllerImpendingFailureDriveCalibrationRetryCount    AdditionalSenseCode = 0x5d2c
-	DataChannelImpendingFailureGeneralHardDriveFailure      AdditionalSenseCode = 0x5d30
-	DataChannelImpendingFailureDriveErrorRateTooHigh        AdditionalSenseCode = 0x5d31
-	DataChannelImpendingFailureDataErrorRateTooHigh         AdditionalSenseCode = 0x5d32
-	DataChannelImpendingFailureSeekErrorRateTooHigh         AdditionalSenseCode = 0x5d33
-	DataChannelImpendingFailureTooManyBlockReassigns        AdditionalSenseCode = 0x5d34
-	DataChannelImpendingFailureAccessTimesTooHigh           AdditionalSenseCode = 0x5d35
-	DataChannelImpendingFailureStartUnitTimesTooHigh        AdditionalSenseCode = 0x5d36
-	DataChannelImpendingFailureChannelParametrics           AdditionalSenseCode = 0x5d37
-	DataChannelImpendingFailureControllerDetected           AdditionalSenseCode = 0x5d38
-	DataChannelImpendingFailureThroughputPerformance        AdditionalSenseCode = 0x5d39
-	DataChannelImpendingFailureSeekTimePerformance          AdditionalSenseCode = 0x5d3a
-	DataChannelImpendingFailureSpinUpRetryCount             AdditionalSenseCode = 0x5d3b
-	DataChannelImpendingFailureDriveCalibrationRetryCount   AdditionalSenseCode = 0x5d3c
-	ServoImpendingFailureGeneralHardDriveFailure            AdditionalSenseCode = 0x5d40
-	ServoImpendingFailureDriveErrorRateTooHigh              AdditionalSenseCode = 0x5d41
-	ServoImpendingFailureDataErrorRateTooHigh               AdditionalSenseCode = 0x5d42
-	ServoImpendingFailureSeekErrorRateTooHigh               AdditionalSenseCode = 0x5d43
-	ServoImpendingFailureTooManyBlockReassigns              AdditionalSenseCode = 0x5d44
-	ServoImpendingFailureAccessTimesTooHigh                 AdditionalSenseCode = 0x5d45
-	ServoImpendingFailureStartUnitTimesTooHigh              AdditionalSenseCode = 0x5d46
-	ServoImpendingFailureChannelParametrics                 AdditionalSenseCode = 0x5d47
-	ServoImpendingFailureControllerDetected                 AdditionalSenseCode = 0x5d48
-	ServoImpendingFailureThroughputPerformance              AdditionalSenseCode = 0x5d49
-	ServoImpendingFailureSeekTimePerformance                AdditionalSenseCode = 0x5d4a
-	ServoImpendingFailureSpinUpRetryCount                   AdditionalSenseCode = 0x5d4b
-	ServoImpendingFailureDriveCalibrationRetryCount         AdditionalSenseCode = 0x5d4c
-	SpindleImpendingFailureGeneralHardDriveFailure          AdditionalSenseCode = 0x5d50
-	SpindleImpendingFailureDriveErrorRateTooHigh            AdditionalSenseCode = 0x5d51
-	SpindleImpendingFailureDataErrorRateTooHigh             AdditionalSenseCode = 0x5d52
-	SpindleImpendingFailureSeekErrorRateTooHigh             AdditionalSenseCode = 0x5d53
-	SpindleImpendingFailureTooManyBlockReassigns            AdditionalSenseCode = 0x5d54
-	SpindleImpendingFailureAccessTimesTooHigh               AdditionalSenseCode = 0x5d55
-	SpindleImpendingFailureStartUnitTimesTooHigh            AdditionalSenseCode = 0x5d56
-	SpindleImpendingFailureChannelParametrics               AdditionalSenseCode = 0x5d57
-	SpindleImpendingFailureControllerDetected               AdditionalSenseCode = 0x5d58
-	SpindleImpendingFailureThroughputPerformance            AdditionalSenseCode = 0x5d59
-	SpindleImpendingFailureSeekTimePerformance              AdditionalSenseCode = 0x5d5a
-	SpindleImpendingFailureSpinUpRetryCount                 AdditionalSenseCode = 0x5d5b
-	SpindleImpendingFailureDriveCalibrationRetryCount       AdditionalSenseCode = 0x5d5c
-	FirmwareImpendingFailureGeneralHardDriveFailure         AdditionalSenseCode = 0x5d60
-	FirmwareImpendingFailureDriveErrorRateTooHigh           AdditionalSenseCode = 0x5d61
-	FirmwareImpendingFailureDataErrorRateTooHigh            AdditionalSenseCode = 0x5d62
-	FirmwareImpendingFailureSeekErrorRateTooHigh            AdditionalSenseCode = 0x5d63
-	FirmwareImpendingFailureTooManyBlockReassigns           AdditionalSenseCode = 0x5d64
-	FirmwareImpendingFailureAccessTimesTooHigh              AdditionalSenseCode = 0x5d65
-	FirmwareImpendingFailureStartUnitTimesTooHigh           AdditionalSenseCode = 0x5d66
-	FirmwareImpendingFailureChannelParametrics              AdditionalSenseCode = 0x5d67
-	FirmwareImpendingFailureControllerDetected              AdditionalSenseCode = 0x5d68
-	FirmwareImpendingFailureThroughputPerformance           AdditionalSenseCode = 0x5d69
-	FirmwareImpendingFailureSeekTimePerformance             AdditionalSenseCode = 0x5d6a
-	FirmwareImpendingFailureSpinUpRetryCount                AdditionalSenseCode = 0x5d6b
-	FirmwareImpendingFailureDriveCalibrationRetryCount      AdditionalSenseCode = 0x5d6c
-	MediaImpendingFailureEnduranceLimitMet                  AdditionalSenseCode = 0x5d73
-	FailurePredictionThresholdExceededfalse                 AdditionalSenseCode = 0x5dff
-	LowPowerConditionOn                                     AdditionalSenseCode = 0x5e00
-	IdleConditionActivatedByTimer                           AdditionalSenseCode = 0x5e01
-	StandbyConditionActivatedByTimer                        AdditionalSenseCode = 0x5e02
-	IdleConditionActivatedByCommand                         AdditionalSenseCode = 0x5e03
-	StandbyConditionActivatedByCommand                      AdditionalSenseCode = 0x5e04
-	IdleBConditionActivatedByTimer                          AdditionalSenseCode = 0x5e05
-	IdleBConditionActivatedByCommand                        AdditionalSenseCode = 0x5e06
-	IdleCConditionActivatedByTimer                          AdditionalSenseCode = 0x5e07
-	IdleCConditionActivatedByCommand                        AdditionalSenseCode = 0x5e08
-	StandbyYConditionActivatedByTimer                       AdditionalSenseCode = 0x5e09
-	StandbyYConditionActivatedByCommand                     AdditionalSenseCode = 0x5e0a
-	PowerStateChangeToActive                                AdditionalSenseCode = 0x5e41
-	PowerStateChangeToIdle                                  AdditionalSenseCode = 0x5e42
-	PowerStateChangeToStandby                               AdditionalSenseCode = 0x5e43
-	PowerStateChangeToSleep                                 AdditionalSenseCode = 0x5e45
-	PowerStateChangeToDeviceControl                         AdditionalSenseCode = 0x5e47
-	LampFailure                                             AdditionalSenseCode = 0x6000
-	VideoAcquisitionError                                   AdditionalSenseCode = 0x6100
-	UnableToAcquireVideo                                    AdditionalSenseCode = 0x6101
-	OutOfFocus                                              AdditionalSenseCode = 0x6102
-	ScanHeadPositioningError                                AdditionalSenseCode = 0x6200
-	EndOfUserAreaEncounteredOnThisTrack                     AdditionalSenseCode = 0x6300
-	PacketDoesNotFitInAvailableSpace                        AdditionalSenseCode = 0x6301
-	IllegalModeForThisTrack                                 AdditionalSenseCode = 0x6400
-	InvalidPacketSize                                       AdditionalSenseCode = 0x6401
-	VoltageFault                                            AdditionalSenseCode = 0x6500
-	AutomaticDocumentFeederCoverUp                          AdditionalSenseCode = 0x6600
-	AutomaticDocumentFeederLiftUp                           AdditionalSenseCode = 0x6601
-	DocumentJamInAutomaticDocumentFeeder                    AdditionalSenseCode = 0x6602
-	DocumentMissFeedAutomaticInDocumentFeeder               AdditionalSenseCode = 0x6603
-	ConfigurationFailure                                    AdditionalSenseCode = 0x6700
-	ConfigurationOfIncapableLogicalUnitsFailed              AdditionalSenseCode = 0x6701
-	AddLogicalUnitFailed                                    AdditionalSenseCode = 0x6702
-	ModificationOfLogicalUnitFailed                         AdditionalSenseCode = 0x6703
-	ExchangeOfLogicalUnitFailed                             AdditionalSenseCode = 0x6704
-	RemoveOfLogicalUnitFailed                               AdditionalSenseCode = 0x6705
-	AttachmentOfLogicalUnitFailed                           AdditionalSenseCode = 0x6706
-	CreationOfLogicalUnitFailed                             AdditionalSenseCode = 0x6707
-	AssignFailureOccurred                                   AdditionalSenseCode = 0x6708
-	MultiplyAssignedLogicalUnit                             AdditionalSenseCode = 0x6709
-	SetTargetPortGroupsCommandFailed                        AdditionalSenseCode = 0x670a
-	AtaDeviceFeatureNotEnabled                              AdditionalSenseCode = 0x670b
-	CommandRejected                                         AdditionalSenseCode = 0x670c
-	ExplicitBindNotAllowed                                  AdditionalSenseCode = 0x670d
-	LogicalUnitNotConfigured                                AdditionalSenseCode = 0x6800
-	SubsidiaryLogicalUnitNotConfigured                      AdditionalSenseCode = 0x6801
-	DataLossOnLogicalUnit                                   AdditionalSenseCode = 0x6900
-	MultipleLogicalUnitFailures                             AdditionalSenseCode = 0x6901
-	ParitydataMismatch                                      AdditionalSenseCode = 0x6902
-	InformationalReferToLog                                 AdditionalSenseCode = 0x6a00
-	StateChangeHasOccurred                                  AdditionalSenseCode = 0x6b00
-	RedundancyLevelGotBetter                                AdditionalSenseCode = 0x6b01
-	RedundancyLevelGotWorse                                 AdditionalSenseCode = 0x6b02
-	RebuildFailureOccurred                                  AdditionalSenseCode = 0x6c00
-	RecalculateFailureOccurred                              AdditionalSenseCode = 0x6d00
-	CommandToLogicalUnitFailed                              AdditionalSenseCode = 0x6e00
-	CopyProtectionKeyExchangeFailureAuthentication          AdditionalSenseCode = 0x6f00
-	CopyProtectionKeyExchangeFailureKeyNotPresent           AdditionalSenseCode = 0x6f01
-	CopyProtectionKeyExchangeFailureKeyNotEstablished       AdditionalSenseCode = 0x6f02
-	ReadOfScrambledSectorWithoutAuthentication              AdditionalSenseCode = 0x6f03
-	MediaRegionCodeIsMismatchedToLogicalUnitRegion          AdditionalSenseCode = 0x6f04
-	DriveRegionMustBePermanentregionResetCountError         AdditionalSenseCode = 0x6f05
-	InsufficientBlockCountForBindingNonceRecording          AdditionalSenseCode = 0x6f06
-	ConflictInBindingNonceRecording                         AdditionalSenseCode = 0x6f07
-	InsufficientPermission                                  AdditionalSenseCode = 0x6f08
-	InvalidDriveHostPairingServer                           AdditionalSenseCode = 0x6f09
-	DriveHostPairingSuspended                               AdditionalSenseCode = 0x6f0a
-	DecompressionExceptionLongAlgorithmId                   AdditionalSenseCode = 0x7100
-	SessionFixationError                                    AdditionalSenseCode = 0x7200
-	SessionFixationErrorWritingLeadIn                       AdditionalSenseCode = 0x7201
-	SessionFixationErrorWritingLeadOut                      AdditionalSenseCode = 0x7202
-	SessionFixationErrorIncompleteTrackInSession            AdditionalSenseCode = 0x7203
-	EmptyOrPartiallyWrittenReservedTrack                    AdditionalSenseCode = 0x7204
-	NoMoreTrackReservationsAllowed                          AdditionalSenseCode = 0x7205
-	RmzExtensionIsNotAllowed                                AdditionalSenseCode = 0x7206
-	NoMoreTestZoneExtensionsAreAllowed                      AdditionalSenseCode = 0x7207
-	CdControlError                                          AdditionalSenseCode = 0x7300
-	PowerCalibrationAreaAlmostFull                          AdditionalSenseCode = 0x7301
-	PowerCalibrationAreaIsFull                              AdditionalSenseCode = 0x7302
-	PowerCalibrationAreaError                               AdditionalSenseCode = 0x7303
-	ProgramMemoryAreaUpdateFailure                          AdditionalSenseCode = 0x7304
-	ProgramMemoryAreaIsFull                                 AdditionalSenseCode = 0x7305
-	RmapmaIsAlmostFull                                      AdditionalSenseCode = 0x7306
-	CurrentPowerCalibrationAreaAlmostFull                   AdditionalSenseCode = 0x7310
-	CurrentPowerCalibrationAreaIsFull                       AdditionalSenseCode = 0x7311
-	RdzIsFull                                               AdditionalSenseCode = 0x7317
-	SecurityError                                           AdditionalSenseCode = 0x7400
-	UnableToDecryptData                                     AdditionalSenseCode = 0x7401
-	UnencryptedDataEncounteredWhileDecrypting               AdditionalSenseCode = 0x7402
-	IncorrectDataEncryptionKey                              AdditionalSenseCode = 0x7403
-	CryptographicIntegrityValidationFailed                  AdditionalSenseCode = 0x7404
-	ErrorDecryptingData                                     AdditionalSenseCode = 0x7405
-	UnknownSignatureVerificationKey                         AdditionalSenseCode = 0x7406
-	EncryptionParametersNotUseable                          AdditionalSenseCode = 0x7407
-	DigitalSignatureValidationFailure                       AdditionalSenseCode = 0x7408
-	EncryptionModeMismatchOnRead                            AdditionalSenseCode = 0x7409
-	EncryptedBlockNotRawReadEnabled                         AdditionalSenseCode = 0x740a
-	IncorrectEncryptionParameters                           AdditionalSenseCode = 0x740b
-	UnableToDecryptParameterList                            AdditionalSenseCode = 0x740c
-	EncryptionAlgorithmDisabled                             AdditionalSenseCode = 0x740d
-	SaCreationParameterValueInvalid                         AdditionalSenseCode = 0x7410
-	SaCreationParameterValueRejected                        AdditionalSenseCode = 0x7411
-	InvalidSaUsage                                          AdditionalSenseCode = 0x7412
-	DataEncryptionConfigurationPrevented                    AdditionalSenseCode = 0x7421
-	SaCreationParameterNotSupported                         AdditionalSenseCode = 0x7430
-	AuthenticationFailed                                    AdditionalSenseCode = 0x7440
-	ExternalDataEncryptionKeyManagerAccessError             AdditionalSenseCode = 0x7461
-	ExternalDataEncryptionKeyManagerError                   AdditionalSenseCode = 0x7462
-	ExternalDataEncryptionKeyNotFound                       AdditionalSenseCode = 0x7463
-	ExternalDataEncryptionRequestNotAuthorized              AdditionalSenseCode = 0x7464
-	ExternalDataEncryptionControlTimeout                    AdditionalSenseCode = 0x746e
-	ExternalDataEncryptionControlError                      AdditionalSenseCode = 0x746f
-	LogicalUnitAccessNotAuthorized                          AdditionalSenseCode = 0x7471
-	SecurityConflictInTranslatedDevice                      AdditionalSenseCode = 0x7479
-)
-
-var additionalSenseCodeDesc = map[AdditionalSenseCode]string{
-	0x0000: "no additional sense information",
-	0x0001: "filemark detected",
-	0x0002: "end-of-partition/medium detected",
-	0x0003: "setmark detected",
-	0x0004: "beginning-of-partition/medium detected",
-	0x0005: "end-of-data detected",
-	0x0006: "i/o process terminated",
-	0x0007: "programmable early warning detected",
-	0x0011: "audio play operation in progress",
-	0x0012: "audio play operation paused",
-	0x0013: "audio play operation successfully completed",
-	0x0014: "audio play operation stopped due to error",
-	0x0015: "no current audio status to return",
-	0x0016: "operation in progress",
-	0x0017: "cleaning requested",
-	0x0018: "erase operation in progress",
-	0x0019: "locate operation in progress",
-	0x001a: "rewind operation in progress",
-	0x001b: "set capacity operation in progress",
-	0x001c: "verify operation in progress",
-	0x001d: "ata pass through information available",
-	0x001e: "conflicting sa creation request",
-	0x001f: "logical unit transitioning to another power condition",
-	0x0020: "extended copy information available",
-	0x0021: "atomic command aborted due to aca",
-	0x0022: "deferred microcode is pending",
-	0x0100: "no index/sector signal",
-	0x0200: "no seek complete",
-	0x0300: "peripheral device write fault",
-	0x0301: "no write current",
-	0x0302: "excessive write errors",
-	0x0400: "logical unit not ready, cause not reportable",
-	0x0401: "logical unit is in process of becoming ready",
-	0x0402: "logical unit not ready, initializing command required",
-	0x0403: "logical unit not ready, manual intervention required",
-	0x0404: "logical unit not ready, format in progress",
-	0x0405: "logical unit not ready, rebuild in progress",
-	0x0406: "logical unit not ready, recalculation in progress",
-	0x0407: "logical unit not ready, operation in progress",
-	0x0408: "logical unit not ready, long write in progress",
-	0x0409: "logical unit not ready, self-test in progress",
-	0x040a: "logical unit not accessible, asymmetric access state transition",
-	0x040b: "logical unit not accessible, target port in standby state",
-	0x040c: "logical unit not accessible, target port in unavailable state",
-	0x040d: "logical unit not ready, structure check required",
-	0x040e: "logical unit not ready, security session in progress",
-	0x0410: "logical unit not ready, auxiliary memory not accessible",
-	0x0411: "logical unit not ready, notify (enable spinup) required",
-	0x0412: "logical unit not ready, offline",
-	0x0413: "logical unit not ready, sa creation in progress",
-	0x0414: "logical unit not ready, space allocation in progress",
-	0x0415: "logical unit not ready, robotics disabled",
-	0x0416: "logical unit not ready, configuration required",
-	0x0417: "logical unit not ready, calibration required",
-	0x0418: "logical unit not ready, a door is open",
-	0x0419: "logical unit not ready, operating in sequential mode",
-	0x041a: "logical unit not ready, start stop unit command in progress",
-	0x041b: "logical unit not ready, sanitize in progress",
-	0x041c: "logical unit not ready, additional power use not yet granted",
-	0x041d: "logical unit not ready, configuration in progress",
-	0x041e: "logical unit not ready, microcode activation required",
-	0x041f: "logical unit not ready, microcode download required",
-	0x0420: "logical unit not ready, logical unit reset required",
-	0x0421: "logical unit not ready, hard reset required",
-	0x0422: "logical unit not ready, power cycle required",
-	0x0423: "logical unit not ready, affiliation required",
-	0x0424: "depopulation in progress",
-	0x0500: "logical unit does not respond to selection",
-	0x0600: "no reference position found",
-	0x0700: "multiple peripheral devices selected",
-	0x0800: "logical unit communication failure",
-	0x0801: "logical unit communication time-out",
-	0x0802: "logical unit communication parity error",
-	0x0803: "logical unit communication crc error (ultra-dma/32)",
-	0x0804: "unreachable copy target",
-	0x0900: "track following error",
-	0x0901: "tracking servo failure",
-	0x0902: "focus servo failure",
-	0x0903: "spindle servo failure",
-	0x0904: "head select fault",
-	0x0905: "vibration induced tracking error",
-	0x0a00: "error log overflow",
-	0x0b00: "warning",
-	0x0b01: "warning - specified temperature exceeded",
-	0x0b02: "warning - enclosure degraded",
-	0x0b03: "warning - background self-test failed",
-	0x0b04: "warning - background pre-scan detected medium error",
-	0x0b05: "warning - background medium scan detected medium error",
-	0x0b06: "warning - non-volatile cache now volatile",
-	0x0b07: "warning - degraded power to non-volatile cache",
-	0x0b08: "warning - power loss expected",
-	0x0b09: "warning - device statistics notification active",
-	0x0b0a: "warning - high critical temperature limit exceeded",
-	0x0b0b: "warning - low critical temperature limit exceeded",
-	0x0b0c: "warning - high operating temperature limit exceeded",
-	0x0b0d: "warning - low operating temperature limit exceeded",
-	0x0b0e: "warning - high critical humidity limit exceeded",
-	0x0b0f: "warning - low critical humidity limit exceeded",
-	0x0b10: "warning - high operating humidity limit exceeded",
-	0x0b11: "warning - low operating humidity limit exceeded",
-	0x0b12: "warning - microcode security at risk",
-	0x0b13: "warning - microcode digital signature validation failure",
-	0x0b14: "warning - physical element status change",
-	0x0c00: "write error",
-	0x0c01: "write error - recovered with auto reallocation",
-	0x0c02: "write error - auto reallocation failed",
-	0x0c03: "write error - recommend reassignment",
-	0x0c04: "compression check miscompare error",
-	0x0c05: "data expansion occurred during compression",
-	0x0c06: "block not compressible",
-	0x0c07: "write error - recovery needed",
-	0x0c08: "write error - recovery failed",
-	0x0c09: "write error - loss of streaming",
-	0x0c0a: "write error - padding blocks added",
-	0x0c0b: "auxiliary memory write error",
-	0x0c0c: "write error - unexpected unsolicited data",
-	0x0c0d: "write error - not enough unsolicited data",
-	0x0c0e: "multiple write errors",
-	0x0c0f: "defects in error window",
-	0x0c10: "incomplete multiple atomic write operations",
-	0x0c11: "write error - recovery scan needed",
-	0x0c12: "write error - insufficient zone resources",
-	0x0d00: "error detected by third party temporary initiator",
-	0x0d01: "third party device failure",
-	0x0d02: "copy target device not reachable",
-	0x0d03: "incorrect copy target device type",
-	0x0d04: "copy target device data underrun",
-	0x0d05: "copy target device data overrun",
-	0x0e00: "invalid information unit",
-	0x0e01: "information unit too short",
-	0x0e02: "information unit too long",
-	0x0e03: "invalid field in command information unit",
-	0x1000: "id crc or ecc error",
-	0x1001: "logical block guard check failed",
-	0x1002: "logical block application tag check failed",
-	0x1003: "logical block reference tag check failed",
-	0x1004: "logical block protection error on recover buffered data",
-	0x1005: "logical block protection method error",
-	0x1100: "unrecovered read error",
-	0x1101: "read retries exhausted",
-	0x1102: "error too long to correct",
-	0x1103: "multiple read errors",
-	0x1104: "unrecovered read error - auto reallocate failed",
-	0x1105: "l-ec uncorrectable error",
-	0x1106: "circ unrecovered error",
-	0x1107: "data re-synchronization error",
-	0x1108: "incomplete block read",
-	0x1109: "no gap found",
-	0x110a: "miscorrected error",
-	0x110b: "unrecovered read error - recommend reassignment",
-	0x110c: "unrecovered read error - recommend rewrite the data",
-	0x110d: "de-compression crc error",
-	0x110e: "cannot decompress using declared algorithm",
-	0x110f: "error reading upc/ean number",
-	0x1110: "error reading isrc number",
-	0x1111: "read error - loss of streaming",
-	0x1112: "auxiliary memory read error",
-	0x1113: "read error - failed retransmission request",
-	0x1114: "read error - lba marked bad by application client",
-	0x1115: "write after sanitize required",
-	0x1200: "address mark not found for id field",
-	0x1300: "address mark not found for data field",
-	0x1400: "recorded entity not found",
-	0x1401: "record not found",
-	0x1402: "filemark or setmark not found",
-	0x1403: "end-of-data not found",
-	0x1404: "block sequence error",
-	0x1405: "record not found - recommend reassignment",
-	0x1406: "record not found - data auto-reallocated",
-	0x1407: "locate operation failure",
-	0x1500: "random positioning error",
-	0x1501: "mechanical positioning error",
-	0x1502: "positioning error detected by read of medium",
-	0x1600: "data synchronization mark error",
-	0x1601: "data sync error - data rewritten",
-	0x1602: "data sync error - recommend rewrite",
-	0x1603: "data sync error - data auto-reallocated",
-	0x1604: "data sync error - recommend reassignment",
-	0x1700: "recovered data with no error correction applied",
-	0x1701: "recovered data with retries",
-	0x1702: "recovered data with positive head offset",
-	0x1703: "recovered data with negative head offset",
-	0x1704: "recovered data with retries and/or circ applied",
-	0x1705: "recovered data using previous sector id",
-	0x1706: "recovered data without ecc - data auto-reallocated",
-	0x1707: "recovered data without ecc - recommend reassignment",
-	0x1708: "recovered data without ecc - recommend rewrite",
-	0x1709: "recovered data without ecc - data rewritten",
-	0x1800: "recovered data with error correction applied",
-	0x1801: "recovered data with error corr. & retries applied",
-	0x1802: "recovered data - data auto-reallocated",
-	0x1803: "recovered data with circ",
-	0x1804: "recovered data with l-ec",
-	0x1805: "recovered data - recommend reassignment",
-	0x1806: "recovered data - recommend rewrite",
-	0x1807: "recovered data with ecc - data rewritten",
-	0x1808: "recovered data with linking",
-	0x1900: "defect list error",
-	0x1901: "defect list not available",
-	0x1902: "defect list error in primary list",
-	0x1903: "defect list error in grown list",
-	0x1a00: "parameter list length error",
-	0x1b00: "synchronous data transfer error",
-	0x1c00: "defect list not found",
-	0x1c01: "primary defect list not found",
-	0x1c02: "grown defect list not found",
-	0x1d00: "miscompare during verify operation",
-	0x1d01: "miscompare verify of unmapped lba",
-	0x1e00: "recovered id with ecc correction",
-	0x1f00: "partial defect list transfer",
-	0x2000: "invalid command operation code",
-	0x2001: "access denied - initiator pending-enrolled",
-	0x2002: "access denied - no access rights",
-	0x2003: "access denied - invalid mgmt id key",
-	0x2004: "illegal command while in write capable state",
-	0x2006: "illegal command while in explicit address mode",
-	0x2007: "illegal command while in implicit address mode",
-	0x2008: "access denied - enrollment conflict",
-	0x2009: "access denied - invalid lu identifier",
-	0x200a: "access denied - invalid proxy token",
-	0x200b: "access denied - acl lun conflict",
-	0x200c: "illegal command when not in append-only mode",
-	0x200d: "not an administrative logical unit",
-	0x200e: "not a subsidiary logical unit",
-	0x200f: "not a conglomerate logical unit",
-	0x2100: "logical block address out of range",
-	0x2101: "invalid element address",
-	0x2102: "invalid address for write",
-	0x2103: "invalid write crossing layer jump",
-	0x2104: "unaligned write command",
-	0x2105: "write boundary violation",
-	0x2106: "attempt to read invalid data",
-	0x2107: "read boundary violation",
-	0x2108: "misaligned write command",
-	0x2200: "illegal function (use 20 00, 24 00, or 26 00)",
-	0x2300: "invalid token operation, cause not reportable",
-	0x2301: "invalid token operation, unsupported token type",
-	0x2302: "invalid token operation, remote token usage not supported",
-	0x2303: "invalid token operation, remote rod token creation not supported",
-	0x2304: "invalid token operation, token unknown",
-	0x2305: "invalid token operation, token corrupt",
-	0x2306: "invalid token operation, token revoked",
-	0x2307: "invalid token operation, token expired",
-	0x2308: "invalid token operation, token cancelled",
-	0x2309: "invalid token operation, token deleted",
-	0x230a: "invalid token operation, invalid token length",
-	0x2400: "invalid field in cdb",
-	0x2401: "cdb decryption error",
-	0x2404: "security audit value frozen",
-	0x2405: "security working key frozen",
-	0x2406: "nonce not unique",
-	0x2407: "nonce timestamp out of range",
-	0x2408: "invalid xcdb",
-	0x2409: "invalid fast format",
-	0x2500: "logical unit not supported",
-	0x2600: "invalid field in parameter list",
-	0x2601: "parameter not supported",
-	0x2602: "parameter value invalid",
-	0x2603: "threshold parameters not supported",
-	0x2604: "invalid release of persistent reservation",
-	0x2605: "data decryption error",
-	0x2606: "too many target descriptors",
-	0x2607: "unsupported target descriptor type code",
-	0x2608: "too many segment descriptors",
-	0x2609: "unsupported segment descriptor type code",
-	0x260a: "unexpected inexact segment",
-	0x260b: "inline data length exceeded",
-	0x260c: "invalid operation for copy source or destination",
-	0x260d: "copy segment granularity violation",
-	0x260e: "invalid parameter while port is enabled",
-	0x260f: "invalid data-out buffer integrity check value",
-	0x2610: "data decryption key fail limit reached",
-	0x2611: "incomplete key-associated data set",
-	0x2612: "vendor specific key reference not found",
-	0x2613: "application tag mode page is invalid",
-	0x2614: "tape stream mirroring prevented",
-	0x2615: "copy source or copy destination not authorized",
-	0x2700: "write protected",
-	0x2701: "hardware write protected",
-	0x2702: "logical unit software write protected",
-	0x2703: "associated write protect",
-	0x2704: "persistent write protect",
-	0x2705: "permanent write protect",
-	0x2706: "conditional write protect",
-	0x2707: "space allocation failed write protect",
-	0x2708: "zone is read only",
-	0x2800: "not ready to ready change, medium may have changed",
-	0x2801: "import or export element accessed",
-	0x2802: "format-layer may have changed",
-	0x2803: "import/export element accessed, medium changed",
-	0x2900: "power on, reset, or bus device reset occurred",
-	0x2901: "power on occurred",
-	0x2902: "scsi bus reset occurred",
-	0x2903: "bus device reset function occurred",
-	0x2904: "device internal reset",
-	0x2905: "transceiver mode changed to single-ended",
-	0x2906: "transceiver mode changed to lvd",
-	0x2907: "i_t nexus loss occurred",
-	0x2a00: "parameters changed",
-	0x2a01: "mode parameters changed",
-	0x2a02: "log parameters changed",
-	0x2a03: "reservations preempted",
-	0x2a04: "reservations released",
-	0x2a05: "registrations preempted",
-	0x2a06: "asymmetric access state changed",
-	0x2a07: "implicit asymmetric access state transition failed",
-	0x2a08: "priority changed",
-	0x2a09: "capacity data has changed",
-	0x2a0a: "error history i_t nexus cleared",
-	0x2a0b: "error history snapshot released",
-	0x2a0c: "error recovery attributes have changed",
-	0x2a0d: "data encryption capabilities changed",
-	0x2a10: "timestamp changed",
-	0x2a11: "data encryption parameters changed by another i_t nexus",
-	0x2a12: "data encryption parameters changed by vendor specific event",
-	0x2a13: "data encryption key instance counter has changed",
-	0x2a14: "sa creation capabilities data has changed",
-	0x2a15: "medium removal prevention preempted",
-	0x2a16: "zone reset write pointer recommended",
-	0x2b00: "copy cannot execute since host cannot disconnect",
-	0x2c00: "command sequence error",
-	0x2c01: "too many windows specified",
-	0x2c02: "invalid combination of windows specified",
-	0x2c03: "current program area is not empty",
-	0x2c04: "current program area is empty",
-	0x2c05: "illegal power condition request",
-	0x2c06: "persistent prevent conflict",
-	0x2c07: "previous busy status",
-	0x2c08: "previous task set full status",
-	0x2c09: "previous reservation conflict status",
-	0x2c0a: "partition or collection contains user objects",
-	0x2c0b: "not reserved",
-	0x2c0c: "orwrite generation does not match",
-	0x2c0d: "reset write pointer not allowed",
-	0x2c0e: "zone is offline",
-	0x2c0f: "stream not open",
-	0x2c10: "unwritten data in zone",
-	0x2c11: "descriptor format sense data required",
-	0x2d00: "overwrite error on update in place",
-	0x2e00: "insufficient time for operation",
-	0x2e01: "command timeout before processing",
-	0x2e02: "command timeout during processing",
-	0x2e03: "command timeout during processing due to error recovery",
-	0x2f00: "commands cleared by another initiator",
-	0x2f01: "commands cleared by power loss notification",
-	0x2f02: "commands cleared by device server",
-	0x2f03: "some commands cleared by queuing layer event",
-	0x3000: "incompatible medium installed",
-	0x3001: "cannot read medium - unknown format",
-	0x3002: "cannot read medium - incompatible format",
-	0x3003: "cleaning cartridge installed",
-	0x3004: "cannot write medium - unknown format",
-	0x3005: "cannot write medium - incompatible format",
-	0x3006: "cannot format medium - incompatible medium",
-	0x3007: "cleaning failure",
-	0x3008: "cannot write - application code mismatch",
-	0x3009: "current session not fixated for append",
-	0x300a: "cleaning request rejected",
-	0x300c: "worm medium - overwrite attempted",
-	0x300d: "worm medium - integrity check",
-	0x3010: "medium not formatted",
-	0x3011: "incompatible volume type",
-	0x3012: "incompatible volume qualifier",
-	0x3013: "cleaning volume expired",
-	0x3100: "medium format corrupted",
-	0x3101: "format command failed",
-	0x3102: "zoned formatting failed due to spare linking",
-	0x3103: "sanitize command failed",
-	0x3104: "depopulation failed",
-	0x3200: "no defect spare location available",
-	0x3201: "defect list update failure",
-	0x3300: "tape length error",
-	0x3400: "enclosure failure",
-	0x3500: "enclosure services failure",
-	0x3501: "unsupported enclosure function",
-	0x3502: "enclosure services unavailable",
-	0x3503: "enclosure services transfer failure",
-	0x3504: "enclosure services transfer refused",
-	0x3505: "enclosure services checksum error",
-	0x3600: "ribbon, ink, or toner failure",
-	0x3700: "rounded parameter",
-	0x3800: "event status notification",
-	0x3802: "esn - power management class event",
-	0x3804: "esn - media class event",
-	0x3806: "esn - device busy class event",
-	0x3807: "thin provisioning soft threshold reached",
-	0x3900: "saving parameters not supported",
-	0x3a00: "medium not present",
-	0x3a01: "medium not present - tray closed",
-	0x3a02: "medium not present - tray open",
-	0x3a03: "medium not present - loadable",
-	0x3a04: "medium not present - medium auxiliary memory accessible",
-	0x3b00: "sequential positioning error",
-	0x3b01: "tape position error at beginning-of-medium",
-	0x3b02: "tape position error at end-of-medium",
-	0x3b03: "tape or electronic vertical forms unit not ready",
-	0x3b04: "slew failure",
-	0x3b05: "paper jam",
-	0x3b06: "failed to sense top-of-form",
-	0x3b07: "failed to sense bottom-of-form",
-	0x3b08: "reposition error",
-	0x3b09: "read past end of medium",
-	0x3b0a: "read past beginning of medium",
-	0x3b0b: "position past end of medium",
-	0x3b0c: "position past beginning of medium",
-	0x3b0d: "medium destination element full",
-	0x3b0e: "medium source element empty",
-	0x3b0f: "end of medium reached",
-	0x3b11: "medium magazine not accessible",
-	0x3b12: "medium magazine removed",
-	0x3b13: "medium magazine inserted",
-	0x3b14: "medium magazine locked",
-	0x3b15: "medium magazine unlocked",
-	0x3b16: "mechanical positioning or changer error",
-	0x3b17: "read past end of user object",
-	0x3b18: "element disabled",
-	0x3b19: "element enabled",
-	0x3b1a: "data transfer device removed",
-	0x3b1b: "data transfer device inserted",
-	0x3b1c: "too many logical objects on partition to support operation",
-	0x3d00: "invalid bits in identify message",
-	0x3e00: "logical unit has not self-configured yet",
-	0x3e01: "logical unit failure",
-	0x3e02: "timeout on logical unit",
-	0x3e03: "logical unit failed self-test",
-	0x3e04: "logical unit unable to update self-test log",
-	0x3f00: "target operating conditions have changed",
-	0x3f01: "microcode has been changed",
-	0x3f02: "changed operating definition",
-	0x3f03: "inquiry data has changed",
-	0x3f04: "component device attached",
-	0x3f05: "device identifier changed",
-	0x3f06: "redundancy group created or modified",
-	0x3f07: "redundancy group deleted",
-	0x3f08: "spare created or modified",
-	0x3f09: "spare deleted",
-	0x3f0a: "volume set created or modified",
-	0x3f0b: "volume set deleted",
-	0x3f0c: "volume set deassigned",
-	0x3f0d: "volume set reassigned",
-	0x3f0e: "reported luns data has changed",
-	0x3f0f: "echo buffer overwritten",
-	0x3f10: "medium loadable",
-	0x3f11: "medium auxiliary memory accessible",
-	0x3f12: "iscsi ip address added",
-	0x3f13: "iscsi ip address removed",
-	0x3f14: "iscsi ip address changed",
-	0x3f15: "inspect referrals sense descriptors",
-	0x3f16: "microcode has been changed without reset",
-	0x3f17: "zone transition to full",
-	0x3f18: "bind completed",
-	0x3f19: "bind redirected",
-	0x3f1a: "subsidiary binding changed",
-	0x4000: "ram failure (should use 40 nn)",
-	0x4100: "data path failure (should use 40 nn)",
-	0x4200: "power-on or self-test failure (should use 40 nn)",
-	0x4300: "message error",
-	0x4400: "internal target failure",
-	0x4401: "persistent reservation information lost",
-	0x4471: "ata device failed set features",
-	0x4500: "select or reselect failure",
-	0x4600: "unsuccessful soft reset",
-	0x4700: "scsi parity error",
-	0x4701: "data phase crc error detected",
-	0x4702: "scsi parity error detected during st data phase",
-	0x4703: "information unit iucrc error detected",
-	0x4704: "asynchronous information protection error detected",
-	0x4705: "protocol service crc error",
-	0x4706: "phy test function in progress",
-	0x477f: "some commands cleared by iscsi protocol event",
-	0x4800: "initiator detected error message received",
-	0x4900: "invalid message error",
-	0x4a00: "command phase error",
-	0x4b00: "data phase error",
-	0x4b01: "invalid target port transfer tag received",
-	0x4b02: "too much write data",
-	0x4b03: "ack/nak timeout",
-	0x4b04: "nak received",
-	0x4b05: "data offset error",
-	0x4b06: "initiator response timeout",
-	0x4b07: "connection lost",
-	0x4b08: "data-in buffer overflow - data buffer size",
-	0x4b09: "data-in buffer overflow - data buffer descriptor area",
-	0x4b0a: "data-in buffer error",
-	0x4b0b: "data-out buffer overflow - data buffer size",
-	0x4b0c: "data-out buffer overflow - data buffer descriptor area",
-	0x4b0d: "data-out buffer error",
-	0x4b0e: "pcie fabric error",
-	0x4b0f: "pcie completion timeout",
-	0x4b10: "pcie completer abort",
-	0x4b11: "pcie poisoned tlp received",
-	0x4b12: "pcie ecrc check failed",
-	0x4b13: "pcie unsupported request",
-	0x4b14: "pcie acs violation",
-	0x4b15: "pcie tlp prefix blocked",
-	0x4c00: "logical unit failed self-configuration",
-	0x4e00: "overlapped commands attempted",
-	0x5000: "write append error",
-	0x5001: "write append position error",
-	0x5002: "position error related to timing",
-	0x5100: "erase failure",
-	0x5101: "erase failure - incomplete erase operation detected",
-	0x5200: "cartridge fault",
-	0x5300: "media load or eject failed",
-	0x5301: "unload tape failure",
-	0x5302: "medium removal prevented",
-	0x5303: "medium removal prevented by data transfer element",
-	0x5304: "medium thread or unthread failure",
-	0x5305: "volume identifier invalid",
-	0x5306: "volume identifier missing",
-	0x5307: "duplicate volume identifier",
-	0x5308: "element status unknown",
-	0x5309: "data transfer device error - load failed",
-	0x530a: "data transfer device error - unload failed",
-	0x530b: "data transfer device error - unload missing",
-	0x530c: "data transfer device error - eject failed",
-	0x530d: "data transfer device error - library communication failed",
-	0x5400: "scsi to host system interface failure",
-	0x5500: "system resource failure",
-	0x5501: "system buffer full",
-	0x5502: "insufficient reservation resources",
-	0x5503: "insufficient resources",
-	0x5504: "insufficient registration resources",
-	0x5505: "insufficient access control resources",
-	0x5506: "auxiliary memory out of space",
-	0x5507: "quota error",
-	0x5508: "maximum number of supplemental decryption keys exceeded",
-	0x5509: "medium auxiliary memory not accessible",
-	0x550a: "data currently unavailable",
-	0x550b: "insufficient power for operation",
-	0x550c: "insufficient resources to create rod",
-	0x550d: "insufficient resources to create rod token",
-	0x550e: "insufficient zone resources",
-	0x550f: "insufficient zone resources to complete write",
-	0x5510: "maximum number of streams open",
-	0x5511: "insufficient resources to bind",
-	0x5700: "unable to recover table-of-contents",
-	0x5800: "generation does not exist",
-	0x5900: "updated block read",
-	0x5a00: "operator request or state change input",
-	0x5a01: "operator medium removal request",
-	0x5a02: "operator selected write protect",
-	0x5a03: "operator selected write permit",
-	0x5b00: "log exception",
-	0x5b01: "threshold condition met",
-	0x5b02: "log counter at maximum",
-	0x5b03: "log list codes exhausted",
-	0x5c00: "rpl status change",
-	0x5c01: "spindles synchronized",
-	0x5c02: "spindles not synchronized",
-	0x5d00: "failure prediction threshold exceeded",
-	0x5d01: "media failure prediction threshold exceeded",
-	0x5d02: "logical unit failure prediction threshold exceeded",
-	0x5d03: "spare area exhaustion prediction threshold exceeded",
-	0x5d10: "hardware impending failure general hard drive failure",
-	0x5d11: "hardware impending failure drive error rate too high",
-	0x5d12: "hardware impending failure data error rate too high",
-	0x5d13: "hardware impending failure seek error rate too high",
-	0x5d14: "hardware impending failure too many block reassigns",
-	0x5d15: "hardware impending failure access times too high",
-	0x5d16: "hardware impending failure start unit times too high",
-	0x5d17: "hardware impending failure channel parametrics",
-	0x5d18: "hardware impending failure controller detected",
-	0x5d19: "hardware impending failure throughput performance",
-	0x5d1a: "hardware impending failure seek time performance",
-	0x5d1b: "hardware impending failure spin-up retry count",
-	0x5d1c: "hardware impending failure drive calibration retry count",
-	0x5d1d: "hardware impending failure power loss protection circuit",
-	0x5d20: "controller impending failure general hard drive failure",
-	0x5d21: "controller impending failure drive error rate too high",
-	0x5d22: "controller impending failure data error rate too high",
-	0x5d23: "controller impending failure seek error rate too high",
-	0x5d24: "controller impending failure too many block reassigns",
-	0x5d25: "controller impending failure access times too high",
-	0x5d26: "controller impending failure start unit times too high",
-	0x5d27: "controller impending failure channel parametrics",
-	0x5d28: "controller impending failure controller detected",
-	0x5d29: "controller impending failure throughput performance",
-	0x5d2a: "controller impending failure seek time performance",
-	0x5d2b: "controller impending failure spin-up retry count",
-	0x5d2c: "controller impending failure drive calibration retry count",
-	0x5d30: "data channel impending failure general hard drive failure",
-	0x5d31: "data channel impending failure drive error rate too high",
-	0x5d32: "data channel impending failure data error rate too high",
-	0x5d33: "data channel impending failure seek error rate too high",
-	0x5d34: "data channel impending failure too many block reassigns",
-	0x5d35: "data channel impending failure access times too high",
-	0x5d36: "data channel impending failure start unit times too high",
-	0x5d37: "data channel impending failure channel parametrics",
-	0x5d38: "data channel impending failure controller detected",
-	0x5d39: "data channel impending failure throughput performance",
-	0x5d3a: "data channel impending failure seek time performance",
-	0x5d3b: "data channel impending failure spin-up retry count",
-	0x5d3c: "data channel impending failure drive calibration retry count",
-	0x5d40: "servo impending failure general hard drive failure",
-	0x5d41: "servo impending failure drive error rate too high",
-	0x5d42: "servo impending failure data error rate too high",
-	0x5d43: "servo impending failure seek error rate too high",
-	0x5d44: "servo impending failure too many block reassigns",
-	0x5d45: "servo impending failure access times too high",
-	0x5d46: "servo impending failure start unit times too high",
-	0x5d47: "servo impending failure channel parametrics",
-	0x5d48: "servo impending failure controller detected",
-	0x5d49: "servo impending failure throughput performance",
-	0x5d4a: "servo impending failure seek time performance",
-	0x5d4b: "servo impending failure spin-up retry count",
-	0x5d4c: "servo impending failure drive calibration retry count",
-	0x5d50: "spindle impending failure general hard drive failure",
-	0x5d51: "spindle impending failure drive error rate too high",
-	0x5d52: "spindle impending failure data error rate too high",
-	0x5d53: "spindle impending failure seek error rate too high",
-	0x5d54: "spindle impending failure too many block reassigns",
-	0x5d55: "spindle impending failure access times too high",
-	0x5d56: "spindle impending failure start unit times too high",
-	0x5d57: "spindle impending failure channel parametrics",
-	0x5d58: "spindle impending failure controller detected",
-	0x5d59: "spindle impending failure throughput performance",
-	0x5d5a: "spindle impending failure seek time performance",
-	0x5d5b: "spindle impending failure spin-up retry count",
-	0x5d5c: "spindle impending failure drive calibration retry count",
-	0x5d60: "firmware impending failure general hard drive failure",
-	0x5d61: "firmware impending failure drive error rate too high",
-	0x5d62: "firmware impending failure data error rate too high",
-	0x5d63: "firmware impending failure seek error rate too high",
-	0x5d64: "firmware impending failure too many block reassigns",
-	0x5d65: "firmware impending failure access times too high",
-	0x5d66: "firmware impending failure start unit times too high",
-	0x5d67: "firmware impending failure channel parametrics",
-	0x5d68: "firmware impending failure controller detected",
-	0x5d69: "firmware impending failure throughput performance",
-	0x5d6a: "firmware impending failure seek time performance",
-	0x5d6b: "firmware impending failure spin-up retry count",
-	0x5d6c: "firmware impending failure drive calibration retry count",
-	0x5d73: "media impending failure endurance limit met",
-	0x5dff: "failure prediction threshold exceeded (false)",
-	0x5e00: "low power condition on",
-	0x5e01: "idle condition activated by timer",
-	0x5e02: "standby condition activated by timer",
-	0x5e03: "idle condition activated by command",
-	0x5e04: "standby condition activated by command",
-	0x5e05: "idle_b condition activated by timer",
-	0x5e06: "idle_b condition activated by command",
-	0x5e07: "idle_c condition activated by timer",
-	0x5e08: "idle_c condition activated by command",
-	0x5e09: "standby_y condition activated by timer",
-	0x5e0a: "standby_y condition activated by command",
-	0x5e41: "power state change to active",
-	0x5e42: "power state change to idle",
-	0x5e43: "power state change to standby",
-	0x5e45: "power state change to sleep",
-	0x5e47: "power state change to device control",
-	0x6000: "lamp failure",
-	0x6100: "video acquisition error",
-	0x6101: "unable to acquire video",
-	0x6102: "out of focus",
-	0x6200: "scan head positioning error",
-	0x6300: "end of user area encountered on this track",
-	0x6301: "packet does not fit in available space",
-	0x6400: "illegal mode for this track",
-	0x6401: "invalid packet size",
-	0x6500: "voltage fault",
-	0x6600: "automatic document feeder cover up",
-	0x6601: "automatic document feeder lift up",
-	0x6602: "document jam in automatic document feeder",
-	0x6603: "document miss feed automatic in document feeder",
-	0x6700: "configuration failure",
-	0x6701: "configuration of incapable logical units failed",
-	0x6702: "add logical unit failed",
-	0x6703: "modification of logical unit failed",
-	0x6704: "exchange of logical unit failed",
-	0x6705: "remove of logical unit failed",
-	0x6706: "attachment of logical unit failed",
-	0x6707: "creation of logical unit failed",
-	0x6708: "assign failure occurred",
-	0x6709: "multiply assigned logical unit",
-	0x670a: "set target port groups command failed",
-	0x670b: "ata device feature not enabled",
-	0x670c: "command rejected",
-	0x670d: "explicit bind not allowed",
-	0x6800: "logical unit not configured",
-	0x6801: "subsidiary logical unit not configured",
-	0x6900: "data loss on logical unit",
-	0x6901: "multiple logical unit failures",
-	0x6902: "parity/data mismatch",
-	0x6a00: "informational, refer to log",
-	0x6b00: "state change has occurred",
-	0x6b01: "redundancy level got better",
-	0x6b02: "redundancy level got worse",
-	0x6c00: "rebuild failure occurred",
-	0x6d00: "recalculate failure occurred",
-	0x6e00: "command to logical unit failed",
-	0x6f00: "copy protection key exchange failure - authentication",
-	0x6f01: "copy protection key exchange failure - key not present",
-	0x6f02: "copy protection key exchange failure - key not established",
-	0x6f03: "read of scrambled sector without authentication",
-	0x6f04: "media region code is mismatched to logical unit region",
-	0x6f05: "drive region must be permanent/region reset count error",
-	0x6f06: "insufficient block count for binding nonce recording",
-	0x6f07: "conflict in binding nonce recording",
-	0x6f08: "insufficient permission",
-	0x6f09: "invalid drive-host pairing server",
-	0x6f0a: "drive-host pairing suspended",
-	0x7100: "decompression exception long algorithm id",
-	0x7200: "session fixation error",
-	0x7201: "session fixation error writing lead-in",
-	0x7202: "session fixation error writing lead-out",
-	0x7203: "session fixation error - incomplete track in session",
-	0x7204: "empty or partially written reserved track",
-	0x7205: "no more track reservations allowed",
-	0x7206: "rmz extension is not allowed",
-	0x7207: "no more test zone extensions are allowed",
-	0x7300: "cd control error",
-	0x7301: "power calibration area almost full",
-	0x7302: "power calibration area is full",
-	0x7303: "power calibration area error",
-	0x7304: "program memory area update failure",
-	0x7305: "program memory area is full",
-	0x7306: "rma/pma is almost full",
-	0x7310: "current power calibration area almost full",
-	0x7311: "current power calibration area is full",
-	0x7317: "rdz is full",
-	0x7400: "security error",
-	0x7401: "unable to decrypt data",
-	0x7402: "unencrypted data encountered while decrypting",
-	0x7403: "incorrect data encryption key",
-	0x7404: "cryptographic integrity validation failed",
-	0x7405: "error decrypting data",
-	0x7406: "unknown signature verification key",
-	0x7407: "encryption parameters not useable",
-	0x7408: "digital signature validation failure",
-	0x7409: "encryption mode mismatch on read",
-	0x740a: "encrypted block not raw read enabled",
-	0x740b: "incorrect encryption parameters",
-	0x740c: "unable to decrypt parameter list",
-	0x740d: "encryption algorithm disabled",
-	0x7410: "sa creation parameter value invalid",
-	0x7411: "sa creation parameter value rejected",
-	0x7412: "invalid sa usage",
-	0x7421: "data encryption configuration prevented",
-	0x7430: "sa creation parameter not supported",
-	0x7440: "authentication failed",
-	0x7461: "external data encryption key manager access error",
-	0x7462: "external data encryption key manager error",
-	0x7463: "external data encryption key not found",
-	0x7464: "external data encryption request not authorized",
-	0x746e: "external data encryption control timeout",
-	0x746f: "external data encryption control error",
-	0x7471: "logical unit access not authorized",
-	0x7479: "security conflict in translated device",
-}
diff --git a/metropolis/pkg/smbios/BUILD.bazel b/metropolis/pkg/smbios/BUILD.bazel
deleted file mode 100644
index f29f559..0000000
--- a/metropolis/pkg/smbios/BUILD.bazel
+++ /dev/null
@@ -1,11 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "smbios",
-    srcs = [
-        "smbios.go",
-        "structures.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/smbios",
-    visibility = ["//visibility:public"],
-)
diff --git a/metropolis/pkg/smbios/smbios.go b/metropolis/pkg/smbios/smbios.go
deleted file mode 100644
index e9bcfc3..0000000
--- a/metropolis/pkg/smbios/smbios.go
+++ /dev/null
@@ -1,269 +0,0 @@
-// Package smbios implements parsing of SMBIOS data structures.
-// SMBIOS data is commonly populated by platform firmware to convey various
-// metadata (including name, vendor, slots and serial numbers) about the
-// platform to the operating system.
-// The SMBIOS standard is maintained by DMTF and available at
-// https://www.dmtf.org/sites/default/files/standards/documents/
-// DSP0134_3.6.0.pdf. The rest of this package just refers to it as "the
-// standard".
-package smbios
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"reflect"
-	"strings"
-)
-
-// See spec section 6.1.2
-type structureHeader struct {
-	// Types 128 through 256 are reserved for OEM and system-specific use.
-	Type uint8
-	// Length of the structure including this header, excluding the string
-	// set.
-	Length uint8
-	// Unique handle for this structure.
-	Handle uint16
-}
-
-type Structure struct {
-	Type             uint8
-	Handle           uint16
-	FormattedSection []byte
-	Strings          []string
-}
-
-// Table represents a decoded SMBIOS table consisting of its structures.
-// A few known structures are parsed if present, the rest is put into
-// Structures unparsed.
-type Table struct {
-	BIOSInformationRaw       *BIOSInformationRaw
-	SystemInformationRaw     *SystemInformationRaw
-	BaseboardsInformationRaw []*BaseboardInformationRaw
-	SystemSlotsRaw           []*SystemSlotRaw
-	MemoryDevicesRaw         []*MemoryDeviceRaw
-
-	Structures []Structure
-}
-
-const (
-	structTypeInactive   = 126
-	structTypeEndOfTable = 127
-)
-
-func Unmarshal(table *bufio.Reader) (*Table, error) {
-	var tbl Table
-	for {
-		var structHdr structureHeader
-		if err := binary.Read(table, binary.LittleEndian, &structHdr); err != nil {
-			if err == io.EOF {
-				// Be tolerant of EOFs on structure boundaries even though
-				// the EOT marker is specified as a type 127 structure.
-				break
-			}
-			return nil, fmt.Errorf("unable to read structure header: %w", err)
-		}
-		if int(structHdr.Length) < binary.Size(structHdr) {
-			return nil, fmt.Errorf("invalid structure: header length (%d) smaller than header", structHdr.Length)
-		}
-		if structHdr.Type == structTypeEndOfTable {
-			break
-		}
-		var s Structure
-		s.Type = structHdr.Type
-		s.Handle = structHdr.Handle
-		s.FormattedSection = make([]byte, structHdr.Length-uint8(binary.Size(structHdr)))
-		if _, err := io.ReadFull(table, s.FormattedSection); err != nil {
-			return nil, fmt.Errorf("error while reading structure (handle %d) contents: %w", structHdr.Handle, err)
-		}
-		// Read string-set
-		for {
-			str, err := table.ReadString(0x00)
-			if err != nil {
-				return nil, fmt.Errorf("error while reading string table (handle %d): %w", structHdr.Handle, err)
-			}
-			// Remove trailing null byte
-			str = strings.TrimSuffix(str, "\x00")
-			// Don't populate a zero-length first string if the string-set is
-			// empty.
-			if len(str) != 0 {
-				s.Strings = append(s.Strings, str)
-			}
-			maybeTerminator, err := table.ReadByte()
-			if err != nil {
-				return nil, fmt.Errorf("error while reading string table (handle %d): %w", structHdr.Handle, err)
-			}
-			if maybeTerminator == 0 {
-				// We have a valid string-set terminator, exit the loop
-				break
-			}
-			// The next byte was not a terminator, put it back
-			if err := table.UnreadByte(); err != nil {
-				panic(err) // Cannot happen operationally
-			}
-		}
-		switch structHdr.Type {
-		case structTypeInactive:
-			continue
-		case structTypeBIOSInformation:
-			var biosInfo BIOSInformationRaw
-			if err := UnmarshalStructureRaw(s, &biosInfo); err != nil {
-				return nil, fmt.Errorf("failed unmarshaling BIOS Information: %w", err)
-			}
-			tbl.BIOSInformationRaw = &biosInfo
-		case structTypeSystemInformation:
-			var systemInfo SystemInformationRaw
-			if err := UnmarshalStructureRaw(s, &systemInfo); err != nil {
-				return nil, fmt.Errorf("failed unmarshaling System Information: %w", err)
-			}
-			tbl.SystemInformationRaw = &systemInfo
-		case structTypeBaseboardInformation:
-			var baseboardInfo BaseboardInformationRaw
-			if err := UnmarshalStructureRaw(s, &baseboardInfo); err != nil {
-				return nil, fmt.Errorf("failed unmarshaling Baseboard Information: %w", err)
-			}
-			tbl.BaseboardsInformationRaw = append(tbl.BaseboardsInformationRaw, &baseboardInfo)
-		case structTypeSystemSlot:
-			var sysSlot SystemSlotRaw
-			if err := UnmarshalStructureRaw(s, &sysSlot); err != nil {
-				return nil, fmt.Errorf("failed unmarshaling System Slot: %w", err)
-			}
-			tbl.SystemSlotsRaw = append(tbl.SystemSlotsRaw, &sysSlot)
-		case structTypeMemoryDevice:
-			var memoryDev MemoryDeviceRaw
-			if err := UnmarshalStructureRaw(s, &memoryDev); err != nil {
-				return nil, fmt.Errorf("failed unmarshaling Memory Device: %w", err)
-			}
-			tbl.MemoryDevicesRaw = append(tbl.MemoryDevicesRaw, &memoryDev)
-		default:
-			// Just pass through the raw structure
-			tbl.Structures = append(tbl.Structures, s)
-		}
-	}
-	return &tbl, nil
-}
-
-// Version contains a two-part version number consisting of a major and minor
-// version. This is a common structure in SMBIOS.
-type Version struct {
-	Major uint8
-	Minor uint8
-}
-
-func (v *Version) String() string {
-	return fmt.Sprintf("%d.%d", v.Major, v.Minor)
-}
-
-// AtLeast returns true if the version in v is at least the given version.
-func (v *Version) AtLeast(major, minor uint8) bool {
-	if v.Major > major {
-		return true
-	}
-	return v.Major == major && v.Minor >= minor
-}
-
-// UnmarshalStructureRaw unmarshals a SMBIOS structure into a Go struct which
-// has some constraints. The first two fields need to be a `uint16 handle` and
-// a `StructureVersion Version` field. After that any number of fields may
-// follow as long as they are either of type `string` (which will be looked up
-// in the string table) or readable by binary.Read. To determine the structure
-// version, the smbios_min_vers struct tag needs to be put on the first field
-// of a newer structure version. The version implicitly starts with 2.0.
-// The version determined is written to the second target struct field.
-// Fields which do not have a fixed size need to be typed as a slice and tagged
-// with smbios_repeat set to the name of the field containing the count. The
-// count field itself needs to be some width of uint.
-func UnmarshalStructureRaw(rawStruct Structure, target any) error {
-	v := reflect.ValueOf(target)
-	if v.Kind() != reflect.Pointer {
-		return errors.New("target needs to be a pointer")
-	}
-	v = v.Elem()
-	if v.Kind() != reflect.Struct {
-		return errors.New("target needs to be a pointer to a struct")
-	}
-	v.Field(0).SetUint(uint64(rawStruct.Handle))
-	r := bytes.NewReader(rawStruct.FormattedSection)
-	completedVersion := Version{Major: 0, Minor: 0}
-	parsingVersion := Version{Major: 2, Minor: 0}
-	numFields := v.NumField()
-	hasAborted := false
-	for i := 2; i < numFields; i++ {
-		fieldType := v.Type().Field(i)
-		if minVer := fieldType.Tag.Get("smbios_min_ver"); minVer != "" {
-			var ver Version
-			if _, err := fmt.Sscanf(minVer, "%d.%d", &ver.Major, &ver.Minor); err != nil {
-				panic(fmt.Sprintf("invalid smbios_min_ver tag in %v: %v", fieldType.Name, err))
-			}
-			completedVersion = parsingVersion
-			parsingVersion = ver
-		}
-		f := v.Field(i)
-
-		if repeat := fieldType.Tag.Get("smbios_repeat"); repeat != "" {
-			repeatCountField := v.FieldByName(repeat)
-			if !repeatCountField.IsValid() {
-				panic(fmt.Sprintf("invalid smbios_repeat tag in %v: no such field %q", fieldType.Name, repeat))
-			}
-			if !repeatCountField.CanUint() {
-				panic(fmt.Sprintf("invalid smbios_repeat tag in %v: referenced field %q is not uint-compatible", fieldType.Name, repeat))
-			}
-			if f.Kind() != reflect.Slice {
-				panic(fmt.Sprintf("cannot repeat a field (%q) which is not a slice", fieldType.Name))
-			}
-			if repeatCountField.Uint() > 65536 {
-				return fmt.Errorf("refusing to read a field repeated more than 65536 times (given %d times)", repeatCountField.Uint())
-			}
-			repeatCount := int(repeatCountField.Uint())
-			f.Set(reflect.MakeSlice(f.Type(), repeatCount, repeatCount))
-			for j := 0; j < repeatCount; j++ {
-				fs := f.Index(j)
-				err := unmarshalField(&rawStruct, fs, r)
-				if errors.Is(err, io.EOF) {
-					hasAborted = true
-					break
-				} else if err != nil {
-					return fmt.Errorf("error unmarshaling field %q: %w", fieldType.Name, err)
-				}
-			}
-		}
-		err := unmarshalField(&rawStruct, f, r)
-		if errors.Is(err, io.EOF) {
-			hasAborted = true
-			break
-		} else if err != nil {
-			return fmt.Errorf("error unmarshaling field %q: %w", fieldType.Name, err)
-		}
-	}
-	if !hasAborted {
-		completedVersion = parsingVersion
-	}
-	if completedVersion.Major == 0 {
-		return fmt.Errorf("structure's formatted section (%d bytes) is smaller than its minimal size", len(rawStruct.FormattedSection))
-	}
-	v.Field(1).Set(reflect.ValueOf(completedVersion))
-	return nil
-}
-
-func unmarshalField(rawStruct *Structure, field reflect.Value, r *bytes.Reader) error {
-	if field.Kind() == reflect.String {
-		var stringTableIdx uint8
-		err := binary.Read(r, binary.LittleEndian, &stringTableIdx)
-		if err != nil {
-			return err
-		}
-		if stringTableIdx == 0 {
-			return nil
-		}
-		if int(stringTableIdx)-1 >= len(rawStruct.Strings) {
-			return fmt.Errorf("string index (%d) bigger than string table (%q)", stringTableIdx-1, rawStruct.Strings)
-		}
-		field.SetString(rawStruct.Strings[stringTableIdx-1])
-		return nil
-	}
-	return binary.Read(r, binary.LittleEndian, field.Addr().Interface())
-}
diff --git a/metropolis/pkg/smbios/structures.go b/metropolis/pkg/smbios/structures.go
deleted file mode 100644
index 4c75709..0000000
--- a/metropolis/pkg/smbios/structures.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package smbios
-
-import (
-	"time"
-)
-
-const (
-	structTypeBIOSInformation      = 0
-	structTypeSystemInformation    = 1
-	structTypeBaseboardInformation = 2
-	structTypeSystemSlot           = 9
-	structTypeMemoryDevice         = 17
-)
-
-// UEFISpecificationSupported is a bitmask for accessing the third bit,
-// which displays the support of UEFI in the smbios structure. For more
-// information check the SMBIOS documentation at Table 7.1.2.2 Bit 3.
-const UEFISpecificationSupported = 1 << 3
-
-// BIOSInformationRaw contains decoded data from the BIOS Information structure
-// (SMBIOS Type 0). See Table 6 in the specification for detailed documentation
-// about the individual fields. Note that structure versions 2.1 and 2.2 are
-// "invented" here as both characteristics extensions bytes were optional
-// between 2.0 and 2.4.
-type BIOSInformationRaw struct {
-	Handle                                 uint16
-	StructureVersion                       Version
-	Vendor                                 string
-	BIOSVersion                            string
-	BIOSStartingAddressSegment             uint16
-	BIOSReleaseDate                        string
-	BIOSROMSize                            uint8
-	BIOSCharacteristics                    uint64
-	BIOSCharacteristicsExtensionByte1      uint8 `smbios_min_ver:"2.1"`
-	BIOSCharacteristicsExtensionByte2      uint8 `smbios_min_ver:"2.2"`
-	SystemBIOSMajorRelease                 uint8 `smbios_min_ver:"2.4"`
-	SystemBIOSMinorRelease                 uint8
-	EmbeddedControllerFirmwareMajorRelease uint8
-	EmbeddedControllerFirmwareMinorRelease uint8
-	ExtendedBIOSROMSize                    uint16 `smbios_min_ver:"3.1"`
-}
-
-// ROMSizeBytes returns the ROM size in bytes
-func (rb *BIOSInformationRaw) ROMSizeBytes() uint64 {
-	if rb.StructureVersion.AtLeast(3, 1) && rb.BIOSROMSize == 0xFF {
-		// Top 2 bits are SI prefix (starting at mega, i.e. 1024^2), lower 14
-		// are value. x*1024^n => x << log2(1024)*n => x << 10*n
-		return uint64(rb.ExtendedBIOSROMSize&0x3fff) << 10 * uint64(rb.ExtendedBIOSROMSize&0xc00+2)
-	} else {
-		// (n+1) * 64KiB
-		return (uint64(rb.BIOSROMSize) + 1) * (64 * 1024)
-	}
-}
-
-// ReleaseDate returns the release date of the BIOS as a time.Time value.
-func (rb *BIOSInformationRaw) ReleaseDate() (time.Time, error) {
-	return time.Parse("01/02/2006", rb.BIOSReleaseDate)
-}
-
-// SystemInformationRaw contains decoded data from the System Information
-// structure (SMBIOS Type 1). See Table 10 in the specification for detailed
-// documentation about the individual fields.
-type SystemInformationRaw struct {
-	Handle           uint16
-	StructureVersion Version
-	Manufacturer     string
-	ProductName      string
-	Version          string
-	SerialNumber     string
-	UUID             [16]byte `smbios_min_ver:"2.1"`
-	WakeupType       uint8
-	SKUNumber        string `smbios_min_ver:"2.4"`
-	Family           string
-}
-
-// BaseboardInformationRaw contains decoded data from the BIOS Information
-// structure (SMBIOS Type 3). See Table 13 in the specification for detailed
-// documentation about the individual fields.
-type BaseboardInformationRaw struct {
-	Handle                         uint16
-	StructureVersion               Version
-	Manufacturer                   string
-	Product                        string
-	Version                        string
-	SerialNumber                   string
-	AssetTag                       string `smbios_min_ver:"2.1"`
-	FeatureFlags                   uint8
-	LocationInChassis              string
-	ChassisHandle                  uint16
-	BoardType                      uint8
-	NumberOfContainedObjectHandles uint8
-	ContainedObjectHandles         []uint16 `smbios_repeat:"NumberOfContainedObjectHandles"`
-}
-
-// SystemSlotRaw contains decoded data from the System Slot structure
-// (SMBIOS Type 9). See Table 44 in the specification for detailed documentation
-// about the individual fields.
-type SystemSlotRaw struct {
-	Handle               uint16
-	StructureVersion     Version
-	SlotDesignation      string
-	SlotType             uint8
-	SlotDataBusWidth     uint8
-	CurrentUsage         uint8
-	SlotLength           uint8
-	SlotID               uint16
-	SlotCharacteristics1 uint8
-	SlotCharacteristics2 uint8  `smbios_min_ver:"2.1"`
-	SegmentGroupNumber   uint16 `smbios_min_ver:"2.6"`
-	BusNumber            uint8
-	DeviceFunctionNumber uint8
-	DataBusWidth         uint8 `smbios_min_ver:"3.2"`
-	PeerGroupingCount    uint8
-	PeerGroups           []SystemSlotPeerRaw `smbios_repeat:"PeerGroupingCount"`
-	SlotInformation      uint8               `smbios_min_ver:"3.4"`
-	SlotPhysicalWidth    uint8
-	SlotPitch            uint16
-	SlotHeight           uint8 `smbios_min_ver:"3.5"`
-}
-
-type SystemSlotPeerRaw struct {
-	SegmentGroupNumber   uint16
-	BusNumber            uint8
-	DeviceFunctionNumber uint8
-	DataBusWidth         uint8
-}
-
-// MemoryDeviceRaw contains decoded data from the BIOS Information structure
-// (SMBIOS Type 17). See Table 76 in the specification for detailed
-// documentation about the individual fields.
-type MemoryDeviceRaw struct {
-	Handle                                  uint16
-	StructureVersion                        Version
-	PhysicalMemoryArrayHandle               uint16 `smbios_min_ver:"2.1"`
-	MemoryErrorInformationHandle            uint16
-	TotalWidth                              uint16
-	DataWidth                               uint16
-	Size                                    uint16
-	FormFactor                              uint8
-	DeviceSet                               uint8
-	DeviceLocator                           string
-	BankLocator                             string
-	MemoryType                              uint8
-	TypeDetail                              uint16
-	Speed                                   uint16 `smbios_min_ver:"2.3"`
-	Manufacturer                            string
-	SerialNumber                            string
-	AssetTag                                string
-	PartNumber                              string
-	Attributes                              uint8  `smbios_min_ver:"2.6"`
-	ExtendedSize                            uint32 `smbios_min_ver:"2.7"`
-	ConfiguredMemorySpeed                   uint16
-	MinimumVoltage                          uint16 `smbios_min_ver:"2.8"`
-	MaximumVoltage                          uint16
-	ConfiguredVoltage                       uint16
-	MemoryTechnology                        uint8 `smbios_min_ver:"3.2"`
-	MemoryOperatingModeCapability           uint16
-	FirmwareVersion                         uint8
-	ModuleManufacturerID                    uint16
-	ModuleProductID                         uint16
-	MemorySubsystemControllerManufacturerID uint16
-	MemorySubsystemControllerProductID      uint16
-	NonVolatileSize                         uint64
-	VolatileSize                            uint64
-	CacheSize                               uint64
-	LogicalSize                             uint64
-	ExtendedSpeed                           uint32 `smbios_min_ver:"3.3"`
-	ExtendedConfiguredMemorySpeed           uint32
-}
-
-const (
-	kibLeftShift = 10 // 2^10 = 1KiB
-	mibLeftShift = 20 // 2^20 = 1MiB
-)
-
-func (md *MemoryDeviceRaw) SizeBytes() (uint64, bool) {
-	if md.Size == 0 || md.Size == 0xFFFF {
-		// Device unpopulated / unknown memory, return ok false
-		return 0, false
-	}
-	if md.Size == 0x7FFF && md.StructureVersion.AtLeast(2, 7) {
-		// Bit 31 is reserved, rest is memory size in MiB
-		return uint64(md.ExtendedSize&0x7FFFFFFF) << mibLeftShift, true
-	}
-	// Bit 15 flips between MiB and KiB, rest is size
-	var shift uint64 = mibLeftShift
-	if (md.Size & 0x8000) != 0 { // Bit set means KiB
-		shift = kibLeftShift
-	}
-	return uint64(md.Size&0x7FFF) << shift, true
-}
diff --git a/metropolis/pkg/socksproxy/BUILD.bazel b/metropolis/pkg/socksproxy/BUILD.bazel
deleted file mode 100644
index c953dbf..0000000
--- a/metropolis/pkg/socksproxy/BUILD.bazel
+++ /dev/null
@@ -1,18 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "socksproxy",
-    srcs = [
-        "protocol.go",
-        "socksproxy.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/socksproxy",
-    visibility = ["//visibility:public"],
-)
-
-go_test(
-    name = "socksproxy_test",
-    srcs = ["socksproxy_test.go"],
-    embed = [":socksproxy"],
-    deps = ["@org_golang_x_net//proxy"],
-)
diff --git a/metropolis/pkg/socksproxy/protocol.go b/metropolis/pkg/socksproxy/protocol.go
deleted file mode 100644
index d920099..0000000
--- a/metropolis/pkg/socksproxy/protocol.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package socksproxy
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"net"
-)
-
-// readMethods implements RFC1928 3. “Procedure for TCP-based clients”,
-// paragraph 3. It receives a 'version identifier/method selection message' from
-// r and returns the methods supported by the client.
-func readMethods(r io.Reader) ([]method, error) {
-	var ver uint8
-	if err := binary.Read(r, binary.BigEndian, &ver); err != nil {
-		return nil, fmt.Errorf("when reading ver: %w", err)
-	}
-	if ver != 5 {
-		return nil, fmt.Errorf("unimplemented version %d", ver)
-	}
-	var nmethods uint8
-	if err := binary.Read(r, binary.BigEndian, &nmethods); err != nil {
-		return nil, fmt.Errorf("when reading nmethods: %w", err)
-	}
-	methodBytes := make([]byte, nmethods)
-	if _, err := io.ReadFull(r, methodBytes); err != nil {
-		return nil, fmt.Errorf("while reading methods: %w", err)
-	}
-	methods := make([]method, nmethods)
-	for i, m := range methodBytes {
-		methods[i] = method(m)
-	}
-	return methods, nil
-}
-
-// writeMethod implements RFC1928 3. “Procedure for TCP-based clients”,
-// paragraph 5. It sends a selected method to w.
-func writeMethod(w io.Writer, m method) error {
-	if err := binary.Write(w, binary.BigEndian, uint8(5)); err != nil {
-		return fmt.Errorf("while writing version: %w", err)
-	}
-	if err := binary.Write(w, binary.BigEndian, uint8(m)); err != nil {
-		return fmt.Errorf("while writing method: %w", err)
-	}
-	return nil
-}
-
-// method is an RFC1928 authentication method.
-type method uint8
-
-const (
-	methodNoAuthenticationRequired method = 0
-	methodNoAcceptableMethods      method = 0xff
-)
-
-// negotiateMethod implements the entire flow RFC1928 3. “Procedure for
-// TCP-based clients” by negotiating for the 'NO AUTHENTICATION REQUIRED'
-// authentication method, and failing otherwise.
-func negotiateMethod(rw io.ReadWriter) error {
-	methods, err := readMethods(rw)
-	if err != nil {
-		return fmt.Errorf("could not read methods: %w", err)
-	}
-
-	found := false
-	for _, m := range methods {
-		if m == methodNoAuthenticationRequired {
-			found = true
-			break
-		}
-	}
-	if !found {
-		// Discard error, as this connection is failed anyway.
-		writeMethod(rw, methodNoAcceptableMethods)
-		return fmt.Errorf("no acceptable methods found")
-	}
-	if err := writeMethod(rw, methodNoAuthenticationRequired); err != nil {
-		return fmt.Errorf("could not respond with method: %w", err)
-	}
-	return nil
-}
-
-var (
-	// errNotConnect is returned by readRequest when the request contained some
-	// other request than CONNECT.
-	errNotConnect = errors.New("not CONNECT")
-	// errUnsupportedAddressType is returned by readRequest when the request
-	// contained some unsupported address type (not IPv4 or IPv6).
-	errUnsupportedAddressType = errors.New("unsupported address type")
-)
-
-// readRequest implements RFC1928 4. “Requests” by reading a SOCKS request from
-// r and ensuring it's an IPv4/IPv6 CONNECT request. The parsed address/port
-// pair is then returned.
-func readRequest(r io.Reader) (*connectRequest, error) {
-	header := struct {
-		Ver  uint8
-		Cmd  uint8
-		Rsv  uint8
-		Atyp uint8
-	}{}
-	if err := binary.Read(r, binary.BigEndian, &header); err != nil {
-		return nil, fmt.Errorf("when reading request header: %w", err)
-	}
-
-	if header.Ver != 5 {
-		return nil, fmt.Errorf("invalid version %d", header.Ver)
-	}
-	if header.Cmd != 1 {
-		return nil, errNotConnect
-	}
-
-	var addrBytes []byte
-	switch header.Atyp {
-	case 1:
-		addrBytes = make([]byte, 4)
-	case 4:
-		addrBytes = make([]byte, 16)
-	default:
-		return nil, errUnsupportedAddressType
-	}
-	if _, err := io.ReadFull(r, addrBytes); err != nil {
-		return nil, fmt.Errorf("when reading address: %w", err)
-	}
-
-	var port uint16
-	if err := binary.Read(r, binary.BigEndian, &port); err != nil {
-		return nil, fmt.Errorf("when reading port: %w", err)
-	}
-
-	return &connectRequest{
-		address: addrBytes,
-		port:    port,
-	}, nil
-}
-
-type connectRequest struct {
-	address net.IP
-	port    uint16
-}
-
-// Reply is an RFC1928 6. “Replies” reply field value. It's returned to the
-// client by internal socksproxy code or a Handler to signal a success or error
-// condition within an RFC1928 reply.
-type Reply uint8
-
-const (
-	ReplySucceeded               Reply = 0
-	ReplyGeneralFailure          Reply = 1
-	ReplyConnectionNotAllowed    Reply = 2
-	ReplyNetworkUnreachable      Reply = 3
-	ReplyHostUnreachable         Reply = 4
-	ReplyConnectionRefused       Reply = 5
-	ReplyTTLExpired              Reply = 6
-	ReplyCommandNotSupported     Reply = 7
-	ReplyAddressTypeNotSupported Reply = 8
-)
-
-// writeReply implements RFC1928 6. “Replies” by sending a given Reply, bind
-// address and bind port to w. An error is returned if the given bind address is
-// invaild, or if a communication error occurred.
-func writeReply(w io.Writer, r Reply, bindAddr net.IP, bindPort uint16) error {
-	var atyp uint8
-	switch len(bindAddr) {
-	case 4:
-		atyp = 1
-	case 16:
-		atyp = 4
-	default:
-		return fmt.Errorf("unsupported bind address type")
-	}
-
-	header := struct {
-		Ver   uint8
-		Reply uint8
-		Rsv   uint8
-		Atyp  uint8
-	}{
-		Ver:   5,
-		Reply: uint8(r),
-		Rsv:   0,
-		Atyp:  atyp,
-	}
-	if err := binary.Write(w, binary.BigEndian, &header); err != nil {
-		return fmt.Errorf("when writing reply header: %w", err)
-	}
-	if _, err := w.Write(bindAddr); err != nil {
-		return fmt.Errorf("when writing reply bind address: %w", err)
-	}
-	if err := binary.Write(w, binary.BigEndian, bindPort); err != nil {
-		return fmt.Errorf("when writing reply bind port: %w", err)
-	}
-	return nil
-}
diff --git a/metropolis/pkg/socksproxy/socksproxy.go b/metropolis/pkg/socksproxy/socksproxy.go
deleted file mode 100644
index 808ae1f..0000000
--- a/metropolis/pkg/socksproxy/socksproxy.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// package socksproxy implements a limited subset of the SOCKS 5 (RFC1928)
-// protocol in the form of a pluggable Proxy object. However, this
-// implementation is _not_ RFC1928 compliant, as it does not implement GSSAPI
-// (which is mandated by the spec). It currently only implements CONNECT
-// requests to IPv4/IPv6 addresses. It also doesn't implement any
-// timeout/keepalive system for killing inactive connections.
-//
-// The intended use of the library is internally within Metropolis development
-// environments for contacting test clusters. The code is simple and robust, but
-// not really productionized (as noted above - no timeouts and no authentication
-// make it a bad idea to ever expose this proxy server publicly).
-//
-// There are multiple other, existing Go SOCKS4/5 server implementations, but
-// many of them are either not context aware, part of a larger project (and thus
-// difficult to extract) or are brand new/untested/bleeding edge code.
-package socksproxy
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"net"
-	"strconv"
-)
-
-// Handler should be implemented by socksproxy users to allow SOCKS connections
-// to be proxied in any other way than via the HostHandler.
-type Handler interface {
-	// Connect is called by the server any time a SOCKS client sends a CONNECT
-	// request. The function should return a ConnectResponse describing some
-	// 'backend' connection, ie. the connection that will then be exposed to the
-	// SOCKS client.
-	//
-	// Connect should return with Error set to a non-default value to abort/deny the
-	// connection request.
-	//
-	// The underlying incoming socket is managed by the proxy server and is not
-	// visible to the client. However, any sockets/connections/files opened by the
-	// Handler should be cleaned up by tying them to the given context, which will
-	// be canceled whenever the connection is closed.
-	Connect(context.Context, *ConnectRequest) *ConnectResponse
-}
-
-// ConnectRequest represents a pending CONNECT request from a client.
-type ConnectRequest struct {
-	// Address is an IPv4 or IPv6 address that the client requested to connect to.
-	// This address might be invalid/malformed/internal, and the Connect method
-	// should sanitize it before using it.
-	Address net.IP
-	// Port is the TCP port number that the client requested to connect to.
-	Port uint16
-}
-
-// ConnectResponse indicates a 'backend' connection that the proxy should expose
-// to the client, or an error if the connection cannot be made.
-type ConnectResponse struct {
-	// Error will cause an error to be returned if it is anything else than the
-	// default value (ReplySucceeded).
-	Error Reply
-
-	// Backend is the ReadWriteCloser that will be bridged over to the connecting
-	// client if no Error is set.
-	Backend io.ReadWriteCloser
-	// LocalAddress is the IP address that is returned to the client as the local
-	// address of the newly established backend connection.
-	LocalAddress net.IP
-	// LocalPort is the local TCP port number that is returned to the client as the
-	// local port of the newly established backend connection.
-	LocalPort uint16
-}
-
-// ConnectResponseFromConn builds a ConnectResponse from a net.Conn. This can be
-// used by custom Handlers to easily return a ConnectResponse for a newly
-// established net.Conn, eg. from a Dial call.
-//
-// An error is returned if the given net.Conn does not carry a properly formed
-// LocalAddr.
-func ConnectResponseFromConn(c net.Conn) (*ConnectResponse, error) {
-	laddr := c.LocalAddr().String()
-	host, port, err := net.SplitHostPort(laddr)
-	if err != nil {
-		return nil, fmt.Errorf("could not parse LocalAddr %q: %w", laddr, err)
-	}
-	addr := net.ParseIP(host)
-	if addr == nil {
-		return nil, fmt.Errorf("could not parse LocalAddr host %q as IP", host)
-	}
-	portNum, err := strconv.ParseUint(port, 10, 16)
-	if err != nil {
-		return nil, fmt.Errorf("could not parse LocalAddr port %q", port)
-	}
-	return &ConnectResponse{
-		Backend:      c,
-		LocalAddress: addr,
-		LocalPort:    uint16(portNum),
-	}, nil
-}
-
-type hostHandler struct{}
-
-func (h *hostHandler) Connect(ctx context.Context, req *ConnectRequest) *ConnectResponse {
-	port := fmt.Sprintf("%d", req.Port)
-	addr := net.JoinHostPort(req.Address.String(), port)
-	s, err := net.Dial("tcp", addr)
-	if err != nil {
-		log.Printf("HostHandler could not dial %q: %v", addr, err)
-		return &ConnectResponse{Error: ReplyConnectionRefused}
-	}
-	go func() {
-		<-ctx.Done()
-		s.Close()
-	}()
-	res, err := ConnectResponseFromConn(s)
-	if err != nil {
-		log.Printf("HostHandler could not build response: %v", err)
-		return &ConnectResponse{Error: ReplyGeneralFailure}
-	}
-	return res
-}
-
-var (
-	// HostHandler is an unsafe SOCKS5 proxy Handler which passes all incoming
-	// connections into the local network stack. The incoming addresses/ports are
-	// not sanitized, and as the proxy does not perform authentication, this handler
-	// is an open proxy. This handler should never be used in cases where the proxy
-	// server is publicly available.
-	HostHandler = &hostHandler{}
-)
-
-// Serve runs a SOCKS5 proxy server for a given Handler at a given listener.
-//
-// When the given context is canceled, the server will stop and the listener
-// will be closed. All pending connections will also be canceled and their
-// sockets closed.
-func Serve(ctx context.Context, handler Handler, lis net.Listener) error {
-	go func() {
-		<-ctx.Done()
-		lis.Close()
-	}()
-
-	for {
-		con, err := lis.Accept()
-		if err != nil {
-			// Context cancellation will close listener socket with a generic 'use of closed
-			// network connection' error, translate that back to context error.
-			if ctx.Err() != nil {
-				return ctx.Err()
-			}
-			return err
-		}
-		go handle(ctx, handler, con)
-	}
-}
-
-// handle runs in a goroutine per incoming SOCKS connection. Its lifecycle
-// corresponds to the lifecycle of a running proxy connection.
-func handle(ctx context.Context, handler Handler, con net.Conn) {
-	// ctxR is a per-request context, and will be canceled whenever the handler
-	// exits or the server is stopped.
-	ctxR, ctxRC := context.WithCancel(ctx)
-	defer ctxRC()
-
-	go func() {
-		<-ctxR.Done()
-		con.Close()
-	}()
-
-	// Perform method negotiation with the client.
-	if err := negotiateMethod(con); err != nil {
-		return
-	}
-
-	// Read request from the client and translate problems into early error replies.
-	req, err := readRequest(con)
-	switch {
-	case errors.Is(err, errNotConnect):
-		writeReply(con, ReplyCommandNotSupported, net.IPv4(0, 0, 0, 0), 0)
-		return
-	case errors.Is(err, errUnsupportedAddressType):
-		writeReply(con, ReplyAddressTypeNotSupported, net.IPv4(0, 0, 0, 0), 0)
-		return
-	case err == nil:
-	default:
-		writeReply(con, ReplyGeneralFailure, net.IPv4(0, 0, 0, 0), 0)
-		return
-	}
-
-	// Ask handler.Connect for a backend.
-	conRes := handler.Connect(ctxR, &ConnectRequest{
-		Address: req.address,
-		Port:    req.port,
-	})
-	// Handle programming error when returned value is nil.
-	if conRes == nil {
-		writeReply(con, ReplyGeneralFailure, net.IPv4(0, 0, 0, 0), 0)
-		return
-	}
-	// Handle returned errors.
-	if conRes.Error != ReplySucceeded {
-		writeReply(con, conRes.Error, net.IPv4(0, 0, 0, 0), 0)
-		return
-	}
-
-	// Ensure Bound.* fields are set.
-	if conRes.Backend == nil || conRes.LocalAddress == nil || conRes.LocalPort == 0 {
-		writeReply(con, ReplyGeneralFailure, net.IPv4(0, 0, 0, 0), 0)
-		return
-	}
-	// Send reply.
-	if err := writeReply(con, ReplySucceeded, conRes.LocalAddress, conRes.LocalPort); err != nil {
-		return
-	}
-
-	// Pipe returned backend into connection.
-	go func() {
-		io.Copy(conRes.Backend, con)
-		conRes.Backend.Close()
-	}()
-	io.Copy(con, conRes.Backend)
-	conRes.Backend.Close()
-}
diff --git a/metropolis/pkg/socksproxy/socksproxy_test.go b/metropolis/pkg/socksproxy/socksproxy_test.go
deleted file mode 100644
index 0132ab3..0000000
--- a/metropolis/pkg/socksproxy/socksproxy_test.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package socksproxy
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	"os"
-	"sync/atomic"
-	"testing"
-
-	"golang.org/x/net/proxy"
-)
-
-// TestE2E implements a happy path test by chaining together an HTTP server, a
-// proxy server, a proxy client (from golang.org/x/net) and an HTTP client into
-// an end-to-end test. It uses HostHandler and the actual host network stack for
-// the test HTTP server and test proxy server.
-func TestE2E(t *testing.T) {
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	// Start test HTTP server.
-	lisSrv, err := net.Listen("tcp", "127.0.0.1:0")
-	if err != nil {
-		t.Fatalf("could not bind http listener: %v", err)
-	}
-
-	mux := http.NewServeMux()
-	mux.HandleFunc("/", func(rw http.ResponseWriter, req *http.Request) {
-		fmt.Fprintf(rw, "foo")
-	})
-	go func() {
-		err := http.Serve(lisSrv, mux)
-		if err != nil {
-			t.Errorf("http.Serve: %v", err)
-			return
-		}
-	}()
-
-	// Start proxy server.
-	lisPrx, err := net.Listen("tcp", ":")
-	if err != nil {
-		t.Fatalf("could not bind proxy listener: %v", err)
-	}
-	go func() {
-		err := Serve(ctx, HostHandler, lisPrx)
-		if err != nil && !errors.Is(err, ctx.Err()) {
-			t.Errorf("proxy.Serve: %v", err)
-			return
-		}
-	}()
-
-	// Start proxy client.
-	dialer, err := proxy.SOCKS5("tcp", lisPrx.Addr().String(), nil, proxy.Direct)
-	if err != nil {
-		t.Fatalf("creating SOCKS dialer failed: %v", err)
-	}
-
-	// Create http client.
-	tr := &http.Transport{
-		Dial: dialer.Dial,
-	}
-	cl := &http.Client{
-		Transport: tr,
-	}
-
-	// Perform request and expect 'foo' in response.
-	url := fmt.Sprintf("http://%s/", lisSrv.Addr().String())
-	req, err := http.NewRequest("GET", url, nil)
-	if err != nil {
-		t.Fatalf("creating test request failed: %v", err)
-	}
-	res, err := cl.Do(req)
-	if err != nil {
-		t.Fatalf("test http request failed: %v", err)
-	}
-	defer res.Body.Close()
-	body, _ := io.ReadAll(res.Body)
-	if want, got := "foo", string(body); want != got {
-		t.Errorf("wrong response from HTTP, wanted %q, got %q", want, got)
-	}
-}
-
-// testHandler is a handler which serves /dev/zero and keeps count of the
-// current number of live connections. It's used in TestCancellation to ensure
-// contexts are canceled appropriately.
-type testHandler struct {
-	live int64
-}
-
-func (t *testHandler) Connect(ctx context.Context, req *ConnectRequest) *ConnectResponse {
-	f, _ := os.Open("/dev/zero")
-
-	atomic.AddInt64(&t.live, 1)
-	go func() {
-		<-ctx.Done()
-		atomic.AddInt64(&t.live, -1)
-		f.Close()
-	}()
-
-	return &ConnectResponse{
-		Backend:      f,
-		LocalAddress: net.ParseIP("127.0.0.1"),
-		LocalPort:    42123,
-	}
-}
-
-// TestCancellation ensures request contexts are canceled correctly - when an
-// incoming connection is closed and when the entire server is stopped.
-func TestCancellation(t *testing.T) {
-	handler := &testHandler{}
-
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	// Start proxy server.
-	lisPrx, err := net.Listen("tcp", ":")
-	if err != nil {
-		t.Fatalf("could not bind proxy listener: %v", err)
-	}
-	go func() {
-		err := Serve(ctx, handler, lisPrx)
-		if err != nil && !errors.Is(err, ctx.Err()) {
-			t.Errorf("proxy.Serve: %v", err)
-			return
-		}
-	}()
-
-	// Start proxy client.
-	dialer, err := proxy.SOCKS5("tcp", lisPrx.Addr().String(), nil, proxy.Direct)
-	if err != nil {
-		t.Fatalf("creating SOCKS dialer failed: %v", err)
-	}
-
-	// Open two connections.
-	con1, err := dialer.Dial("tcp", "192.2.0.10:1234")
-	if err != nil {
-		t.Fatalf("Dialing first client failed: %v", err)
-	}
-	con2, err := dialer.Dial("tcp", "192.2.0.10:1234")
-	if err != nil {
-		t.Fatalf("Dialing first client failed: %v", err)
-	}
-
-	// Read some data. This makes sure we're ready to check for the liveness of
-	// currently running connections.
-	io.ReadFull(con1, make([]byte, 3))
-	io.ReadFull(con2, make([]byte, 3))
-
-	// Ensure we have two connections.
-	if want, got := int64(2), atomic.LoadInt64(&handler.live); want != got {
-		t.Errorf("wanted %d connections at first, got %d", want, got)
-	}
-
-	// Close one connection. Wait for its context to be canceled.
-	con2.Close()
-	for {
-		if atomic.LoadInt64(&handler.live) == 1 {
-			break
-		}
-	}
-
-	// Cancel the entire server context. Wait for the other connection's context to
-	// be canceled as well.
-	ctxC()
-	for {
-		if atomic.LoadInt64(&handler.live) == 0 {
-			break
-		}
-	}
-}
diff --git a/metropolis/pkg/supervisor/BUILD.bazel b/metropolis/pkg/supervisor/BUILD.bazel
deleted file mode 100644
index e6e3a22..0000000
--- a/metropolis/pkg/supervisor/BUILD.bazel
+++ /dev/null
@@ -1,29 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "supervisor",
-    srcs = [
-        "supervisor.go",
-        "supervisor_node.go",
-        "supervisor_processor.go",
-        "supervisor_support.go",
-        "supervisor_testhelpers.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/supervisor",
-    # TODO(#189): move supervisor to //go
-    visibility = ["//visibility:public"],
-    deps = [
-        "//metropolis/pkg/logtree",
-        "@com_github_cenkalti_backoff_v4//:backoff",
-        "@org_golang_google_grpc//:go_default_library",
-    ],
-)
-
-go_test(
-    name = "supervisor_test",
-    srcs = ["supervisor_test.go"],
-    embed = [":supervisor"],
-    # TODO: https://github.com/monogon-dev/monogon/issues/131
-    flaky = True,
-    deps = ["//metropolis/pkg/logtree"],
-)
diff --git a/metropolis/pkg/supervisor/supervisor.go b/metropolis/pkg/supervisor/supervisor.go
deleted file mode 100644
index 2af5e67..0000000
--- a/metropolis/pkg/supervisor/supervisor.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package supervisor
-
-// The service supervision library allows for writing of reliable,
-// service-style software within a Metropolis node.  It builds upon the
-// Erlang/OTP supervision tree system, adapted to be more Go-ish.  For detailed
-// design see go/supervision.
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"sync"
-
-	"source.monogon.dev/metropolis/pkg/logtree"
-)
-
-// A Runnable is a function that will be run in a goroutine, and supervised
-// throughout its lifetime. It can in turn start more runnables as its
-// children, and those will form part of a supervision tree.
-// The context passed to a runnable is very important and needs to be handled
-// properly. It will be live (non-errored) as long as the runnable should be
-// running, and canceled (ctx.Err() will be non-nil) when the supervisor wants
-// it to exit. This means this context is also perfectly usable for performing
-// any blocking operations.
-type Runnable func(ctx context.Context) error
-
-// RunGroup starts a set of runnables as a group. These runnables will run
-// together, and if any one of them quits unexpectedly, the result will be
-// canceled and restarted.
-// The context here must be an existing Runnable context, and the spawned
-// runnables will run under the node that this context represents.
-func RunGroup(ctx context.Context, runnables map[string]Runnable) error {
-	node, unlock := fromContext(ctx)
-	defer unlock()
-	return node.runGroup(runnables)
-}
-
-// Run starts a single runnable in its own group.
-func Run(ctx context.Context, name string, runnable Runnable) error {
-	return RunGroup(ctx, map[string]Runnable{
-		name: runnable,
-	})
-}
-
-// Signal tells the supervisor that the calling runnable has reached a certain
-// state of its lifecycle. All runnables should SignalHealthy when they are
-// ready with set up, running other child runnables and are now 'serving'.
-func Signal(ctx context.Context, signal SignalType) {
-	node, unlock := fromContext(ctx)
-	defer unlock()
-	node.signal(signal)
-}
-
-type SignalType int
-
-const (
-	// The runnable is healthy, done with setup, done with spawning more
-	// Runnables, and ready to serve in a loop.  The runnable needs to check
-	// the parent context and ensure that if that context is done, the runnable
-	// exits.
-	SignalHealthy SignalType = iota
-	// The runnable is done - it does not need to run any loop. This is useful
-	// for Runnables that only set up other child runnables. This runnable will
-	// be restarted if a related failure happens somewhere in the supervision
-	// tree.
-	SignalDone
-)
-
-// supervisor represents and instance of the supervision system. It keeps track
-// of a supervision tree and a request channel to its internal processor
-// goroutine.
-type supervisor struct {
-	// mu guards the entire state of the supervisor.
-	mu sync.RWMutex
-	// root is the root node of the supervision tree, named 'root'. It
-	// represents the Runnable started with the supervisor.New call.
-	root *node
-	// logtree is the main logtree exposed to runnables and used internally.
-	logtree *logtree.LogTree
-	// ilogger is the internal logger logging to "supervisor" in the logtree.
-	ilogger logtree.LeveledLogger
-
-	// pReq is an interface channel to the lifecycle processor of the
-	// supervisor.
-	pReq chan *processorRequest
-
-	// propagate panics, ie. don't catch them.
-	propagatePanic bool
-}
-
-// SupervisorOpt are runtime configurable options for the supervisor.
-type SupervisorOpt func(s *supervisor)
-
-// WithPropagatePanic prevents the Supervisor from catching panics in
-// runnables and treating them as failures. This is useful to enable for
-// testing and local debugging.
-func WithPropagatePanic(s *supervisor) {
-	s.propagatePanic = true
-}
-
-func WithExistingLogtree(lt *logtree.LogTree) SupervisorOpt {
-	return func(s *supervisor) {
-		s.logtree = lt
-	}
-}
-
-// New creates a new supervisor with its root running the given root runnable.
-// The given context can be used to cancel the entire supervision tree.
-//
-// For tests, we reccomend using TestHarness instead, which will also stream
-// logs to stderr and take care of propagating root runnable errors to the test
-// output.
-func New(ctx context.Context, rootRunnable Runnable, opts ...SupervisorOpt) *supervisor {
-	sup := &supervisor{
-		logtree: logtree.New(),
-		pReq:    make(chan *processorRequest),
-	}
-
-	for _, o := range opts {
-		o(sup)
-	}
-
-	sup.ilogger = sup.logtree.MustLeveledFor("supervisor")
-	sup.root = newNode("root", rootRunnable, sup, nil)
-
-	go sup.processor(ctx)
-
-	sup.pReq <- &processorRequest{
-		schedule: &processorRequestSchedule{dn: "root"},
-	}
-
-	return sup
-}
-
-func Logger(ctx context.Context) logtree.LeveledLogger {
-	node, unlock := fromContext(ctx)
-	defer unlock()
-	return node.sup.logtree.MustLeveledFor(logtree.DN(node.dn()))
-}
-
-func RawLogger(ctx context.Context) io.Writer {
-	node, unlock := fromContext(ctx)
-	defer unlock()
-	return node.sup.logtree.MustRawFor(logtree.DN(node.dn()))
-}
-
-// SubLogger returns a LeveledLogger for a given name. The name is used to
-// placed that logger within the logtree hierarchy. For example, if the
-// runnable `root.foo` requests a SubLogger for name `bar`, the returned logger
-// will log to `root.foo.bar` in the logging tree.
-//
-// An error is returned if the given name is invalid or conflicts with a child
-// runnable of the current runnable. In addition, whenever a node uses a
-// sub-logger with a given name, that name also becomes unavailable for use as
-// a child runnable (no runnable and sub-logger may ever log into the same
-// logtree DN).
-func SubLogger(ctx context.Context, name string) (logtree.LeveledLogger, error) {
-	node, unlock := fromContext(ctx)
-	defer unlock()
-
-	if _, ok := node.children[name]; ok {
-		return nil, fmt.Errorf("name %q already in use by child runnable", name)
-	}
-	if !reNodeName.MatchString(name) {
-		return nil, fmt.Errorf("sub-logger name %q is invalid", name)
-	}
-	node.reserved[name] = true
-
-	dn := fmt.Sprintf("%s.%s", node.dn(), name)
-	return node.sup.logtree.LeveledFor(logtree.DN(dn))
-}
-
-// MustSubLogger is a wrapper around SubLogger which panics on error. Errors
-// should only happen due to invalid names, so as long as the given name is
-// compile-time constant and valid, this function is safe to use.
-func MustSubLogger(ctx context.Context, name string) logtree.LeveledLogger {
-	l, err := SubLogger(ctx, name)
-	if err != nil {
-		panic(err)
-	}
-	return l
-}
diff --git a/metropolis/pkg/supervisor/supervisor_node.go b/metropolis/pkg/supervisor/supervisor_node.go
deleted file mode 100644
index 76d656c..0000000
--- a/metropolis/pkg/supervisor/supervisor_node.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package supervisor
-
-import (
-	"context"
-	"fmt"
-	"regexp"
-	"strings"
-
-	"github.com/cenkalti/backoff/v4"
-)
-
-// node is a supervision tree node. It represents the state of a Runnable
-// within this tree, its relation to other tree elements, and contains
-// supporting data needed to actually supervise it.
-type node struct {
-	// The name of this node. Opaque string. It's used to make up the 'dn'
-	// (distinguished name) of a node within the tree. When starting a runnable
-	// inside a tree, this is where that name gets used.
-	name     string
-	runnable Runnable
-
-	// The supervisor managing this tree.
-	sup *supervisor
-	// The parent, within the tree, of this node. If this is the root node of
-	// the tree, this is nil.
-	parent *node
-	// Children of this tree. This is represented by a map keyed from child
-	// node names, for easy access.
-	children map[string]*node
-	// Reserved nodes that may not be used as child names. This is currently
-	// used by sub-loggers (see SubLogger function), preventing a sub-logger
-	// name from colliding with a node name.
-	reserved map[string]bool
-	// Supervision groups. Each group is a set of names of children. Sets, and
-	// as such groups, don't overlap between each other. A supervision group
-	// indicates that if any child within that group fails, all others should
-	// be canceled and restarted together.
-	groups []map[string]bool
-
-	// The current state of the runnable in this node.
-	state nodeState
-
-	// Backoff used to keep runnables from being restarted too fast.
-	bo *backoff.ExponentialBackOff
-
-	// Context passed to the runnable, and its cancel function.
-	ctx  context.Context
-	ctxC context.CancelFunc
-}
-
-// nodeState is the state of a runnable within a node, and in a way the node
-// itself. This follows the state diagram from go/supervision.
-type nodeState int
-
-const (
-	// A node that has just been created, and whose runnable has been started
-	// already but hasn't signaled anything yet.
-	nodeStateNew nodeState = iota
-	// A node whose runnable has signaled being healthy - this means it's ready
-	// to serve/act.
-	nodeStateHealthy
-	// A node that has unexpectedly returned or panicked.
-	nodeStateDead
-	// A node that has declared that its done with its work and should not be
-	// restarted, unless a supervision tree failure requires that.
-	nodeStateDone
-	// A node that has returned after being requested to cancel.
-	nodeStateCanceled
-)
-
-func (s nodeState) String() string {
-	switch s {
-	case nodeStateNew:
-		return "NODE_STATE_NEW"
-	case nodeStateHealthy:
-		return "NODE_STATE_HEALTHY"
-	case nodeStateDead:
-		return "NODE_STATE_DEAD"
-	case nodeStateDone:
-		return "NODE_STATE_DONE"
-	case nodeStateCanceled:
-		return "NODE_STATE_CANCELED"
-	}
-	return "UNKNOWN"
-}
-
-func (n *node) String() string {
-	return fmt.Sprintf("%s (%s)", n.dn(), n.state.String())
-}
-
-// contextKey is a type used to keep data within context values.
-type contextKey string
-
-var (
-	supervisorKey = contextKey("supervisor")
-	dnKey         = contextKey("dn")
-)
-
-// fromContext retrieves a tree node from a runnable context. It takes a lock
-// on the tree and returns an unlock function. This unlock function needs to be
-// called once mutations on the tree/supervisor/node are done.
-func fromContext(ctx context.Context) (*node, func()) {
-	sup, ok := ctx.Value(supervisorKey).(*supervisor)
-	if !ok {
-		panic("supervisor function called from non-runnable context")
-	}
-
-	sup.mu.Lock()
-
-	dnParent, ok := ctx.Value(dnKey).(string)
-	if !ok {
-		sup.mu.Unlock()
-		panic("supervisor function called from non-runnable context")
-	}
-
-	return sup.nodeByDN(dnParent), sup.mu.Unlock
-}
-
-// All the following 'internal' supervisor functions must only be called with
-// the supervisor lock taken. Getting a lock via fromContext is enough.
-
-// dn returns the distinguished name of a node. This distinguished name is a
-// period-separated, inverse-DNS-like name.  For instance, the runnable 'foo'
-// within the runnable 'bar' will be called 'root.bar.foo'. The root of the
-// tree is always named, and has the dn, 'root'.
-func (n *node) dn() string {
-	if n.parent != nil {
-		return fmt.Sprintf("%s.%s", n.parent.dn(), n.name)
-	}
-	return n.name
-}
-
-// groupSiblings is a helper function to get all runnable group siblings of a
-// given runnable name within this node.  All children are always in a group,
-// even if that group is unary.
-func (n *node) groupSiblings(name string) map[string]bool {
-	for _, m := range n.groups {
-		if _, ok := m[name]; ok {
-			return m
-		}
-	}
-	return nil
-}
-
-// newNode creates a new node with a given parent. It does not register it with
-// the parent (as that depends on group placement).
-func newNode(name string, runnable Runnable, sup *supervisor, parent *node) *node {
-	// We use exponential backoff for failed runnables, but at some point we
-	// cap at a given backoff time. To achieve this, we set MaxElapsedTime to
-	// 0, which will cap the backoff at MaxInterval.
-	bo := backoff.NewExponentialBackOff()
-	bo.MaxElapsedTime = 0
-
-	n := &node{
-		name:     name,
-		runnable: runnable,
-
-		bo: bo,
-
-		sup:    sup,
-		parent: parent,
-	}
-	n.reset()
-	return n
-}
-
-// resetNode sets up all the dynamic fields of the node, in preparation of
-// starting a runnable. It clears the node's children, groups and resets its
-// context.
-func (n *node) reset() {
-	// Make new context. First, acquire parent context. For the root node
-	// that's Background, otherwise it's the parent's context.
-	var pCtx context.Context
-	if n.parent == nil {
-		pCtx = context.Background()
-	} else {
-		pCtx = n.parent.ctx
-	}
-	// Mark DN and supervisor in context.
-	ctx := context.WithValue(pCtx, dnKey, n.dn())
-	ctx = context.WithValue(ctx, supervisorKey, n.sup)
-	ctx, ctxC := context.WithCancel(ctx)
-	// Set context
-	n.ctx = ctx
-	n.ctxC = ctxC
-
-	// Clear children and state
-	n.state = nodeStateNew
-	n.children = make(map[string]*node)
-	n.reserved = make(map[string]bool)
-	n.groups = nil
-
-	// The node is now ready to be scheduled.
-}
-
-// nodeByDN returns a node by given DN from the supervisor.
-func (s *supervisor) nodeByDN(dn string) *node {
-	parts := strings.Split(dn, ".")
-	if parts[0] != "root" {
-		panic("DN does not start with root.")
-	}
-	parts = parts[1:]
-	cur := s.root
-	for {
-		if len(parts) == 0 {
-			return cur
-		}
-
-		next, ok := cur.children[parts[0]]
-		if !ok {
-			panic(fmt.Errorf("could not find %v (%s) in %s", parts, dn, cur))
-		}
-		cur = next
-		parts = parts[1:]
-	}
-}
-
-// reNodeName validates a node name against constraints.
-var reNodeName = regexp.MustCompile(`[a-z90-9_]{1,64}`)
-
-// runGroup schedules a new group of runnables to run on a node.
-func (n *node) runGroup(runnables map[string]Runnable) error {
-	// Check that the parent node is in the right state.
-	if n.state != nodeStateNew {
-		return fmt.Errorf("cannot run new runnable on non-NEW node")
-	}
-
-	// Check the requested runnable names.
-	for name := range runnables {
-		if !reNodeName.MatchString(name) {
-			return fmt.Errorf("runnable name %q is invalid", name)
-		}
-		if _, ok := n.children[name]; ok {
-			return fmt.Errorf("runnable %q already exists", name)
-		}
-		if _, ok := n.reserved[name]; ok {
-			return fmt.Errorf("runnable %q would shadow reserved name (eg. sub-logger)", name)
-		}
-	}
-
-	// Create child nodes.
-	dns := make(map[string]string)
-	group := make(map[string]bool)
-	for name, runnable := range runnables {
-		if g := n.groupSiblings(name); g != nil {
-			return fmt.Errorf("duplicate child name %q", name)
-		}
-		node := newNode(name, runnable, n.sup, n)
-		n.children[name] = node
-
-		dns[name] = node.dn()
-		group[name] = true
-	}
-	// Add group.
-	n.groups = append(n.groups, group)
-
-	// Schedule execution of group members.
-	go func() {
-		for name := range runnables {
-			n.sup.pReq <- &processorRequest{
-				schedule: &processorRequestSchedule{
-					dn: dns[name],
-				},
-			}
-		}
-	}()
-	return nil
-}
-
-// signal sequences state changes by signals received from runnables and
-// updates a node's status accordingly.
-func (n *node) signal(signal SignalType) {
-	switch signal {
-	case SignalHealthy:
-		if n.state != nodeStateNew {
-			panic(fmt.Errorf("node %s signaled healthy", n))
-		}
-		n.state = nodeStateHealthy
-		n.bo.Reset()
-	case SignalDone:
-		if n.state != nodeStateHealthy {
-			panic(fmt.Errorf("node %s signaled done", n))
-		}
-		n.state = nodeStateDone
-		n.bo.Reset()
-	}
-}
diff --git a/metropolis/pkg/supervisor/supervisor_processor.go b/metropolis/pkg/supervisor/supervisor_processor.go
deleted file mode 100644
index 46cd1aa..0000000
--- a/metropolis/pkg/supervisor/supervisor_processor.go
+++ /dev/null
@@ -1,508 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package supervisor
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"runtime/debug"
-	"sort"
-	"time"
-)
-
-// The processor maintains runnable goroutines - ie., when requested will start
-// one, and then once it exists it will record the result and act accordingly.
-// It is also responsible for detecting and acting upon supervision subtrees
-// that need to be restarted after death (via a 'GC' process)
-
-// processorRequest is a request for the processor. Only one of the fields can
-// be set.
-type processorRequest struct {
-	schedule    *processorRequestSchedule
-	died        *processorRequestDied
-	waitSettled *processorRequestWaitSettled
-}
-
-// processorRequestSchedule requests that a given node's runnable be started.
-type processorRequestSchedule struct {
-	dn string
-}
-
-// processorRequestDied is a signal from a runnable goroutine that the runnable
-// has died.
-type processorRequestDied struct {
-	dn  string
-	err error
-}
-
-type processorRequestWaitSettled struct {
-	waiter chan struct{}
-}
-
-// processor is the main processing loop.
-func (s *supervisor) processor(ctx context.Context) {
-	s.ilogger.Info("supervisor processor started")
-
-	// Waiters waiting for the GC to be settled.
-	var waiters []chan struct{}
-
-	// The GC will run every millisecond if needed. Any time the processor
-	// requests a change in the supervision tree (ie a death or a new runnable)
-	// it will mark the state as dirty and run the GC on the next millisecond
-	// cycle.
-	gc := time.NewTicker(1 * time.Millisecond)
-	defer gc.Stop()
-	clean := true
-
-	// How long has the GC been clean. This is used to notify 'settled' waiters.
-	cleanCycles := 0
-
-	markDirty := func() {
-		clean = false
-		cleanCycles = 0
-	}
-
-	for {
-		select {
-		case <-ctx.Done():
-			s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
-			s.processKill()
-			s.ilogger.Info("supervisor exited, starting liquidator to clean up remaining runnables...")
-			go s.liquidator()
-			return
-		case <-gc.C:
-			if !clean {
-				s.processGC()
-			}
-			clean = true
-			cleanCycles += 1
-
-			// This threshold is somewhat arbitrary. It's a balance between
-			// test speed and test reliability.
-			if cleanCycles > 50 {
-				for _, w := range waiters {
-					close(w)
-				}
-				waiters = nil
-			}
-		case r := <-s.pReq:
-			switch {
-			case r.schedule != nil:
-				s.processSchedule(r.schedule)
-				markDirty()
-			case r.died != nil:
-				s.processDied(r.died)
-				markDirty()
-			case r.waitSettled != nil:
-				waiters = append(waiters, r.waitSettled.waiter)
-			default:
-				panic(fmt.Errorf("unhandled request %+v", r))
-			}
-		}
-	}
-}
-
-// The liquidator is a context-free goroutine which the supervisor starts after
-// its context has been canceled. Its job is to take over listening on the
-// processing channels that the supervisor processor would usually listen on,
-// and implement the minimum amount of logic required to mark existing runnables
-// as DEAD.
-//
-// It exits when all runnables have exited one way or another, and the
-// supervision tree is well and truly dead. This will also be reflected by
-// liveRunnables returning an empty list.
-func (s *supervisor) liquidator() {
-	for {
-		r := <-s.pReq
-		switch {
-		case r.schedule != nil:
-			s.ilogger.Infof("liquidator: refusing to schedule %s", r.schedule.dn)
-			s.mu.Lock()
-			n := s.nodeByDN(r.schedule.dn)
-			n.state = nodeStateDead
-			s.mu.Unlock()
-		case r.died != nil:
-			s.ilogger.Infof("liquidator: %s exited", r.died.dn)
-			s.mu.Lock()
-			n := s.nodeByDN(r.died.dn)
-			n.state = nodeStateDead
-			s.mu.Unlock()
-		}
-		live := s.liveRunnables()
-		if len(live) == 0 {
-			s.ilogger.Infof("liquidator: complete, all runnables dead or done")
-			return
-		}
-	}
-}
-
-// liveRunnables returns a list of runnable DNs that aren't DONE/DEAD. This is
-// used by the liquidator to figure out when its job is done, and by the
-// TestHarness to know when to unblock the test cleanup function.
-func (s *supervisor) liveRunnables() []string {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-
-	// DFS through supervision tree, making not of live (non-DONE/DEAD runnables).
-	var live []string
-	seen := make(map[string]bool)
-	q := []*node{s.root}
-	for {
-		if len(q) == 0 {
-			break
-		}
-
-		// Pop from DFS queue.
-		el := q[0]
-		q = q[1:]
-
-		// Skip already visited runnables (this shouldn't happen because the supervision
-		// tree is, well, a tree - but better stay safe than get stuck in a loop).
-		eldn := el.dn()
-		if seen[eldn] {
-			continue
-		}
-		seen[eldn] = true
-
-		if el.state != nodeStateDead && el.state != nodeStateDone {
-			live = append(live, eldn)
-		}
-
-		// Recurse.
-		for _, child := range el.children {
-			q = append(q, child)
-		}
-	}
-
-	sort.Strings(live)
-	return live
-}
-
-// processKill cancels all nodes in the supervision tree. This is only called
-// right before exiting the processor, so they do not get automatically
-// restarted.
-func (s *supervisor) processKill() {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	// Gather all context cancel functions.
-	var cancels []func()
-	queue := []*node{s.root}
-	for {
-		if len(queue) == 0 {
-			break
-		}
-
-		cur := queue[0]
-		queue = queue[1:]
-
-		cancels = append(cancels, cur.ctxC)
-		for _, c := range cur.children {
-			queue = append(queue, c)
-		}
-	}
-
-	// Call all context cancels.
-	for _, c := range cancels {
-		c()
-	}
-}
-
-// processSchedule starts a node's runnable in a goroutine and records its
-// output once it's done.
-func (s *supervisor) processSchedule(r *processorRequestSchedule) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	n := s.nodeByDN(r.dn)
-	go func() {
-		if !s.propagatePanic {
-			defer func() {
-				if rec := recover(); rec != nil {
-					s.pReq <- &processorRequest{
-						died: &processorRequestDied{
-							dn:  r.dn,
-							err: fmt.Errorf("panic: %v, stacktrace: %s", rec, string(debug.Stack())),
-						},
-					}
-				}
-			}()
-		}
-
-		res := n.runnable(n.ctx)
-
-		s.pReq <- &processorRequest{
-			died: &processorRequestDied{
-				dn:  r.dn,
-				err: res,
-			},
-		}
-	}()
-}
-
-// processDied records the result from a runnable goroutine, and updates its
-// node state accordingly. If the result is a death and not an expected exit,
-// related nodes (ie. children and group siblings) are canceled accordingly.
-func (s *supervisor) processDied(r *processorRequestDied) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	// Okay, so a Runnable has quit. What now?
-	n := s.nodeByDN(r.dn)
-	ctx := n.ctx
-
-	// Simple case: it was marked as Done and quit with no error.
-	if n.state == nodeStateDone && r.err == nil {
-		// Do nothing. This was supposed to happen. Keep the process as DONE.
-		return
-	}
-
-	// Simple case: the context was canceled and the returned error is the
-	// context error.
-	if r.err != nil && ctx.Err() != nil && errors.Is(r.err, ctx.Err()) {
-		// Mark the node as canceled successfully.
-		n.state = nodeStateCanceled
-		return
-	}
-
-	// Otherwise, the Runnable should not have died or quit. Handle
-	// accordingly.
-	err := r.err
-	// A lack of returned error is also an error.
-	if err == nil {
-		err = fmt.Errorf("returned nil when %s", n.state)
-	}
-
-	s.ilogger.Errorf("%s: %v", n.dn(), err)
-	// Mark as dead.
-	n.state = nodeStateDead
-
-	// Cancel that node's context, just in case something still depends on it.
-	n.ctxC()
-
-	// Cancel all siblings.
-	if n.parent != nil {
-		for name := range n.parent.groupSiblings(n.name) {
-			if name == n.name {
-				continue
-			}
-			sibling := n.parent.children[name]
-			// TODO(q3k): does this need to run in a goroutine, ie. can a
-			// context cancel block?
-			sibling.ctxC()
-		}
-	}
-}
-
-// processGC runs the GC process. It's not really Garbage Collection, as in, it
-// doesn't remove unnecessary tree nodes - but it does find nodes that need to
-// be restarted, find the subset that can and then schedules them for running.
-// As such, it's less of a Garbage Collector and more of a Necromancer.
-// However, GC is a friendlier name.
-func (s *supervisor) processGC() {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	// The 'GC' serves is the main business logic of the supervision tree. It
-	// traverses a locked tree and tries to find subtrees that must be
-	// restarted (because of a DEAD/CANCELED runnable). It then finds which of
-	// these subtrees that should be restarted can be restarted, ie. which ones
-	// are fully recursively DEAD/CANCELED. It also finds the smallest set of
-	// largest subtrees that can be restarted, ie. if there's multiple DEAD
-	// runnables that can be restarted at once, it will do so.
-
-	// Phase one: Find all leaves.
-	// This is a simple DFS that finds all the leaves of the tree, ie all nodes
-	// that do not have children nodes.
-	leaves := make(map[string]bool)
-	queue := []*node{s.root}
-	for {
-		if len(queue) == 0 {
-			break
-		}
-		cur := queue[0]
-		queue = queue[1:]
-
-		for _, c := range cur.children {
-			queue = append([]*node{c}, queue...)
-		}
-
-		if len(cur.children) == 0 {
-			leaves[cur.dn()] = true
-		}
-	}
-
-	// Phase two: traverse tree from node to root and make note of all subtrees
-	// that can be restarted.
-	// A subtree is restartable/ready iff every node in that subtree is either
-	// CANCELED, DEAD or DONE.  Such a 'ready' subtree can be restarted by the
-	// supervisor if needed.
-
-	// DNs that we already visited.
-	visited := make(map[string]bool)
-	// DNs whose subtrees are ready to be restarted.
-	// These are all subtrees recursively - ie., root.a.a and root.a will both
-	// be marked here.
-	ready := make(map[string]bool)
-
-	// We build a queue of nodes to visit, starting from the leaves.
-	queue = []*node{}
-	for l := range leaves {
-		queue = append(queue, s.nodeByDN(l))
-	}
-
-	for {
-		if len(queue) == 0 {
-			break
-		}
-
-		cur := queue[0]
-		curDn := cur.dn()
-
-		queue = queue[1:]
-
-		// Do we have a decision about our children?
-		allVisited := true
-		for _, c := range cur.children {
-			if !visited[c.dn()] {
-				allVisited = false
-				break
-			}
-		}
-
-		// If no decision about children is available, it means we ended up in
-		// this subtree through some shorter path of a shorter/lower-order
-		// leaf. There is a path to a leaf that's longer than the one that
-		// caused this node to be enqueued. Easy solution: just push back the
-		// current element and retry later.
-		if !allVisited {
-			// Push back to queue and wait for a decision later.
-			queue = append(queue, cur)
-			continue
-		}
-
-		// All children have been visited and we have an idea about whether
-		// they're ready/restartable. All of the node's children must be
-		// restartable in order for this node to be restartable.
-		childrenReady := true
-		var childrenNotReady []string
-		for _, c := range cur.children {
-			if !ready[c.dn()] {
-				childrenNotReady = append(childrenNotReady, c.dn())
-				childrenReady = false
-				break
-			}
-		}
-
-		// In addition to children, the node itself must be restartable (ie.
-		// DONE, DEAD or CANCELED).
-		curReady := false
-		switch cur.state {
-		case nodeStateDone:
-			curReady = true
-		case nodeStateCanceled:
-			curReady = true
-		case nodeStateDead:
-			curReady = true
-		default:
-		}
-
-		if cur.state == nodeStateDead && !childrenReady {
-			s.ilogger.Warningf("Not restarting %s: children not ready to be restarted: %v", curDn, childrenNotReady)
-		}
-
-		// Note down that we have an opinion on this node, and note that
-		// opinion down.
-		visited[curDn] = true
-		ready[curDn] = childrenReady && curReady
-
-		// Now we can also enqueue the parent of this node for processing.
-		if cur.parent != nil && !visited[cur.parent.dn()] {
-			queue = append(queue, cur.parent)
-		}
-	}
-
-	// Phase 3: traverse tree from root to find largest subtrees that need to
-	// be restarted and are ready to be restarted.
-
-	// All DNs that need to be restarted by the GC process.
-	want := make(map[string]bool)
-	// All DNs that need to be restarted and can be restarted by the GC process
-	// - a subset of 'want' DNs.
-	can := make(map[string]bool)
-	// The set difference between 'want' and 'can' are all nodes that should be
-	// restarted but can't yet (ie. because a child is still in the process of
-	// being canceled).
-
-	// DFS from root.
-	queue = []*node{s.root}
-	for {
-		if len(queue) == 0 {
-			break
-		}
-
-		cur := queue[0]
-		queue = queue[1:]
-
-		// If this node is DEAD or CANCELED it should be restarted.
-		if cur.state == nodeStateDead || cur.state == nodeStateCanceled {
-			want[cur.dn()] = true
-		}
-
-		// If it should be restarted and is ready to be restarted...
-		if want[cur.dn()] && ready[cur.dn()] {
-			// And its parent context is valid (ie hasn't been canceled), mark
-			// it as restartable.
-			if cur.parent == nil || cur.parent.ctx.Err() == nil {
-				can[cur.dn()] = true
-				continue
-			}
-		}
-
-		// Otherwise, traverse further down the tree to see if something else
-		// needs to be done.
-		for _, c := range cur.children {
-			queue = append(queue, c)
-		}
-	}
-
-	// Reinitialize and reschedule all subtrees
-	for dn := range can {
-		n := s.nodeByDN(dn)
-
-		// Only back off when the node unexpectedly died - not when it got
-		// canceled.
-		bo := time.Duration(0)
-		if n.state == nodeStateDead {
-			bo = n.bo.NextBackOff()
-		}
-
-		// Prepare node for rescheduling - remove its children, reset its state
-		// to new.
-		n.reset()
-		s.ilogger.Infof("rescheduling supervised node %s with backoff %s", dn, bo.String())
-
-		// Reschedule node runnable to run after backoff.
-		go func(n *node, bo time.Duration) {
-			time.Sleep(bo)
-			s.pReq <- &processorRequest{
-				schedule: &processorRequestSchedule{dn: n.dn()},
-			}
-		}(n, bo)
-	}
-}
diff --git a/metropolis/pkg/supervisor/supervisor_support.go b/metropolis/pkg/supervisor/supervisor_support.go
deleted file mode 100644
index 8d836f2..0000000
--- a/metropolis/pkg/supervisor/supervisor_support.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package supervisor
-
-// Supporting infrastructure to allow running some non-Go payloads under
-// supervision.
-
-import (
-	"context"
-	"errors"
-	"net"
-	"os"
-	"os/exec"
-
-	"google.golang.org/grpc"
-
-	"source.monogon.dev/metropolis/pkg/logtree"
-)
-
-// GRPCServer creates a Runnable that serves gRPC requests as longs as it's not
-// canceled.
-// If graceful is set to true, the server will be gracefully stopped instead of
-// plain stopped. This means all pending RPCs will finish, but also requires
-// streaming gRPC handlers to check their context liveliness and exit
-// accordingly.  If the server code does not support this, `graceful` should be
-// false and the server will be killed violently instead.
-func GRPCServer(srv *grpc.Server, lis net.Listener, graceful bool) Runnable {
-	return func(ctx context.Context) error {
-		Signal(ctx, SignalHealthy)
-		defer func() {
-			if graceful {
-				srv.GracefulStop()
-			} else {
-				srv.Stop()
-			}
-		}()
-		errC := make(chan error)
-		go func() {
-			errC <- srv.Serve(lis)
-		}()
-		select {
-		case <-ctx.Done():
-			return ctx.Err()
-		case err := <-errC:
-			return err
-		}
-	}
-}
-
-// RunCommand will create a Runnable that starts a long-running command, whose
-// exit is determined to be a failure.
-func RunCommand(ctx context.Context, cmd *exec.Cmd, opts ...RunCommandOption) error {
-	Signal(ctx, SignalHealthy)
-
-	var parseKLog bool
-	var signal <-chan os.Signal
-	for _, opt := range opts {
-		if opt.parseKlog {
-			parseKLog = true
-		}
-		if opt.signal != nil {
-			signal = opt.signal
-		}
-	}
-
-	if parseKLog {
-		// We make two klogs, one for each of stdout/stderr. This is to prevent
-		// accidental interleaving of both.
-		klogStdout := logtree.KLogParser(Logger(ctx))
-		defer klogStdout.Close()
-		klogStderr := logtree.KLogParser(Logger(ctx))
-		defer klogStderr.Close()
-
-		cmd.Stdout = klogStdout
-		cmd.Stderr = klogStderr
-	} else {
-		cmd.Stdout = RawLogger(ctx)
-		cmd.Stderr = RawLogger(ctx)
-	}
-	err := cmd.Start()
-	if err != nil {
-		return err
-	}
-
-	exited := make(chan struct{})
-	if signal != nil {
-		go func() {
-			for {
-				var err error
-				select {
-				case s := <-signal:
-					err = cmd.Process.Signal(s)
-				case <-exited:
-					return
-				}
-				if err != nil && !errors.Is(err, os.ErrProcessDone) {
-					Logger(ctx).Warningf("Failed sending signal to process: %v", err)
-				}
-			}
-		}()
-	}
-
-	err = cmd.Wait()
-	if signal != nil {
-		exited <- struct{}{}
-	}
-	Logger(ctx).Infof("Command returned: %v", err)
-	return err
-}
-
-type RunCommandOption struct {
-	parseKlog bool
-	signal    <-chan os.Signal
-}
-
-// ParseKLog signals that the command being run will return klog-compatible
-// logs to stdout and/or stderr, and these will be re-interpreted as structured
-// logging and emitted to the supervisor's logger.
-func ParseKLog() RunCommandOption {
-	return RunCommandOption{
-		parseKlog: true,
-	}
-}
-
-// SignalChan takes a channel which can be used to send signals to the
-// supervised process.
-//
-// The given channel will be read from as long as the underlying process is
-// running. If the process doesn't start successfully the channel will not be
-// read. When the process exits, the channel will stop being read.
-//
-// With the above in mind, and also taking into account the inherent lack of
-// reliability in delivering any process-handled signals in POSIX/Linux, it is
-// recommended to use unbuffered channels, always write to them in a non-blocking
-// fashion (eg. in a select { ... default: } block), and to not rely only on the
-// signal delivery mechanism for the intended behaviour.
-//
-// For example, if the signals are used to trigger some configuration reload,
-// these configuration reloads should either be verified and signal delivery should
-// be retried until confirmed successful, or there should be a backup periodic
-// reload performed by the target process independently of signal-based reload
-// triggers.
-//
-// Another example: if the signal delivered is a SIGTERM used to gracefully
-// terminate some process, it should be attempted to be delivered a number of
-// times before finally SIGKILLing the process.
-func SignalChan(s <-chan os.Signal) RunCommandOption {
-	return RunCommandOption{
-		signal: s,
-	}
-}
diff --git a/metropolis/pkg/supervisor/supervisor_test.go b/metropolis/pkg/supervisor/supervisor_test.go
deleted file mode 100644
index a735de4..0000000
--- a/metropolis/pkg/supervisor/supervisor_test.go
+++ /dev/null
@@ -1,655 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package supervisor
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	"source.monogon.dev/metropolis/pkg/logtree"
-)
-
-// waitSettle waits until the supervisor reaches a 'settled' state - ie., one
-// where no actions have been performed for a number of GC cycles.
-// This is used in tests only.
-func (s *supervisor) waitSettle(ctx context.Context) error {
-	waiter := make(chan struct{})
-	s.pReq <- &processorRequest{
-		waitSettled: &processorRequestWaitSettled{
-			waiter: waiter,
-		},
-	}
-
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	case <-waiter:
-		return nil
-	}
-}
-
-// waitSettleError wraps waitSettle to fail a test if an error occurs, eg. the
-// context is canceled.
-func (s *supervisor) waitSettleError(ctx context.Context, t *testing.T) {
-	err := s.waitSettle(ctx)
-	if err != nil {
-		t.Fatalf("waitSettle: %v", err)
-	}
-}
-
-func runnableBecomesHealthy(healthy, done chan struct{}) Runnable {
-	return func(ctx context.Context) error {
-		Signal(ctx, SignalHealthy)
-
-		go func() {
-			if healthy != nil {
-				healthy <- struct{}{}
-			}
-		}()
-
-		<-ctx.Done()
-
-		if done != nil {
-			done <- struct{}{}
-		}
-
-		return ctx.Err()
-	}
-}
-
-func runnableSpawnsMore(healthy, done chan struct{}, levels int) Runnable {
-	return func(ctx context.Context) error {
-		if levels > 0 {
-			err := RunGroup(ctx, map[string]Runnable{
-				"a": runnableSpawnsMore(nil, nil, levels-1),
-				"b": runnableSpawnsMore(nil, nil, levels-1),
-			})
-			if err != nil {
-				return err
-			}
-		}
-
-		Signal(ctx, SignalHealthy)
-
-		go func() {
-			if healthy != nil {
-				healthy <- struct{}{}
-			}
-		}()
-
-		<-ctx.Done()
-
-		if done != nil {
-			done <- struct{}{}
-		}
-		return ctx.Err()
-	}
-}
-
-// rc is a Remote Controlled runnable. It is a generic runnable used for
-// testing the supervisor.
-type rc struct {
-	req chan rcRunnableRequest
-}
-
-type rcRunnableRequest struct {
-	cmd    rcRunnableCommand
-	stateC chan rcRunnableState
-}
-
-type rcRunnableCommand int
-
-const (
-	rcRunnableCommandBecomeHealthy rcRunnableCommand = iota
-	rcRunnableCommandBecomeDone
-	rcRunnableCommandDie
-	rcRunnableCommandPanic
-	rcRunnableCommandState
-)
-
-type rcRunnableState int
-
-const (
-	rcRunnableStateNew rcRunnableState = iota
-	rcRunnableStateHealthy
-	rcRunnableStateDone
-)
-
-func (r *rc) becomeHealthy() {
-	r.req <- rcRunnableRequest{cmd: rcRunnableCommandBecomeHealthy}
-}
-
-func (r *rc) becomeDone() {
-	r.req <- rcRunnableRequest{cmd: rcRunnableCommandBecomeDone}
-}
-func (r *rc) die() {
-	r.req <- rcRunnableRequest{cmd: rcRunnableCommandDie}
-}
-
-func (r *rc) panic() {
-	r.req <- rcRunnableRequest{cmd: rcRunnableCommandPanic}
-}
-
-func (r *rc) state() rcRunnableState {
-	c := make(chan rcRunnableState)
-	r.req <- rcRunnableRequest{
-		cmd:    rcRunnableCommandState,
-		stateC: c,
-	}
-	return <-c
-}
-
-func (r *rc) waitState(s rcRunnableState) {
-	// This is poll based. Making it non-poll based would make the RC runnable
-	// logic a bit more complex for little gain.
-	for {
-		got := r.state()
-		if got == s {
-			return
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-}
-
-func newRC() *rc {
-	return &rc{
-		req: make(chan rcRunnableRequest),
-	}
-}
-
-// Remote Controlled Runnable
-func (r *rc) runnable() Runnable {
-	return func(ctx context.Context) error {
-		state := rcRunnableStateNew
-
-		for {
-			select {
-			case <-ctx.Done():
-				return ctx.Err()
-			case r := <-r.req:
-				switch r.cmd {
-				case rcRunnableCommandBecomeHealthy:
-					Signal(ctx, SignalHealthy)
-					state = rcRunnableStateHealthy
-				case rcRunnableCommandBecomeDone:
-					Signal(ctx, SignalDone)
-					state = rcRunnableStateDone
-				case rcRunnableCommandDie:
-					return fmt.Errorf("died on request")
-				case rcRunnableCommandPanic:
-					panic("at the disco")
-				case rcRunnableCommandState:
-					r.stateC <- state
-				}
-			}
-		}
-	}
-}
-
-func TestSimple(t *testing.T) {
-	h1 := make(chan struct{})
-	d1 := make(chan struct{})
-	h2 := make(chan struct{})
-	d2 := make(chan struct{})
-
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-	s := New(ctx, func(ctx context.Context) error {
-		err := RunGroup(ctx, map[string]Runnable{
-			"one": runnableBecomesHealthy(h1, d1),
-			"two": runnableBecomesHealthy(h2, d2),
-		})
-		if err != nil {
-			return err
-		}
-		Signal(ctx, SignalHealthy)
-		Signal(ctx, SignalDone)
-		return nil
-	}, WithPropagatePanic)
-
-	// Expect both to start running.
-	s.waitSettleError(ctx, t)
-	select {
-	case <-h1:
-	default:
-		t.Fatalf("runnable 'one' didn't start")
-	}
-	select {
-	case <-h2:
-	default:
-		t.Fatalf("runnable 'one' didn't start")
-	}
-}
-
-func TestSimpleFailure(t *testing.T) {
-	h1 := make(chan struct{})
-	d1 := make(chan struct{})
-	two := newRC()
-
-	ctx, ctxC := context.WithTimeout(context.Background(), 10*time.Second)
-	defer ctxC()
-	s := New(ctx, func(ctx context.Context) error {
-		err := RunGroup(ctx, map[string]Runnable{
-			"one": runnableBecomesHealthy(h1, d1),
-			"two": two.runnable(),
-		})
-		if err != nil {
-			return err
-		}
-		Signal(ctx, SignalHealthy)
-		Signal(ctx, SignalDone)
-		return nil
-	}, WithPropagatePanic)
-	s.waitSettleError(ctx, t)
-
-	two.becomeHealthy()
-	s.waitSettleError(ctx, t)
-	// Expect one to start running.
-	select {
-	case <-h1:
-	default:
-		t.Fatalf("runnable 'one' didn't start")
-	}
-
-	// Kill off two, one should restart.
-	two.die()
-	<-d1
-
-	// And one should start running again.
-	s.waitSettleError(ctx, t)
-	select {
-	case <-h1:
-	default:
-		t.Fatalf("runnable 'one' didn't restart")
-	}
-}
-
-func TestDeepFailure(t *testing.T) {
-	h1 := make(chan struct{})
-	d1 := make(chan struct{})
-	two := newRC()
-
-	ctx, ctxC := context.WithTimeout(context.Background(), 10*time.Second)
-	defer ctxC()
-	s := New(ctx, func(ctx context.Context) error {
-		err := RunGroup(ctx, map[string]Runnable{
-			"one": runnableSpawnsMore(h1, d1, 5),
-			"two": two.runnable(),
-		})
-		if err != nil {
-			return err
-		}
-		Signal(ctx, SignalHealthy)
-		Signal(ctx, SignalDone)
-		return nil
-	}, WithPropagatePanic)
-
-	two.becomeHealthy()
-	s.waitSettleError(ctx, t)
-	// Expect one to start running.
-	select {
-	case <-h1:
-	default:
-		t.Fatalf("runnable 'one' didn't start")
-	}
-
-	// Kill off two, one should restart.
-	two.die()
-	<-d1
-
-	// And one should start running again.
-	s.waitSettleError(ctx, t)
-	select {
-	case <-h1:
-	default:
-		t.Fatalf("runnable 'one' didn't restart")
-	}
-}
-
-func TestPanic(t *testing.T) {
-	h1 := make(chan struct{})
-	d1 := make(chan struct{})
-	two := newRC()
-
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-	s := New(ctx, func(ctx context.Context) error {
-		err := RunGroup(ctx, map[string]Runnable{
-			"one": runnableBecomesHealthy(h1, d1),
-			"two": two.runnable(),
-		})
-		if err != nil {
-			return err
-		}
-		Signal(ctx, SignalHealthy)
-		Signal(ctx, SignalDone)
-		return nil
-	})
-
-	two.becomeHealthy()
-	s.waitSettleError(ctx, t)
-	// Expect one to start running.
-	select {
-	case <-h1:
-	default:
-		t.Fatalf("runnable 'one' didn't start")
-	}
-
-	// Kill off two, one should restart.
-	two.panic()
-	<-d1
-
-	// And one should start running again.
-	s.waitSettleError(ctx, t)
-	select {
-	case <-h1:
-	default:
-		t.Fatalf("runnable 'one' didn't restart")
-	}
-}
-
-func TestMultipleLevelFailure(t *testing.T) {
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-	New(ctx, func(ctx context.Context) error {
-		err := RunGroup(ctx, map[string]Runnable{
-			"one": runnableSpawnsMore(nil, nil, 4),
-			"two": runnableSpawnsMore(nil, nil, 4),
-		})
-		if err != nil {
-			return err
-		}
-		Signal(ctx, SignalHealthy)
-		Signal(ctx, SignalDone)
-		return nil
-	}, WithPropagatePanic)
-}
-
-func TestBackoff(t *testing.T) {
-	one := newRC()
-
-	ctx, ctxC := context.WithTimeout(context.Background(), 20*time.Second)
-	defer ctxC()
-
-	s := New(ctx, func(ctx context.Context) error {
-		if err := Run(ctx, "one", one.runnable()); err != nil {
-			return err
-		}
-		Signal(ctx, SignalHealthy)
-		Signal(ctx, SignalDone)
-		return nil
-	}, WithPropagatePanic)
-
-	one.becomeHealthy()
-	// Die a bunch of times in a row, this brings up the next exponential
-	// backoff to over a second.
-	for i := 0; i < 4; i += 1 {
-		one.die()
-		one.waitState(rcRunnableStateNew)
-	}
-	// Measure how long it takes for the runnable to respawn after a number of
-	// failures
-	start := time.Now()
-	one.die()
-	one.becomeHealthy()
-	one.waitState(rcRunnableStateHealthy)
-	taken := time.Since(start)
-	if taken < 1*time.Second {
-		t.Errorf("Runnable took %v to restart, wanted at least a second from backoff", taken)
-	}
-
-	s.waitSettleError(ctx, t)
-	// Now that we've become healthy, die again. Becoming healthy resets the backoff.
-	start = time.Now()
-	one.die()
-	one.becomeHealthy()
-	one.waitState(rcRunnableStateHealthy)
-	taken = time.Since(start)
-	if taken > 1*time.Second || taken < 100*time.Millisecond {
-		t.Errorf("Runnable took %v to restart, wanted at least 100ms from backoff and at most 1s from backoff reset", taken)
-	}
-}
-
-// TestResilience throws some curveballs at the supervisor - either programming
-// errors or high load. It then ensures that another runnable is running, and
-// that it restarts on its sibling failure.
-func TestResilience(t *testing.T) {
-	// request/response channel for testing liveness of the 'one' runnable
-	req := make(chan chan struct{})
-
-	// A runnable that responds on the 'req' channel.
-	one := func(ctx context.Context) error {
-		Signal(ctx, SignalHealthy)
-		for {
-			select {
-			case <-ctx.Done():
-				return ctx.Err()
-			case r := <-req:
-				r <- struct{}{}
-			}
-		}
-	}
-	oneSibling := newRC()
-
-	oneTest := func() {
-		timeout := time.NewTicker(1000 * time.Millisecond)
-		ping := make(chan struct{})
-		req <- ping
-		select {
-		case <-ping:
-		case <-timeout.C:
-			t.Fatalf("one ping response timeout")
-		}
-		timeout.Stop()
-	}
-
-	// A nasty runnable that calls Signal with the wrong context (this is a
-	// programming error)
-	two := func(ctx context.Context) error {
-		Signal(context.TODO(), SignalHealthy)
-		return nil
-	}
-
-	// A nasty runnable that calls Signal wrong (this is a programming error).
-	three := func(ctx context.Context) error {
-		Signal(ctx, SignalDone)
-		return nil
-	}
-
-	// A nasty runnable that runs in a busy loop (this is a programming error).
-	four := func(ctx context.Context) error {
-		for {
-			time.Sleep(0)
-		}
-	}
-
-	// A nasty runnable that keeps creating more runnables.
-	five := func(ctx context.Context) error {
-		i := 1
-		for {
-			err := Run(ctx, fmt.Sprintf("r%d", i), runnableSpawnsMore(nil, nil, 2))
-			if err != nil {
-				return err
-			}
-
-			time.Sleep(100 * time.Millisecond)
-			i += 1
-		}
-	}
-
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-	New(ctx, func(ctx context.Context) error {
-		RunGroup(ctx, map[string]Runnable{
-			"one":        one,
-			"oneSibling": oneSibling.runnable(),
-		})
-		rs := map[string]Runnable{
-			"two": two, "three": three, "four": four, "five": five,
-		}
-		for k, v := range rs {
-			if err := Run(ctx, k, v); err != nil {
-				return err
-			}
-		}
-		Signal(ctx, SignalHealthy)
-		Signal(ctx, SignalDone)
-		return nil
-	})
-
-	// Five rounds of letting one run, then restarting it.
-	for i := 0; i < 5; i += 1 {
-		oneSibling.becomeHealthy()
-		oneSibling.waitState(rcRunnableStateHealthy)
-
-		// 'one' should work for at least a second.
-		deadline := time.Now().Add(1 * time.Second)
-		for {
-			if time.Now().After(deadline) {
-				break
-			}
-
-			oneTest()
-		}
-
-		// Killing 'oneSibling' should restart one.
-		oneSibling.panic()
-	}
-	// Make sure 'one' is still okay.
-	oneTest()
-}
-
-// TestSubLoggers exercises the reserved/sub-logger functionality of runnable
-// nodes. It ensures a sub-logger and runnable cannot have colliding names, and
-// that logging actually works.
-func TestSubLoggers(t *testing.T) {
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-
-	errCA := make(chan error)
-	errCB := make(chan error)
-	lt := logtree.New()
-	New(ctx, func(ctx context.Context) error {
-		err := RunGroup(ctx, map[string]Runnable{
-			// foo will first create a sublogger, then attempt to create a
-			// colliding runnable.
-			"foo": func(ctx context.Context) error {
-				sl, err := SubLogger(ctx, "dut")
-				if err != nil {
-					errCA <- fmt.Errorf("creating sub-logger: %w", err)
-					return nil
-				}
-				sl.Infof("hello from foo.dut")
-				err = Run(ctx, "dut", runnableBecomesHealthy(nil, nil))
-				if err == nil {
-					errCA <- fmt.Errorf("creating colliding runnable should have failed")
-					return nil
-				}
-				Signal(ctx, SignalHealthy)
-				Signal(ctx, SignalDone)
-				errCA <- nil
-				return nil
-			},
-		})
-		if err != nil {
-			return err
-		}
-		_, err = SubLogger(ctx, "foo")
-		if err == nil {
-			errCB <- fmt.Errorf("creating collising sub-logger should have failed")
-			return nil
-		}
-		Signal(ctx, SignalHealthy)
-		Signal(ctx, SignalDone)
-		errCB <- nil
-		return nil
-	}, WithPropagatePanic, WithExistingLogtree(lt))
-
-	err := <-errCA
-	if err != nil {
-		t.Fatalf("from root.foo: %v", err)
-	}
-	err = <-errCB
-	if err != nil {
-		t.Fatalf("from root: %v", err)
-	}
-
-	// Now enure that the expected message appears in the logtree.
-	dn := logtree.DN("root.foo.dut")
-	r, err := lt.Read(dn, logtree.WithBacklog(logtree.BacklogAllAvailable))
-	if err != nil {
-		t.Fatalf("logtree read failed: %v", err)
-	}
-	defer r.Close()
-	found := false
-	for _, e := range r.Backlog {
-		if e.DN != dn {
-			continue
-		}
-		if e.Leveled == nil {
-			continue
-		}
-		if e.Leveled.MessagesJoined() != "hello from foo.dut" {
-			continue
-		}
-		found = true
-		break
-	}
-	if !found {
-		t.Fatalf("did not find expected logline in %s", dn)
-	}
-}
-
-func ExampleNew() {
-	// Minimal runnable that is immediately done.
-	childC := make(chan struct{})
-	child := func(ctx context.Context) error {
-		Signal(ctx, SignalHealthy)
-		close(childC)
-		Signal(ctx, SignalDone)
-		return nil
-	}
-
-	// Start a supervision tree with a root runnable.
-	ctx, ctxC := context.WithCancel(context.Background())
-	defer ctxC()
-	New(ctx, func(ctx context.Context) error {
-		err := Run(ctx, "child", child)
-		if err != nil {
-			return fmt.Errorf("could not run 'child': %w", err)
-		}
-		Signal(ctx, SignalHealthy)
-
-		t := time.NewTicker(time.Second)
-		defer t.Stop()
-
-		// Do something in the background, and exit on context cancel.
-		for {
-			select {
-			case <-t.C:
-				fmt.Printf("tick!")
-			case <-ctx.Done():
-				return ctx.Err()
-			}
-		}
-	})
-
-	// root.child will close this channel.
-	<-childC
-}
diff --git a/metropolis/pkg/supervisor/supervisor_testhelpers.go b/metropolis/pkg/supervisor/supervisor_testhelpers.go
deleted file mode 100644
index d3f994f..0000000
--- a/metropolis/pkg/supervisor/supervisor_testhelpers.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package supervisor
-
-import (
-	"context"
-	"errors"
-	"log"
-	"sort"
-	"testing"
-	"time"
-
-	"source.monogon.dev/metropolis/pkg/logtree"
-)
-
-// TestHarness runs a supervisor in a harness designed for unit testing
-// runnables and runnable trees.
-//
-// The given runnable will be run in a new supervisor, and the logs from this
-// supervisor will be streamed to stderr. If the runnable returns a non-context
-// error, the harness will throw a test error, but will not abort the test.
-//
-// The harness also returns a context cancel function that can be used to
-// terminate the started supervisor early. Regardless of manual cancellation,
-// the supervisor will always be terminated up at the end of the test/benchmark
-// it's running in. The supervision tree will also be cleaned up and the test
-// will block until all runnables have exited.
-//
-// The second returned value is the logtree used by this supervisor. It can be
-// used to assert some log messages are emitted in tests that exercise some
-// log-related functionality.
-func TestHarness(t testing.TB, r func(ctx context.Context) error) (context.CancelFunc, *logtree.LogTree) {
-	t.Helper()
-
-	ctx, ctxC := context.WithCancel(context.Background())
-
-	lt := logtree.New()
-
-	// Only log to stderr when we're running in a test, not in a fuzz harness or a
-	// benchmark - otherwise we just waste CPU cycles.
-	verbose := false
-	if _, ok := t.(*testing.T); ok {
-		verbose = true
-	}
-	if verbose {
-		logtree.PipeAllToTest(t, lt)
-	}
-
-	sup := New(ctx, func(ctx context.Context) error {
-		Logger(ctx).Infof("Starting test %s...", t.Name())
-		if err := r(ctx); err != nil && !errors.Is(err, ctx.Err()) {
-			t.Errorf("Supervised runnable in harness returned error: %v", err)
-			return err
-		}
-		return nil
-	}, WithExistingLogtree(lt), WithPropagatePanic)
-
-	t.Cleanup(func() {
-		ctxC()
-		if verbose {
-			log.Printf("supervisor.TestHarness: Waiting for supervisor runnables to die...")
-		}
-		timeoutNag := time.Now().Add(5 * time.Second)
-
-		for {
-			live := sup.liveRunnables()
-			if len(live) == 0 {
-				if verbose {
-					log.Printf("supervisor.TestHarness: All done.")
-				}
-				return
-			}
-
-			if time.Now().After(timeoutNag) {
-				timeoutNag = time.Now().Add(5 * time.Second)
-				sort.Strings(live)
-				if verbose {
-					log.Printf("supervisor.TestHarness: Still live:")
-					for _, l := range live {
-						log.Printf("supervisor.TestHarness: - %s", l)
-					}
-				}
-			}
-
-			time.Sleep(time.Second)
-		}
-	})
-	return ctxC, lt
-}
diff --git a/metropolis/pkg/sysctl/BUILD.bazel b/metropolis/pkg/sysctl/BUILD.bazel
deleted file mode 100644
index a945a03..0000000
--- a/metropolis/pkg/sysctl/BUILD.bazel
+++ /dev/null
@@ -1,8 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "sysctl",
-    srcs = ["options.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/sysctl",
-    visibility = ["//visibility:public"],
-)
diff --git a/metropolis/pkg/sysctl/options.go b/metropolis/pkg/sysctl/options.go
deleted file mode 100644
index b5e1e36..0000000
--- a/metropolis/pkg/sysctl/options.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package sysctl
-
-import (
-	"fmt"
-	"os"
-	"path"
-	"strings"
-)
-
-// Options contains sysctl options to apply
-type Options map[string]string
-
-// Apply attempts to apply all options in Options. It aborts on the first
-// one which returns an error when applying.
-func (o Options) Apply() error {
-	for name, value := range o {
-		filePath := path.Join("/proc/sys/", strings.ReplaceAll(name, ".", "/"))
-		optionFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
-		if err != nil {
-			return fmt.Errorf("failed to set option %v: %w", name, err)
-		}
-		if _, err := optionFile.WriteString(value + "\n"); err != nil {
-			optionFile.Close()
-			return fmt.Errorf("failed to set option %v: %w", name, err)
-		}
-		optionFile.Close() // In a loop, defer'ing could open a lot of FDs
-	}
-	return nil
-}
diff --git a/metropolis/pkg/sysfs/BUILD.bazel b/metropolis/pkg/sysfs/BUILD.bazel
deleted file mode 100644
index 06995e2..0000000
--- a/metropolis/pkg/sysfs/BUILD.bazel
+++ /dev/null
@@ -1,12 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "sysfs",
-    srcs = [
-        "block.go",
-        "uevents.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/sysfs",
-    visibility = ["//metropolis:__subpackages__"],
-    deps = ["@com_github_google_uuid//:uuid"],
-)
diff --git a/metropolis/pkg/sysfs/block.go b/metropolis/pkg/sysfs/block.go
deleted file mode 100644
index c7cbbd7..0000000
--- a/metropolis/pkg/sysfs/block.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Implementation included in this file was written with the aim of easing
-// integration with the interface exposed at /sys/class/block. It assumes sysfs
-// is already mounted at /sys.
-package sysfs
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"github.com/google/uuid"
-)
-
-// PartUUIDMap returns a mapping between partition UUIDs and block device
-// names based on information exposed by uevent. UUID keys of the returned
-// map are represented as lowercase strings.
-func PartUUIDMap() (map[string]string, error) {
-	m := make(map[string]string)
-	// Get a list of block device symlinks from sysfs.
-	const blkDirPath = "/sys/class/block"
-	blkDevs, err := os.ReadDir(blkDirPath)
-	if err != nil {
-		return m, fmt.Errorf("couldn't read %q: %w", blkDirPath, err)
-	}
-	// Iterate over block device symlinks present in blkDevs, creating a mapping
-	// in m for each device with both PARTUUID and DEVNAME keys present in their
-	// respective uevent files.
-	for _, devInfo := range blkDevs {
-		// Read the uevent file and transform it into a string->string map.
-		kv, err := ReadUevents(filepath.Join(blkDirPath, devInfo.Name(), "uevent"))
-		if err != nil {
-			return m, fmt.Errorf("while reading uevents: %w", err)
-		}
-		// Check that the required keys are present in the map.
-		if uuid, name := kv["PARTUUID"], kv["DEVNAME"]; uuid != "" && name != "" {
-			m[uuid] = name
-		}
-	}
-	return m, nil
-}
-
-var ErrDevNotFound = errors.New("device not found")
-
-// DeviceByPartUUID returns a block device name, given its corresponding
-// partition UUID.
-func DeviceByPartUUID(id uuid.UUID) (string, error) {
-	pm, err := PartUUIDMap()
-	if err != nil {
-		return "", err
-	}
-	if bdev, ok := pm[id.String()]; ok {
-		return bdev, nil
-	}
-	return "", ErrDevNotFound
-}
-
-// ParentBlockDevice transforms the block device name of a partition, eg
-// "sda1", to the name of the block device hosting it, eg "sda".
-func ParentBlockDevice(dev string) (string, error) {
-	// Build a path pointing to a sysfs block device symlink.
-	partLink := filepath.Join("/sys/class/block", dev)
-	// Read the symlink at partLink. This should leave us with a path of the form
-	// (...)/sda/sdaN.
-	linkTgt, err := os.Readlink(partLink)
-	if err != nil {
-		return "", fmt.Errorf("couldn't read the block device symlink at %q: %w", partLink, err)
-	}
-	// Remove the last element from the path, leaving us with a path pointing to
-	// the block device containting the installer partition, of the form
-	// (...)/sda.
-	devPath := filepath.Dir(linkTgt)
-	// Get the last element of the path, leaving us with just the block device
-	// name, eg sda
-	devName := filepath.Base(devPath)
-	return devName, nil
-}
-
-// PartitionBlockDevice returns the name of a block device associated with the
-// partition at index in the containing block device dev, eg "nvme0n1pN" for
-// "nvme0n1" or "sdaN" for "sda".
-func PartitionBlockDevice(dev string, index int) (string, error) {
-	dp := filepath.Join("/sys/class/block", dev)
-	dir, err := os.ReadDir(dp)
-	if err != nil {
-		return "", err
-	}
-	for _, info := range dir {
-		// Skip non-directories
-		if !info.IsDir() {
-			continue
-		}
-		// Check whether the directory contains a file named 'partition'. If that's
-		// the case, read the partition index from it and compare it with the one
-		// supplied as a function parameter. If they're equal, return the directory
-		// name.
-		istr, err := os.ReadFile(filepath.Join(dp, info.Name(), "partition"))
-		if os.IsNotExist(err) {
-			continue
-		}
-		if err != nil {
-			return "", err
-		}
-		// istr holds a newline-terminated ASCII-encoded decimal number.
-		pi, err := strconv.Atoi(strings.TrimSuffix(string(istr), "\n"))
-		if err != nil {
-			return "", fmt.Errorf("failed to parse partition index: %w", err)
-		}
-		if pi == index {
-			return info.Name(), nil
-		}
-	}
-	return "", fmt.Errorf("couldn't find partition %d of %q", index, dev)
-}
diff --git a/metropolis/pkg/sysfs/uevents.go b/metropolis/pkg/sysfs/uevents.go
deleted file mode 100644
index fed4319..0000000
--- a/metropolis/pkg/sysfs/uevents.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sysfs
-
-import (
-	"bufio"
-	"io"
-	"os"
-	"strings"
-)
-
-func ReadUevents(filename string) (map[string]string, error) {
-	f, err := os.Open(filename)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	ueventMap := make(map[string]string)
-	reader := bufio.NewReader(f)
-	for {
-		name, err := reader.ReadString(byte('='))
-		if err == io.EOF {
-			break
-		} else if err != nil {
-			return nil, err
-		}
-		value, err := reader.ReadString(byte('\n'))
-		if err == io.EOF {
-			continue
-		} else if err != nil {
-			return nil, err
-		}
-		ueventMap[strings.Trim(name, "=")] = strings.TrimSpace(value)
-	}
-	return ueventMap, nil
-}
diff --git a/metropolis/pkg/tpm/BUILD.bazel b/metropolis/pkg/tpm/BUILD.bazel
deleted file mode 100644
index d8f59af..0000000
--- a/metropolis/pkg/tpm/BUILD.bazel
+++ /dev/null
@@ -1,22 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "tpm",
-    srcs = [
-        "credactivation_compat.go",
-        "tpm.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/tpm",
-    visibility = ["//metropolis:__subpackages__"],
-    deps = [
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/sysfs",
-        "//metropolis/pkg/tpm/proto",
-        "@com_github_google_go_tpm//tpm2",
-        "@com_github_google_go_tpm//tpmutil",
-        "@com_github_google_go_tpm_tools//client",
-        "@org_golang_google_protobuf//proto",
-        "@org_golang_x_crypto//nacl/secretbox",
-        "@org_golang_x_sys//unix",
-    ],
-)
diff --git a/metropolis/pkg/tpm/credactivation_compat.go b/metropolis/pkg/tpm/credactivation_compat.go
deleted file mode 100644
index 24766a7..0000000
--- a/metropolis/pkg/tpm/credactivation_compat.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tpm
-
-// This file is adapted from github.com/google/go-tpm/tpm2/credactivation which
-// outputs broken challenges for unknown reasons. They use u16 length-delimited
-// outputs for the challenge blobs which is incorrect. Rather than rewriting
-// the routine, we only applied minimal fixes to it and skip the ECC part of
-// the issue (because we would rather trust the proprietary RSA
-// implementation).
-//
-// TODO(lorenz): I'll eventually deal with this upstream, but for now just fix
-// it here (it's not that) much code after all.
-//   https://github.com/google/go-tpm/issues/121
-
-import (
-	"crypto/aes"
-	"crypto/cipher"
-	"crypto/hmac"
-	"crypto/rsa"
-	"fmt"
-	"io"
-
-	"github.com/google/go-tpm/tpm2"
-	"github.com/google/go-tpm/tpmutil"
-)
-
-const (
-	labelIdentity  = "IDENTITY"
-	labelStorage   = "STORAGE"
-	labelIntegrity = "INTEGRITY"
-)
-
-func generateRSA(aik *tpm2.HashValue, pub *rsa.PublicKey, symBlockSize int, secret []byte, rnd io.Reader) ([]byte, []byte, error) {
-	aikHash, err := aik.Alg.Hash()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// The seed length should match the keysize used by the EKs symmetric
-	// cipher.
-	// For typical RSA EKs, this will be 128 bits (16 bytes).
-	// Spec: TCG 2.0 EK Credential Profile revision 14, section 2.1.5.1.
-	seed := make([]byte, symBlockSize)
-	if _, err := io.ReadFull(rnd, seed); err != nil {
-		return nil, nil, fmt.Errorf("generating seed: %v", err)
-	}
-
-	// Encrypt the seed value using the provided public key.
-	// See annex B, section 10.4 of the TPM specification revision 2 part 1.
-	label := append([]byte(labelIdentity), 0)
-	encSecret, err := rsa.EncryptOAEP(aikHash.New(), rnd, pub, seed, label)
-	if err != nil {
-		return nil, nil, fmt.Errorf("generating encrypted seed: %v", err)
-	}
-
-	// Generate the encrypted credential by convolving the seed with the digest
-	// of the AIK, and using the result as the key to encrypt the secret.
-	// See section 24.4 of TPM 2.0 specification, part 1.
-	aikNameEncoded, err := aik.Encode()
-	if err != nil {
-		return nil, nil, fmt.Errorf("encoding aikName: %v", err)
-	}
-	symmetricKey, err := tpm2.KDFa(aik.Alg, seed, labelStorage, aikNameEncoded, nil, len(seed)*8)
-	if err != nil {
-		return nil, nil, fmt.Errorf("generating symmetric key: %v", err)
-	}
-	c, err := aes.NewCipher(symmetricKey)
-	if err != nil {
-		return nil, nil, fmt.Errorf("symmetric cipher setup: %v", err)
-	}
-	cv, err := tpmutil.Pack(tpmutil.U16Bytes(secret))
-	if err != nil {
-		return nil, nil, fmt.Errorf("generating cv (TPM2B_Digest): %v", err)
-	}
-
-	// IV is all null bytes. encIdentity represents the encrypted credential.
-	encIdentity := make([]byte, len(cv))
-	cipher.NewCFBEncrypter(c, make([]byte, len(symmetricKey))).XORKeyStream(encIdentity, cv)
-
-	// Generate the integrity HMAC, which is used to protect the integrity of the
-	// encrypted structure.
-	// See section 24.5 of the TPM specification revision 2 part 1.
-	macKey, err := tpm2.KDFa(aik.Alg, seed, labelIntegrity, nil, nil, aikHash.Size()*8)
-	if err != nil {
-		return nil, nil, fmt.Errorf("generating HMAC key: %v", err)
-	}
-
-	mac := hmac.New(aikHash.New, macKey)
-	mac.Write(encIdentity)
-	mac.Write(aikNameEncoded)
-	integrityHMAC := mac.Sum(nil)
-
-	idObject := &tpm2.IDObject{
-		IntegrityHMAC: integrityHMAC,
-		EncIdentity:   encIdentity,
-	}
-	id, err := tpmutil.Pack(idObject)
-	if err != nil {
-		return nil, nil, fmt.Errorf("encoding IDObject: %v", err)
-	}
-
-	packedID, err := tpmutil.Pack(id)
-	if err != nil {
-		return nil, nil, fmt.Errorf("packing id: %v", err)
-	}
-	packedEncSecret, err := tpmutil.Pack(encSecret)
-	if err != nil {
-		return nil, nil, fmt.Errorf("packing encSecret: %v", err)
-	}
-
-	return packedID, packedEncSecret, nil
-}
diff --git a/metropolis/pkg/tpm/eventlog/BUILD.bazel b/metropolis/pkg/tpm/eventlog/BUILD.bazel
deleted file mode 100644
index 7bbd464..0000000
--- a/metropolis/pkg/tpm/eventlog/BUILD.bazel
+++ /dev/null
@@ -1,17 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "eventlog",
-    srcs = [
-        "compat.go",
-        "eventlog.go",
-        "secureboot.go",
-    ],
-    importpath = "source.monogon.dev/metropolis/pkg/tpm/eventlog",
-    visibility = ["//metropolis:__subpackages__"],
-    deps = [
-        "//metropolis/pkg/tpm/eventlog/internal",
-        "@com_github_google_certificate_transparency_go//x509",
-        "@com_github_google_go_tpm//tpm2",
-    ],
-)
diff --git a/metropolis/pkg/tpm/eventlog/LICENSE-3RD-PARTY.txt b/metropolis/pkg/tpm/eventlog/LICENSE-3RD-PARTY.txt
deleted file mode 100644
index 2d3298c..0000000
--- a/metropolis/pkg/tpm/eventlog/LICENSE-3RD-PARTY.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Copyright 2020 Google Inc.
-Licensed under the Apache License, Version 2.0 (the "License"); you may not
-use this file except in compliance with the License. You may obtain a copy of
-the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations under
-the License.
\ No newline at end of file
diff --git a/metropolis/pkg/tpm/eventlog/compat.go b/metropolis/pkg/tpm/eventlog/compat.go
deleted file mode 100644
index f83972b..0000000
--- a/metropolis/pkg/tpm/eventlog/compat.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package eventlog
-
-// This file contains compatibility functions for our TPM library
-
-import (
-	"crypto"
-)
-
-// ConvertRawPCRs converts from raw PCRs to eventlog PCR structures
-func ConvertRawPCRs(pcrs [][]byte) []PCR {
-	var evPCRs []PCR
-	for i, digest := range pcrs {
-		evPCRs = append(evPCRs, PCR{DigestAlg: crypto.SHA256, Index: i, Digest: digest})
-	}
-	return evPCRs
-}
diff --git a/metropolis/pkg/tpm/eventlog/eventlog.go b/metropolis/pkg/tpm/eventlog/eventlog.go
deleted file mode 100644
index dbfeae0..0000000
--- a/metropolis/pkg/tpm/eventlog/eventlog.go
+++ /dev/null
@@ -1,633 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Taken and pruned from go-attestation revision
-// 2453c8f39a4ff46009f6a9db6fb7c6cca789d9a1 under Apache 2.0
-
-package eventlog
-
-import (
-	"bytes"
-	"crypto"
-	"crypto/sha1"
-	"crypto/sha256"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"sort"
-
-	"github.com/google/go-tpm/tpm2"
-)
-
-// HashAlg identifies a hashing Algorithm.
-type HashAlg uint8
-
-// Valid hash algorithms.
-var (
-	HashSHA1   = HashAlg(tpm2.AlgSHA1)
-	HashSHA256 = HashAlg(tpm2.AlgSHA256)
-)
-
-func (a HashAlg) cryptoHash() crypto.Hash {
-	switch a {
-	case HashSHA1:
-		return crypto.SHA1
-	case HashSHA256:
-		return crypto.SHA256
-	}
-	return 0
-}
-
-func (a HashAlg) goTPMAlg() tpm2.Algorithm {
-	switch a {
-	case HashSHA1:
-		return tpm2.AlgSHA1
-	case HashSHA256:
-		return tpm2.AlgSHA256
-	}
-	return 0
-}
-
-// String returns a human-friendly representation of the hash algorithm.
-func (a HashAlg) String() string {
-	switch a {
-	case HashSHA1:
-		return "SHA1"
-	case HashSHA256:
-		return "SHA256"
-	}
-	return fmt.Sprintf("HashAlg<%d>", int(a))
-}
-
-// ReplayError describes the parsed events that failed to verify against
-// a particular PCR.
-type ReplayError struct {
-	Events      []Event
-	invalidPCRs []int
-}
-
-func (e ReplayError) affected(pcr int) bool {
-	for _, p := range e.invalidPCRs {
-		if p == pcr {
-			return true
-		}
-	}
-	return false
-}
-
-// Error returns a human-friendly description of replay failures.
-func (e ReplayError) Error() string {
-	return fmt.Sprintf("event log failed to verify: the following registers failed to replay: %v", e.invalidPCRs)
-}
-
-// TPM algorithms. See the TPM 2.0 specification section 6.3.
-//
-//   https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-2-Structures-01.38.pdf#page=42
-const (
-	algSHA1   uint16 = 0x0004
-	algSHA256 uint16 = 0x000B
-)
-
-// EventType indicates what kind of data an event is reporting.
-type EventType uint32
-
-// Event is a single event from a TCG event log. This reports descrete items such
-// as BIOs measurements or EFI states.
-type Event struct {
-	// order of the event in the event log.
-	sequence int
-
-	// PCR index of the event.
-	Index int
-	// Type of the event.
-	Type EventType
-
-	// Data of the event. For certain kinds of events, this must match the event
-	// digest to be valid.
-	Data []byte
-	// Digest is the verified digest of the event data. While an event can have
-	// multiple for different hash values, this is the one that was matched to the
-	// PCR value.
-	Digest []byte
-
-	// TODO(ericchiang): Provide examples or links for which event types must
-	// match their data to their digest.
-}
-
-func (e *Event) digestEquals(b []byte) error {
-	if len(e.Digest) == 0 {
-		return errors.New("no digests present")
-	}
-
-	switch len(e.Digest) {
-	case crypto.SHA256.Size():
-		s := sha256.Sum256(b)
-		if bytes.Equal(s[:], e.Digest) {
-			return nil
-		}
-	case crypto.SHA1.Size():
-		s := sha1.Sum(b)
-		if bytes.Equal(s[:], e.Digest) {
-			return nil
-		}
-	default:
-		return fmt.Errorf("cannot compare hash of length %d", len(e.Digest))
-	}
-
-	return fmt.Errorf("digest (len %d) does not match", len(e.Digest))
-}
-
-// EventLog is a parsed measurement log. This contains unverified data representing
-// boot events that must be replayed against PCR values to determine authenticity.
-type EventLog struct {
-	// Algs holds the set of algorithms that the event log uses.
-	Algs []HashAlg
-
-	rawEvents []rawEvent
-}
-
-func (e *EventLog) clone() *EventLog {
-	out := EventLog{
-		Algs:      make([]HashAlg, len(e.Algs)),
-		rawEvents: make([]rawEvent, len(e.rawEvents)),
-	}
-	copy(out.Algs, e.Algs)
-	copy(out.rawEvents, e.rawEvents)
-	return &out
-}
-
-type elWorkaround struct {
-	id          string
-	affectedPCR int
-	apply       func(e *EventLog) error
-}
-
-// inject3 appends two new events into the event log.
-func inject3(e *EventLog, pcr int, data1, data2, data3 string) error {
-	if err := inject(e, pcr, data1); err != nil {
-		return err
-	}
-	if err := inject(e, pcr, data2); err != nil {
-		return err
-	}
-	return inject(e, pcr, data3)
-}
-
-// inject2 appends two new events into the event log.
-func inject2(e *EventLog, pcr int, data1, data2 string) error {
-	if err := inject(e, pcr, data1); err != nil {
-		return err
-	}
-	return inject(e, pcr, data2)
-}
-
-// inject appends a new event into the event log.
-func inject(e *EventLog, pcr int, data string) error {
-	evt := rawEvent{
-		data:     []byte(data),
-		index:    pcr,
-		sequence: e.rawEvents[len(e.rawEvents)-1].sequence + 1,
-	}
-	for _, alg := range e.Algs {
-		h := alg.cryptoHash().New()
-		h.Write([]byte(data))
-		evt.digests = append(evt.digests, digest{hash: alg.cryptoHash(), data: h.Sum(nil)})
-	}
-	e.rawEvents = append(e.rawEvents, evt)
-	return nil
-}
-
-const (
-	ebsInvocation = "Exit Boot Services Invocation"
-	ebsSuccess    = "Exit Boot Services Returned with Success"
-	ebsFailure    = "Exit Boot Services Returned with Failure"
-)
-
-var eventlogWorkarounds = []elWorkaround{
-	{
-		id:          "EBS Invocation + Success",
-		affectedPCR: 5,
-		apply: func(e *EventLog) error {
-			return inject2(e, 5, ebsInvocation, ebsSuccess)
-		},
-	},
-	{
-		id:          "EBS Invocation + Failure",
-		affectedPCR: 5,
-		apply: func(e *EventLog) error {
-			return inject2(e, 5, ebsInvocation, ebsFailure)
-		},
-	},
-	{
-		id:          "EBS Invocation + Failure + Success",
-		affectedPCR: 5,
-		apply: func(e *EventLog) error {
-			return inject3(e, 5, ebsInvocation, ebsFailure, ebsSuccess)
-		},
-	},
-}
-
-// Verify replays the event log against a TPM's PCR values, returning the
-// events which could be matched to a provided PCR value.
-// An error is returned if the replayed digest for events with a given PCR
-// index do not match any provided value for that PCR index.
-func (e *EventLog) Verify(pcrs []PCR) ([]Event, error) {
-	events, rErr := replayEvents(e.rawEvents, pcrs)
-	if rErr == nil {
-		return events, nil
-	}
-	// If there were any issues replaying the PCRs, try each of the workarounds
-	// in turn.
-	// TODO(jsonp): Allow workarounds to be combined.
-	for _, wkrd := range eventlogWorkarounds {
-		if !rErr.affected(wkrd.affectedPCR) {
-			continue
-		}
-		el := e.clone()
-		if err := wkrd.apply(el); err != nil {
-			return nil, fmt.Errorf("failed applying workaround %q: %v", wkrd.id, err)
-		}
-		if events, err := replayEvents(el.rawEvents, pcrs); err == nil {
-			return events, nil
-		}
-	}
-
-	return events, rErr
-}
-
-// PCR encapsulates the value of a PCR at a point in time.
-type PCR struct {
-	Index     int
-	Digest    []byte
-	DigestAlg crypto.Hash
-}
-
-func extend(pcr PCR, replay []byte, e rawEvent) (pcrDigest []byte, eventDigest []byte, err error) {
-	h := pcr.DigestAlg
-
-	for _, digest := range e.digests {
-		if digest.hash != pcr.DigestAlg {
-			continue
-		}
-		if len(digest.data) != len(pcr.Digest) {
-			return nil, nil, fmt.Errorf("digest data length (%d) doesn't match PCR digest length (%d)", len(digest.data), len(pcr.Digest))
-		}
-		hash := h.New()
-		if len(replay) != 0 {
-			hash.Write(replay)
-		} else {
-			b := make([]byte, h.Size())
-			hash.Write(b)
-		}
-		hash.Write(digest.data)
-		return hash.Sum(nil), digest.data, nil
-	}
-	return nil, nil, fmt.Errorf("no event digest matches pcr algorithm: %v", pcr.DigestAlg)
-}
-
-// replayPCR replays the event log for a specific PCR, using pcr and
-// event digests with the algorithm in pcr. An error is returned if the
-// replayed values do not match the final PCR digest, or any event tagged
-// with that PCR does not posess an event digest with the specified algorithm.
-func replayPCR(rawEvents []rawEvent, pcr PCR) ([]Event, bool) {
-	var (
-		replay    []byte
-		outEvents []Event
-	)
-
-	for _, e := range rawEvents {
-		if e.index != pcr.Index {
-			continue
-		}
-
-		replayValue, digest, err := extend(pcr, replay, e)
-		if err != nil {
-			return nil, false
-		}
-		replay = replayValue
-		outEvents = append(outEvents, Event{sequence: e.sequence, Data: e.data, Digest: digest, Index: pcr.Index, Type: e.typ})
-	}
-
-	if len(outEvents) > 0 && !bytes.Equal(replay, pcr.Digest) {
-		return nil, false
-	}
-	return outEvents, true
-}
-
-type pcrReplayResult struct {
-	events     []Event
-	successful bool
-}
-
-func replayEvents(rawEvents []rawEvent, pcrs []PCR) ([]Event, *ReplayError) {
-	var (
-		invalidReplays []int
-		verifiedEvents []Event
-		allPCRReplays  = map[int][]pcrReplayResult{}
-	)
-
-	// Replay the event log for every PCR and digest algorithm combination.
-	for _, pcr := range pcrs {
-		events, ok := replayPCR(rawEvents, pcr)
-		allPCRReplays[pcr.Index] = append(allPCRReplays[pcr.Index], pcrReplayResult{events, ok})
-	}
-
-	// Record PCR indices which do not have any successful replay. Record the
-	// events for a successful replay.
-pcrLoop:
-	for i, replaysForPCR := range allPCRReplays {
-		for _, replay := range replaysForPCR {
-			if replay.successful {
-				// We consider the PCR verified at this stage: The replay of values with
-				// one digest algorithm matched a provided value.
-				// As such, we save the PCR's events, and proceed to the next PCR.
-				verifiedEvents = append(verifiedEvents, replay.events...)
-				continue pcrLoop
-			}
-		}
-		invalidReplays = append(invalidReplays, i)
-	}
-
-	if len(invalidReplays) > 0 {
-		events := make([]Event, 0, len(rawEvents))
-		for _, e := range rawEvents {
-			events = append(events, Event{e.sequence, e.index, e.typ, e.data, nil})
-		}
-		return nil, &ReplayError{
-			Events:      events,
-			invalidPCRs: invalidReplays,
-		}
-	}
-
-	sort.Slice(verifiedEvents, func(i int, j int) bool {
-		return verifiedEvents[i].sequence < verifiedEvents[j].sequence
-	})
-	return verifiedEvents, nil
-}
-
-// EV_NO_ACTION is a special event type that indicates information to the
-// parser instead of holding a measurement. For TPM 2.0, this event type is
-// used to signal switching from SHA1 format to a variable length digest.
-//
-//   https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClientSpecPlat_TPM_2p0_1p04_pub.pdf#page=110
-const eventTypeNoAction = 0x03
-
-// ParseEventLog parses an unverified measurement log.
-func ParseEventLog(measurementLog []byte) (*EventLog, error) {
-	var specID *specIDEvent
-	r := bytes.NewBuffer(measurementLog)
-	parseFn := parseRawEvent
-	var el EventLog
-	e, err := parseFn(r, specID)
-	if err != nil {
-		return nil, fmt.Errorf("parse first event: %v", err)
-	}
-	if e.typ == eventTypeNoAction {
-		specID, err = parseSpecIDEvent(e.data)
-		if err != nil {
-			return nil, fmt.Errorf("failed to parse spec ID event: %v", err)
-		}
-		for _, alg := range specID.algs {
-			switch tpm2.Algorithm(alg.ID) {
-			case tpm2.AlgSHA1:
-				el.Algs = append(el.Algs, HashSHA1)
-			case tpm2.AlgSHA256:
-				el.Algs = append(el.Algs, HashSHA256)
-			}
-		}
-		if len(el.Algs) == 0 {
-			return nil, fmt.Errorf("measurement log didn't use sha1 or sha256 digests")
-		}
-		// Switch to parsing crypto agile events. Don't include this in the
-		// replayed events since it intentionally doesn't extend the PCRs.
-		//
-		// Note that this doesn't actually guarentee that events have SHA256
-		// digests.
-		parseFn = parseRawEvent2
-	} else {
-		el.Algs = []HashAlg{HashSHA1}
-		el.rawEvents = append(el.rawEvents, e)
-	}
-	sequence := 1
-	for r.Len() != 0 {
-		e, err := parseFn(r, specID)
-		if err != nil {
-			return nil, err
-		}
-		e.sequence = sequence
-		sequence++
-		el.rawEvents = append(el.rawEvents, e)
-	}
-	return &el, nil
-}
-
-type specIDEvent struct {
-	algs []specAlgSize
-}
-
-type specAlgSize struct {
-	ID   uint16
-	Size uint16
-}
-
-// Expected values for various Spec ID Event fields.
-//   https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=19
-var wantSignature = [16]byte{0x53, 0x70,
-	0x65, 0x63, 0x20, 0x49,
-	0x44, 0x20, 0x45, 0x76,
-	0x65, 0x6e, 0x74, 0x30,
-	0x33, 0x00} // "Spec ID Event03\0"
-
-const (
-	wantMajor  = 2
-	wantMinor  = 0
-	wantErrata = 0
-)
-
-// parseSpecIDEvent parses a TCG_EfiSpecIDEventStruct structure from the reader.
-//   https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=18
-func parseSpecIDEvent(b []byte) (*specIDEvent, error) {
-	r := bytes.NewReader(b)
-	var header struct {
-		Signature     [16]byte
-		PlatformClass uint32
-		VersionMinor  uint8
-		VersionMajor  uint8
-		Errata        uint8
-		UintnSize     uint8
-		NumAlgs       uint32
-	}
-	if err := binary.Read(r, binary.LittleEndian, &header); err != nil {
-		return nil, fmt.Errorf("reading event header: %v", err)
-	}
-	if header.Signature != wantSignature {
-		return nil, fmt.Errorf("invalid spec id signature: %x", header.Signature)
-	}
-	if header.VersionMajor != wantMajor {
-		return nil, fmt.Errorf("invalid spec major version, got %02x, wanted %02x",
-			header.VersionMajor, wantMajor)
-	}
-	if header.VersionMinor != wantMinor {
-		return nil, fmt.Errorf("invalid spec minor version, got %02x, wanted %02x",
-			header.VersionMajor, wantMinor)
-	}
-
-	// TODO(ericchiang): Check errata? Or do we expect that to change in ways
-	// we're okay with?
-
-	var specAlg specAlgSize
-	var e specIDEvent
-	for i := 0; i < int(header.NumAlgs); i++ {
-		if err := binary.Read(r, binary.LittleEndian, &specAlg); err != nil {
-			return nil, fmt.Errorf("reading algorithm: %v", err)
-		}
-		e.algs = append(e.algs, specAlg)
-	}
-
-	var vendorInfoSize uint8
-	if err := binary.Read(r, binary.LittleEndian, &vendorInfoSize); err != nil {
-		return nil, fmt.Errorf("reading vender info size: %v", err)
-	}
-	if r.Len() != int(vendorInfoSize) {
-		return nil, fmt.Errorf("reading vendor info, expected %d remaining bytes, got %d", vendorInfoSize, r.Len())
-	}
-	return &e, nil
-}
-
-type digest struct {
-	hash crypto.Hash
-	data []byte
-}
-
-type rawEvent struct {
-	sequence int
-	index    int
-	typ      EventType
-	data     []byte
-	digests  []digest
-}
-
-// TPM 1.2 event log format. See "5.1 SHA1 Event Log Entry Format"
-//   https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
-type rawEventHeader struct {
-	PCRIndex  uint32
-	Type      uint32
-	Digest    [20]byte
-	EventSize uint32
-}
-
-type eventSizeErr struct {
-	eventSize uint32
-	logSize   int
-}
-
-func (e *eventSizeErr) Error() string {
-	return fmt.Sprintf("event data size (%d bytes) is greater than remaining measurement log (%d bytes)", e.eventSize, e.logSize)
-}
-
-func parseRawEvent(r *bytes.Buffer, specID *specIDEvent) (event rawEvent, err error) {
-	var h rawEventHeader
-	if err = binary.Read(r, binary.LittleEndian, &h); err != nil {
-		return event, err
-	}
-	if h.EventSize == 0 {
-		return event, errors.New("event data size is 0")
-	}
-	if h.EventSize > uint32(r.Len()) {
-		return event, &eventSizeErr{h.EventSize, r.Len()}
-	}
-
-	data := make([]byte, int(h.EventSize))
-	if _, err := io.ReadFull(r, data); err != nil {
-		return event, err
-	}
-
-	digests := []digest{{hash: crypto.SHA1, data: h.Digest[:]}}
-
-	return rawEvent{
-		typ:     EventType(h.Type),
-		data:    data,
-		index:   int(h.PCRIndex),
-		digests: digests,
-	}, nil
-}
-
-// TPM 2.0 event log format. See "5.2 Crypto Agile Log Entry Format"
-//   https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
-type rawEvent2Header struct {
-	PCRIndex uint32
-	Type     uint32
-}
-
-func parseRawEvent2(r *bytes.Buffer, specID *specIDEvent) (event rawEvent, err error) {
-	var h rawEvent2Header
-
-	if err = binary.Read(r, binary.LittleEndian, &h); err != nil {
-		return event, err
-	}
-	event.typ = EventType(h.Type)
-	event.index = int(h.PCRIndex)
-
-	// parse the event digests
-	var numDigests uint32
-	if err := binary.Read(r, binary.LittleEndian, &numDigests); err != nil {
-		return event, err
-	}
-
-	for i := 0; i < int(numDigests); i++ {
-		var algID uint16
-		if err := binary.Read(r, binary.LittleEndian, &algID); err != nil {
-			return event, err
-		}
-		var digest digest
-
-		for _, alg := range specID.algs {
-			if alg.ID != algID {
-				continue
-			}
-			if uint16(r.Len()) < alg.Size {
-				return event, fmt.Errorf("reading digest: %v", io.ErrUnexpectedEOF)
-			}
-			digest.data = make([]byte, alg.Size)
-			digest.hash = HashAlg(alg.ID).cryptoHash()
-		}
-		if len(digest.data) == 0 {
-			return event, fmt.Errorf("unknown algorithm ID %x", algID)
-		}
-		if _, err := io.ReadFull(r, digest.data); err != nil {
-			return event, err
-		}
-		event.digests = append(event.digests, digest)
-	}
-
-	// parse event data
-	var eventSize uint32
-	if err = binary.Read(r, binary.LittleEndian, &eventSize); err != nil {
-		return event, err
-	}
-	if eventSize == 0 {
-		return event, errors.New("event data size is 0")
-	}
-	if eventSize > uint32(r.Len()) {
-		return event, &eventSizeErr{eventSize, r.Len()}
-	}
-	event.data = make([]byte, int(eventSize))
-	if _, err := io.ReadFull(r, event.data); err != nil {
-		return event, err
-	}
-	return event, err
-}
diff --git a/metropolis/pkg/tpm/eventlog/internal/BUILD.bazel b/metropolis/pkg/tpm/eventlog/internal/BUILD.bazel
deleted file mode 100644
index f134e7c..0000000
--- a/metropolis/pkg/tpm/eventlog/internal/BUILD.bazel
+++ /dev/null
@@ -1,12 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "internal",
-    srcs = ["events.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/tpm/eventlog/internal",
-    visibility = ["//metropolis/pkg/tpm/eventlog:__subpackages__"],
-    deps = [
-        "@com_github_google_certificate_transparency_go//asn1",
-        "@com_github_google_certificate_transparency_go//x509",
-    ],
-)
diff --git a/metropolis/pkg/tpm/eventlog/internal/events.go b/metropolis/pkg/tpm/eventlog/internal/events.go
deleted file mode 100644
index eb121b2..0000000
--- a/metropolis/pkg/tpm/eventlog/internal/events.go
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Taken from go-attestation under Apache 2.0
-package internal
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"unicode/utf16"
-
-	"github.com/google/certificate-transparency-go/asn1"
-	"github.com/google/certificate-transparency-go/x509"
-)
-
-const (
-	// maxNameLen is the maximum accepted byte length for a name field.
-	// This value should be larger than any reasonable value.
-	maxNameLen = 2048
-	// maxDataLen is the maximum size in bytes of a variable data field.
-	// This value should be larger than any reasonable value.
-	maxDataLen = 1024 * 1024 // 1 Megabyte.
-)
-
-// GUIDs representing the contents of an UEFI_SIGNATURE_LIST.
-var (
-	hashSHA256SigGUID        = efiGUID{0xc1c41626, 0x504c, 0x4092, [8]byte{0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28}}
-	hashSHA1SigGUID          = efiGUID{0x826ca512, 0xcf10, 0x4ac9, [8]byte{0xb1, 0x87, 0xbe, 0x01, 0x49, 0x66, 0x31, 0xbd}}
-	hashSHA224SigGUID        = efiGUID{0x0b6e5233, 0xa65c, 0x44c9, [8]byte{0x94, 0x07, 0xd9, 0xab, 0x83, 0xbf, 0xc8, 0xbd}}
-	hashSHA384SigGUID        = efiGUID{0xff3e5307, 0x9fd0, 0x48c9, [8]byte{0x85, 0xf1, 0x8a, 0xd5, 0x6c, 0x70, 0x1e, 0x01}}
-	hashSHA512SigGUID        = efiGUID{0x093e0fae, 0xa6c4, 0x4f50, [8]byte{0x9f, 0x1b, 0xd4, 0x1e, 0x2b, 0x89, 0xc1, 0x9a}}
-	keyRSA2048SigGUID        = efiGUID{0x3c5766e8, 0x269c, 0x4e34, [8]byte{0xaa, 0x14, 0xed, 0x77, 0x6e, 0x85, 0xb3, 0xb6}}
-	certRSA2048SHA256SigGUID = efiGUID{0xe2b36190, 0x879b, 0x4a3d, [8]byte{0xad, 0x8d, 0xf2, 0xe7, 0xbb, 0xa3, 0x27, 0x84}}
-	certRSA2048SHA1SigGUID   = efiGUID{0x67f8444f, 0x8743, 0x48f1, [8]byte{0xa3, 0x28, 0x1e, 0xaa, 0xb8, 0x73, 0x60, 0x80}}
-	certX509SigGUID          = efiGUID{0xa5c059a1, 0x94e4, 0x4aa7, [8]byte{0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72}}
-	certHashSHA256SigGUID    = efiGUID{0x3bd2a492, 0x96c0, 0x4079, [8]byte{0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed}}
-	certHashSHA384SigGUID    = efiGUID{0x7076876e, 0x80c2, 0x4ee6, [8]byte{0xaa, 0xd2, 0x28, 0xb3, 0x49, 0xa6, 0x86, 0x5b}}
-	certHashSHA512SigGUID    = efiGUID{0x446dbf63, 0x2502, 0x4cda, [8]byte{0xbc, 0xfa, 0x24, 0x65, 0xd2, 0xb0, 0xfe, 0x9d}}
-)
-
-// EventType describes the type of event signalled in the event log.
-type EventType uint32
-
-// 	BIOS Events (TCG PC Client Specific Implementation Specification for Conventional BIOS 1.21)
-const (
-	PrebootCert          EventType = 0x00000000
-	PostCode             EventType = 0x00000001
-	unused               EventType = 0x00000002
-	NoAction             EventType = 0x00000003
-	Separator            EventType = 0x00000004
-	Action               EventType = 0x00000005
-	EventTag             EventType = 0x00000006
-	SCRTMContents        EventType = 0x00000007
-	SCRTMVersion         EventType = 0x00000008
-	CpuMicrocode         EventType = 0x00000009
-	PlatformConfigFlags  EventType = 0x0000000A
-	TableOfDevices       EventType = 0x0000000B
-	CompactHash          EventType = 0x0000000C
-	Ipl                  EventType = 0x0000000D
-	IplPartitionData     EventType = 0x0000000E
-	NonhostCode          EventType = 0x0000000F
-	NonhostConfig        EventType = 0x00000010
-	NonhostInfo          EventType = 0x00000011
-	OmitBootDeviceEvents EventType = 0x00000012
-)
-
-// EFI Events (TCG EFI Platform Specification Version 1.22)
-const (
-	EFIEventBase               EventType = 0x80000000
-	EFIVariableDriverConfig    EventType = 0x80000001
-	EFIVariableBoot            EventType = 0x80000002
-	EFIBootServicesApplication EventType = 0x80000003
-	EFIBootServicesDriver      EventType = 0x80000004
-	EFIRuntimeServicesDriver   EventType = 0x80000005
-	EFIGPTEvent                EventType = 0x80000006
-	EFIAction                  EventType = 0x80000007
-	EFIPlatformFirmwareBlob    EventType = 0x80000008
-	EFIHandoffTables           EventType = 0x80000009
-	EFIHCRTMEvent              EventType = 0x80000010
-	EFIVariableAuthority       EventType = 0x800000e0
-)
-
-// ErrSigMissingGUID is returned if an EFI_SIGNATURE_DATA structure was parsed
-// successfully, however was missing the SignatureOwner GUID. This case is
-// handled specially as a workaround for a bug relating to authority events.
-var ErrSigMissingGUID = errors.New("signature data was missing owner GUID")
-
-var eventTypeNames = map[EventType]string{
-	PrebootCert:          "Preboot Cert",
-	PostCode:             "POST Code",
-	unused:               "Unused",
-	NoAction:             "No Action",
-	Separator:            "Separator",
-	Action:               "Action",
-	EventTag:             "Event Tag",
-	SCRTMContents:        "S-CRTM Contents",
-	SCRTMVersion:         "S-CRTM Version",
-	CpuMicrocode:         "CPU Microcode",
-	PlatformConfigFlags:  "Platform Config Flags",
-	TableOfDevices:       "Table of Devices",
-	CompactHash:          "Compact Hash",
-	Ipl:                  "IPL",
-	IplPartitionData:     "IPL Partition Data",
-	NonhostCode:          "Non-Host Code",
-	NonhostConfig:        "Non-HostConfig",
-	NonhostInfo:          "Non-Host Info",
-	OmitBootDeviceEvents: "Omit Boot Device Events",
-
-	EFIEventBase:               "EFI Event Base",
-	EFIVariableDriverConfig:    "EFI Variable Driver Config",
-	EFIVariableBoot:            "EFI Variable Boot",
-	EFIBootServicesApplication: "EFI Boot Services Application",
-	EFIBootServicesDriver:      "EFI Boot Services Driver",
-	EFIRuntimeServicesDriver:   "EFI Runtime Services Driver",
-	EFIGPTEvent:                "EFI GPT Event",
-	EFIAction:                  "EFI Action",
-	EFIPlatformFirmwareBlob:    "EFI Platform Firmware Blob",
-	EFIVariableAuthority:       "EFI Variable Authority",
-	EFIHandoffTables:           "EFI Handoff Tables",
-	EFIHCRTMEvent:              "EFI H-CRTM Event",
-}
-
-func (e EventType) String() string {
-	if s, ok := eventTypeNames[e]; ok {
-		return s
-	}
-	return fmt.Sprintf("EventType(0x%x)", uint32(e))
-}
-
-// UntrustedParseEventType returns the event type indicated by
-// the provided value.
-func UntrustedParseEventType(et uint32) (EventType, error) {
-	// "The value associated with a UEFI specific platform event type MUST be in
-	// the range between 0x80000000 and 0x800000FF, inclusive."
-	if (et < 0x80000000 || et > 0x800000FF) && et > 0x12 {
-		return EventType(0), fmt.Errorf("event type not between [0x0, 0x12] or [0x80000000, 0x800000FF]: got %#x", et)
-	}
-	if _, ok := eventTypeNames[EventType(et)]; !ok {
-		return EventType(0), fmt.Errorf("unknown event type %#x", et)
-	}
-	return EventType(et), nil
-}
-
-// efiGUID represents the EFI_GUID type.
-// See section "2.3.1 Data Types" in the specification for more information.
-// type efiGUID [16]byte
-type efiGUID struct {
-	Data1 uint32
-	Data2 uint16
-	Data3 uint16
-	Data4 [8]byte
-}
-
-func (d efiGUID) String() string {
-	var u [8]byte
-	binary.BigEndian.PutUint32(u[:4], d.Data1)
-	binary.BigEndian.PutUint16(u[4:6], d.Data2)
-	binary.BigEndian.PutUint16(u[6:8], d.Data3)
-	return fmt.Sprintf("%x-%x-%x-%x-%x", u[:4], u[4:6], u[6:8], d.Data4[:2], d.Data4[2:])
-}
-
-// UEFIVariableDataHeader represents the leading fixed-size fields
-// within UEFI_VARIABLE_DATA.
-type UEFIVariableDataHeader struct {
-	VariableName       efiGUID
-	UnicodeNameLength  uint64 // uintN
-	VariableDataLength uint64 // uintN
-}
-
-// UEFIVariableData represents the UEFI_VARIABLE_DATA structure.
-type UEFIVariableData struct {
-	Header       UEFIVariableDataHeader
-	UnicodeName  []uint16
-	VariableData []byte // []int8
-}
-
-// ParseUEFIVariableData parses the data section of an event structured as a
-// UEFI variable.
-//
-//   https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClient_Specific_Platform_Profile_for_TPM_2p0_1p04_PUBLIC.pdf#page=100
-func ParseUEFIVariableData(r io.Reader) (ret UEFIVariableData, err error) {
-	err = binary.Read(r, binary.LittleEndian, &ret.Header)
-	if err != nil {
-		return
-	}
-	if ret.Header.UnicodeNameLength > maxNameLen {
-		return UEFIVariableData{}, fmt.Errorf("unicode name too long: %d > %d", ret.Header.UnicodeNameLength, maxNameLen)
-	}
-	ret.UnicodeName = make([]uint16, ret.Header.UnicodeNameLength)
-	for i := 0; uint64(i) < ret.Header.UnicodeNameLength; i++ {
-		err = binary.Read(r, binary.LittleEndian, &ret.UnicodeName[i])
-		if err != nil {
-			return
-		}
-	}
-	if ret.Header.VariableDataLength > maxDataLen {
-		return UEFIVariableData{}, fmt.Errorf("variable data too long: %d > %d", ret.Header.VariableDataLength, maxDataLen)
-	}
-	ret.VariableData = make([]byte, ret.Header.VariableDataLength)
-	_, err = io.ReadFull(r, ret.VariableData)
-	return
-}
-
-func (v *UEFIVariableData) VarName() string {
-	return string(utf16.Decode(v.UnicodeName))
-}
-
-func (v *UEFIVariableData) SignatureData() (certs []x509.Certificate, hashes [][]byte, err error) {
-	return parseEfiSignatureList(v.VariableData)
-}
-
-// UEFIVariableAuthority describes the contents of a UEFI variable authority
-// event.
-type UEFIVariableAuthority struct {
-	Certs []x509.Certificate
-}
-
-// ParseUEFIVariableAuthority parses the data section of an event structured as
-// a UEFI variable authority.
-//
-// https://uefi.org/sites/default/files/resources/UEFI_Spec_2_8_final.pdf#page=1789
-func ParseUEFIVariableAuthority(r io.Reader) (UEFIVariableAuthority, error) {
-	v, err := ParseUEFIVariableData(r)
-	if err != nil {
-		return UEFIVariableAuthority{}, err
-	}
-	certs, err := parseEfiSignature(v.VariableData)
-	return UEFIVariableAuthority{Certs: certs}, err
-}
-
-// efiSignatureData represents the EFI_SIGNATURE_DATA type.  See section
-// "31.4.1 Signature Database" in the specification for more information.
-type efiSignatureData struct {
-	SignatureOwner efiGUID
-	SignatureData  []byte // []int8
-}
-
-// efiSignatureList represents the EFI_SIGNATURE_LIST type.
-// See section "31.4.1 Signature Database" in the specification for more
-// information.
-type efiSignatureListHeader struct {
-	SignatureType       efiGUID
-	SignatureListSize   uint32
-	SignatureHeaderSize uint32
-	SignatureSize       uint32
-}
-
-type efiSignatureList struct {
-	Header        efiSignatureListHeader
-	SignatureData []byte
-	Signatures    []byte
-}
-
-// parseEfiSignatureList parses a EFI_SIGNATURE_LIST structure.
-// The structure and related GUIDs are defined at:
-// https://uefi.org/sites/default/files/resources/UEFI_Spec_2_8_final.pdf#page=1790
-func parseEfiSignatureList(b []byte) ([]x509.Certificate, [][]byte, error) {
-	if len(b) < 28 {
-		// Being passed an empty signature list here appears to be valid
-		return nil, nil, nil
-	}
-	var signatures efiSignatureList
-	buf := bytes.NewReader(b)
-	var certificates []x509.Certificate
-	var hashes [][]byte
-
-	for buf.Len() > 0 {
-		err := binary.Read(buf, binary.LittleEndian, &signatures.Header)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		if signatures.Header.SignatureHeaderSize > maxDataLen {
-			return nil, nil, fmt.Errorf("signature header too large: %d > %d", signatures.Header.SignatureHeaderSize, maxDataLen)
-		}
-		if signatures.Header.SignatureListSize > maxDataLen {
-			return nil, nil, fmt.Errorf("signature list too large: %d > %d", signatures.Header.SignatureListSize, maxDataLen)
-		}
-
-		signatureType := signatures.Header.SignatureType
-		switch signatureType {
-		case certX509SigGUID: // X509 certificate
-			for sigOffset := 0; uint32(sigOffset) < signatures.Header.SignatureListSize-28; {
-				var signature efiSignatureData
-				signature.SignatureData = make([]byte, signatures.Header.SignatureSize-16)
-				err := binary.Read(buf, binary.LittleEndian, &signature.SignatureOwner)
-				if err != nil {
-					return nil, nil, err
-				}
-				err = binary.Read(buf, binary.LittleEndian, &signature.SignatureData)
-				if err != nil {
-					return nil, nil, err
-				}
-				cert, err := x509.ParseCertificate(signature.SignatureData)
-				if err != nil {
-					return nil, nil, err
-				}
-				sigOffset += int(signatures.Header.SignatureSize)
-				certificates = append(certificates, *cert)
-			}
-		case hashSHA256SigGUID: // SHA256
-			for sigOffset := 0; uint32(sigOffset) < signatures.Header.SignatureListSize-28; {
-				var signature efiSignatureData
-				signature.SignatureData = make([]byte, signatures.Header.SignatureSize-16)
-				err := binary.Read(buf, binary.LittleEndian, &signature.SignatureOwner)
-				if err != nil {
-					return nil, nil, err
-				}
-				err = binary.Read(buf, binary.LittleEndian, &signature.SignatureData)
-				if err != nil {
-					return nil, nil, err
-				}
-				hashes = append(hashes, signature.SignatureData)
-				sigOffset += int(signatures.Header.SignatureSize)
-			}
-		case keyRSA2048SigGUID:
-			err = errors.New("unhandled RSA2048 key")
-		case certRSA2048SHA256SigGUID:
-			err = errors.New("unhandled RSA2048-SHA256 key")
-		case hashSHA1SigGUID:
-			err = errors.New("unhandled SHA1 hash")
-		case certRSA2048SHA1SigGUID:
-			err = errors.New("unhandled RSA2048-SHA1 key")
-		case hashSHA224SigGUID:
-			err = errors.New("unhandled SHA224 hash")
-		case hashSHA384SigGUID:
-			err = errors.New("unhandled SHA384 hash")
-		case hashSHA512SigGUID:
-			err = errors.New("unhandled SHA512 hash")
-		case certHashSHA256SigGUID:
-			err = errors.New("unhandled X509-SHA256 hash metadata")
-		case certHashSHA384SigGUID:
-			err = errors.New("unhandled X509-SHA384 hash metadata")
-		case certHashSHA512SigGUID:
-			err = errors.New("unhandled X509-SHA512 hash metadata")
-		default:
-			err = fmt.Errorf("unhandled signature type %s", signatureType)
-		}
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-	return certificates, hashes, nil
-}
-
-// EFISignatureData represents the EFI_SIGNATURE_DATA type.
-// See section "31.4.1 Signature Database" in the specification
-// for more information.
-type EFISignatureData struct {
-	SignatureOwner efiGUID
-	SignatureData  []byte // []int8
-}
-
-func parseEfiSignature(b []byte) ([]x509.Certificate, error) {
-	var certificates []x509.Certificate
-
-	if len(b) < 16 {
-		return nil, fmt.Errorf("invalid signature: buffer smaller than header (%d < %d)", len(b), 16)
-	}
-
-	buf := bytes.NewReader(b)
-	var signature EFISignatureData
-	signature.SignatureData = make([]byte, len(b)-16)
-
-	if err := binary.Read(buf, binary.LittleEndian, &signature.SignatureOwner); err != nil {
-		return certificates, err
-	}
-	if err := binary.Read(buf, binary.LittleEndian, &signature.SignatureData); err != nil {
-		return certificates, err
-	}
-
-	cert, err := x509.ParseCertificate(signature.SignatureData)
-	if err == nil {
-		certificates = append(certificates, *cert)
-	} else {
-		// A bug in shim may cause an event to be missing the SignatureOwner GUID.
-		// We handle this, but signal back to the caller using ErrSigMissingGUID.
-		var structuralError asn1.StructuralError
-		if errors.As(err, &structuralError) {
-			var err2 error
-			cert, err2 = x509.ParseCertificate(b)
-			if err2 == nil {
-				certificates = append(certificates, *cert)
-				err = ErrSigMissingGUID
-			}
-		}
-	}
-	return certificates, err
-}
diff --git a/metropolis/pkg/tpm/eventlog/secureboot.go b/metropolis/pkg/tpm/eventlog/secureboot.go
deleted file mode 100644
index 090be5c..0000000
--- a/metropolis/pkg/tpm/eventlog/secureboot.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Taken and pruned from go-attestation under Apache 2.0
-package eventlog
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-
-	"github.com/google/certificate-transparency-go/x509"
-
-	"source.monogon.dev/metropolis/pkg/tpm/eventlog/internal"
-)
-
-// SecurebootState describes the secure boot status of a machine, as determined
-// by processing its event log.
-type SecurebootState struct {
-	Enabled bool
-
-	// PlatformKeys enumerates keys which can sign a key exchange key.
-	PlatformKeys []x509.Certificate
-	// PlatformKeys enumerates key hashes which can sign a key exchange key.
-	PlatformKeyHashes [][]byte
-
-	// ExchangeKeys enumerates keys which can sign a database of permitted or
-	// forbidden keys.
-	ExchangeKeys []x509.Certificate
-	// ExchangeKeyHashes enumerates key hashes which can sign a database or
-	// permitted or forbidden keys.
-	ExchangeKeyHashes [][]byte
-
-	// PermittedKeys enumerates keys which may sign binaries to run.
-	PermittedKeys []x509.Certificate
-	// PermittedHashes enumerates hashes which permit binaries to run.
-	PermittedHashes [][]byte
-
-	// ForbiddenKeys enumerates keys which must not permit a binary to run.
-	ForbiddenKeys []x509.Certificate
-	// ForbiddenKeys enumerates hashes which must not permit a binary to run.
-	ForbiddenHashes [][]byte
-
-	// PreSeparatorAuthority describes the use of a secure-boot key to authorize
-	// the execution of a binary before the separator.
-	PreSeparatorAuthority []x509.Certificate
-	// PostSeparatorAuthority describes the use of a secure-boot key to authorize
-	// the execution of a binary after the separator.
-	PostSeparatorAuthority []x509.Certificate
-}
-
-// ParseSecurebootState parses a series of events to determine the
-// configuration of secure boot on a device. An error is returned if
-// the state cannot be determined, or if the event log is structured
-// in such a way that it may have been tampered post-execution of
-// platform firmware.
-func ParseSecurebootState(events []Event) (*SecurebootState, error) {
-	// This algorithm verifies the following:
-	// - All events in PCR 7 have event types which are expected in PCR 7.
-	// - All events are parsable according to their event type.
-	// - All events have digests values corresponding to their data/event type.
-	// - No unverifiable events were present.
-	// - All variables are specified before the separator and never duplicated.
-	// - The SecureBoot variable has a value of 0 or 1.
-	// - If SecureBoot was 1 (enabled), authority events were present indicating
-	//   keys were used to perform verification.
-	// - If SecureBoot was 1 (enabled), platform + exchange + database keys
-	//   were specified.
-	// - No UEFI debugger was attached.
-
-	var (
-		out           SecurebootState
-		seenSeparator bool
-		seenAuthority bool
-		seenVars      = map[string]bool{}
-	)
-
-	for _, e := range events {
-		if e.Index != 7 {
-			continue
-		}
-
-		et, err := internal.UntrustedParseEventType(uint32(e.Type))
-		if err != nil {
-			return nil, fmt.Errorf("unrecognised event type: %v", err)
-		}
-
-		digestVerify := e.digestEquals(e.Data)
-		switch et {
-		case internal.Separator:
-			if seenSeparator {
-				return nil, fmt.Errorf("duplicate separator at event %d", e.sequence)
-			}
-			seenSeparator = true
-			if !bytes.Equal(e.Data, []byte{0, 0, 0, 0}) {
-				return nil, fmt.Errorf("invalid separator data at event %d: %v", e.sequence, e.Data)
-			}
-			if digestVerify != nil {
-				return nil, fmt.Errorf("invalid separator digest at event %d: %v", e.sequence, digestVerify)
-			}
-
-		case internal.EFIAction:
-			if string(e.Data) == "UEFI Debug Mode" {
-				return nil, errors.New("a UEFI debugger was present during boot")
-			}
-			return nil, fmt.Errorf("event %d: unexpected EFI action event", e.sequence)
-
-		case internal.EFIVariableDriverConfig:
-			v, err := internal.ParseUEFIVariableData(bytes.NewReader(e.Data))
-			if err != nil {
-				return nil, fmt.Errorf("failed parsing EFI variable at event %d: %v", e.sequence, err)
-			}
-			if _, seenBefore := seenVars[v.VarName()]; seenBefore {
-				return nil, fmt.Errorf("duplicate EFI variable %q at event %d", v.VarName(), e.sequence)
-			}
-			seenVars[v.VarName()] = true
-			if seenSeparator {
-				return nil, fmt.Errorf("event %d: variable %q specified after separator", e.sequence, v.VarName())
-			}
-
-			if digestVerify != nil {
-				return nil, fmt.Errorf("invalid digest for variable %q on event %d: %v", v.VarName(), e.sequence, digestVerify)
-			}
-
-			switch v.VarName() {
-			case "SecureBoot":
-				if len(v.VariableData) != 1 {
-					return nil, fmt.Errorf("event %d: SecureBoot data len is %d, expected 1", e.sequence, len(v.VariableData))
-				}
-				out.Enabled = v.VariableData[0] == 1
-			case "PK":
-				if out.PlatformKeys, out.PlatformKeyHashes, err = v.SignatureData(); err != nil {
-					return nil, fmt.Errorf("event %d: failed parsing platform keys: %v", e.sequence, err)
-				}
-			case "KEK":
-				if out.ExchangeKeys, out.ExchangeKeyHashes, err = v.SignatureData(); err != nil {
-					return nil, fmt.Errorf("event %d: failed parsing key exchange keys: %v", e.sequence, err)
-				}
-			case "db":
-				if out.PermittedKeys, out.PermittedHashes, err = v.SignatureData(); err != nil {
-					return nil, fmt.Errorf("event %d: failed parsing signature database: %v", e.sequence, err)
-				}
-			case "dbx":
-				if out.ForbiddenKeys, out.ForbiddenHashes, err = v.SignatureData(); err != nil {
-					return nil, fmt.Errorf("event %d: failed parsing forbidden signature database: %v", e.sequence, err)
-				}
-			}
-
-		case internal.EFIVariableAuthority:
-			a, err := internal.ParseUEFIVariableAuthority(bytes.NewReader(e.Data))
-			if err != nil {
-				// Workaround for: https://github.com/google/go-attestation/issues/157
-				if errors.Is(err, internal.ErrSigMissingGUID) {
-					// Versions of shim which do not carry
-					// https://github.com/rhboot/shim/commit/8a27a4809a6a2b40fb6a4049071bf96d6ad71b50
-					// have an erroneous additional byte in the event, which breaks digest
-					// verification. If verification failed, we try removing the last byte.
-					if digestVerify != nil {
-						digestVerify = e.digestEquals(e.Data[:len(e.Data)-1])
-					}
-				} else {
-					return nil, fmt.Errorf("failed parsing EFI variable authority at event %d: %v", e.sequence, err)
-				}
-			}
-			seenAuthority = true
-			if digestVerify != nil {
-				return nil, fmt.Errorf("invalid digest for authority on event %d: %v", e.sequence, digestVerify)
-			}
-			if !seenSeparator {
-				out.PreSeparatorAuthority = append(out.PreSeparatorAuthority, a.Certs...)
-			} else {
-				out.PostSeparatorAuthority = append(out.PostSeparatorAuthority, a.Certs...)
-			}
-
-		default:
-			return nil, fmt.Errorf("unexpected event type: %v", et)
-		}
-	}
-
-	if !out.Enabled {
-		return &out, nil
-	}
-
-	if !seenAuthority {
-		return nil, errors.New("secure boot was enabled but no key was used")
-	}
-	if len(out.PlatformKeys) == 0 && len(out.PlatformKeyHashes) == 0 {
-		return nil, errors.New("secure boot was enabled but no platform keys were known")
-	}
-	if len(out.ExchangeKeys) == 0 && len(out.ExchangeKeyHashes) == 0 {
-		return nil, errors.New("secure boot was enabled but no key exchange keys were known")
-	}
-	if len(out.PermittedKeys) == 0 && len(out.PermittedHashes) == 0 {
-		return nil, errors.New("secure boot was enabled but no keys or hashes were permitted")
-	}
-	return &out, nil
-}
diff --git a/metropolis/pkg/tpm/proto/BUILD.bazel b/metropolis/pkg/tpm/proto/BUILD.bazel
deleted file mode 100644
index 81c42e6..0000000
--- a/metropolis/pkg/tpm/proto/BUILD.bazel
+++ /dev/null
@@ -1,25 +0,0 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
-
-proto_library(
-    name = "proto_proto",
-    srcs = ["tpm.proto"],
-    visibility = ["//visibility:public"],
-    deps = ["@com_github_google_go_tpm_tools//proto/tpm:tpm_proto"],  #keep
-)
-
-go_proto_library(
-    name = "proto_go_proto",
-    importpath = "source.monogon.dev/metropolis/pkg/tpm/proto",
-    proto = ":proto_proto",
-    visibility = ["//visibility:public"],
-    deps = ["@com_github_google_go_tpm_tools//proto/tpm"],  #keep
-)
-
-go_library(
-    name = "proto",
-    embed = [":proto_go_proto"],
-    importpath = "source.monogon.dev/metropolis/pkg/tpm/proto",
-    visibility = ["//visibility:public"],
-)
diff --git a/metropolis/pkg/tpm/proto/gomod-generated-placeholder.go b/metropolis/pkg/tpm/proto/gomod-generated-placeholder.go
deleted file mode 100644
index 92256db..0000000
--- a/metropolis/pkg/tpm/proto/gomod-generated-placeholder.go
+++ /dev/null
@@ -1 +0,0 @@
-package proto
diff --git a/metropolis/pkg/tpm/proto/tpm.proto b/metropolis/pkg/tpm/proto/tpm.proto
deleted file mode 100644
index 9f86291..0000000
--- a/metropolis/pkg/tpm/proto/tpm.proto
+++ /dev/null
@@ -1,16 +0,0 @@
-syntax = "proto3";
-option go_package = "source.monogon.dev/metropolis/pkg/tpm/proto";
-package metropolis.pkg.tpm;
-
-import "proto/tpm/tpm.proto";
-
-// ExtendedSealedBytes contains data sealed by a TPM2 via an indirection to
-// allow for more than 128 bytes of payload. It seals an ephemeral key for
-// a nacl secretbox in the TPM and stores the encrypted box next to the sealed
-// key.
-message ExtendedSealedBytes {
-  // The secretbox key, as sealed by the TPM.
-  .tpm.SealedBytes sealed_key = 1;
-  // The encrypted box contents.
-  bytes encrypted_payload = 2;
-}
\ No newline at end of file
diff --git a/metropolis/pkg/tpm/tpm.go b/metropolis/pkg/tpm/tpm.go
deleted file mode 100644
index 13a3e59..0000000
--- a/metropolis/pkg/tpm/tpm.go
+++ /dev/null
@@ -1,631 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tpm
-
-import (
-	"bytes"
-	"crypto"
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/x509"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	tpm2tools "github.com/google/go-tpm-tools/client"
-	"github.com/google/go-tpm/tpm2"
-	"github.com/google/go-tpm/tpmutil"
-	"golang.org/x/crypto/nacl/secretbox"
-	"golang.org/x/sys/unix"
-	"google.golang.org/protobuf/proto"
-
-	tpmpb "source.monogon.dev/metropolis/pkg/tpm/proto"
-
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/sysfs"
-)
-
-var (
-	// SecureBootPCRs are all PCRs that measure the current Secure Boot
-	// configuration.  This is what we want if we rely on secure boot to verify
-	// boot integrity. The firmware hashes the secure boot policy and custom
-	// keys into the PCR.
-	//
-	// This requires an extra step that provisions the custom keys.
-	//
-	// Some background: https://mjg59.dreamwidth.org/48897.html?thread=1847297
-	// (the initramfs issue mentioned in the article has been solved by
-	// integrating it into the kernel binary, and we don't have a shim
-	// bootloader)
-	//
-	// PCR7 alone is not sufficient - it needs to be combined with firmware
-	// measurements.
-	SecureBootPCRs = []int{7}
-
-	// FirmwarePCRs are alle PCRs that contain the firmware measurements. See:
-	//   https://trustedcomputinggroup.org/wp-content/uploads/TCG_EFI_Platform_1_22_Final_-v15.pdf
-	FirmwarePCRs = []int{
-		0, // platform firmware
-		2, // option ROM code
-		3, // option ROM configuration and data
-	}
-
-	// FullSystemPCRs are all PCRs that contain any measurements up to the
-	// currently running EFI payload.
-	FullSystemPCRs = []int{
-		0, // platform firmware
-		1, // host platform configuration
-		2, // option ROM code
-		3, // option ROM configuration and data
-		4, // EFI payload
-	}
-
-	// Using FullSystemPCRs is the most secure, but also the most brittle
-	// option since updating the EFI binary, updating the platform firmware,
-	// changing platform settings or updating the binary would invalidate the
-	// sealed data. It's annoying (but possible) to predict values for PCR4,
-	// and even more annoying for the firmware PCR (comparison to known values
-	// on similar hardware is the only thing that comes to mind).
-	//
-	// See also: https://github.com/mxre/sealkey (generates PCR4 from EFI
-	// image, BSD license)
-	//
-	// Using only SecureBootPCRs is the easiest and still reasonably secure, if
-	// we assume that the platform knows how to take care of itself (i.e. Intel
-	// Boot Guard), and that secure boot is implemented properly. It is,
-	// however, a much larger amount of code we need to trust.
-	//
-	// We do not care about PCR 5 (GPT partition table) since modifying it is
-	// harmless. All of the boot options and cmdline are hardcoded in the
-	// kernel image, and we use no bootloader, so there's no PCR for bootloader
-	// configuration or kernel cmdline.
-)
-
-var (
-	numSRTMPCRs = 16
-	srtmPCRs    = tpm2.PCRSelection{Hash: tpm2.AlgSHA256, PCRs: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}
-	// TCG Trusted Platform Module Library Level 00 Revision 0.99 Table 6
-	tpmGeneratedValue = uint32(0xff544347)
-)
-
-var (
-	// ErrNotExists is returned when no TPMs are available in the system
-	ErrNotExists = errors.New("no TPMs found")
-	// ErrNotInitialized is returned when this package was not initialized
-	// successfully
-	ErrNotInitialized = errors.New("no TPM was initialized")
-)
-
-// Singleton since the TPM is too
-var tpm *TPM
-
-// We're serializing all TPM operations since it has a limited number of
-// handles and recovering if it runs out is difficult to implement correctly.
-// Might also be marginally more secure.
-var lock sync.Mutex
-
-// TPM represents a high-level interface to a connected TPM 2.0
-type TPM struct {
-	logger logtree.LeveledLogger
-	device io.ReadWriteCloser
-
-	// We keep the AK loaded since it's used fairly often and deriving it is
-	// expensive
-	akHandleCache tpmutil.Handle
-	akPublicKey   crypto.PublicKey
-}
-
-// Initialize finds and opens the TPM (if any). If there is no TPM available it
-// returns ErrNotExists
-func Initialize(logger logtree.LeveledLogger) error {
-	lock.Lock()
-	defer lock.Unlock()
-	tpmDir, err := os.Open("/sys/class/tpm")
-	if err != nil {
-		return fmt.Errorf("failed to open sysfs TPM class: %w", err)
-	}
-	defer tpmDir.Close()
-
-	tpms, err := tpmDir.Readdirnames(2)
-	if err != nil {
-		return fmt.Errorf("failed to read TPM device class: %w", err)
-	}
-
-	if len(tpms) == 0 {
-		return ErrNotExists
-	}
-	if len(tpms) > 1 {
-		// If this is changed GetMeasurementLog() needs to be updated too
-		logger.Warningf("Found more than one TPM, using the first one")
-	}
-	tpmName := tpms[0]
-	ueventData, err := sysfs.ReadUevents(filepath.Join("/sys/class/tpm", tpmName, "uevent"))
-	if err != nil {
-		return fmt.Errorf("failed to read uevents: %w", err)
-	}
-	majorDev, err := strconv.Atoi(ueventData["MAJOR"])
-	if err != nil {
-		return fmt.Errorf("failed to convert uevent: %w", err)
-	}
-	minorDev, err := strconv.Atoi(ueventData["MINOR"])
-	if err != nil {
-		return fmt.Errorf("failed to convert uevent: %w", err)
-	}
-	if err := unix.Mknod("/dev/tpm", 0600|unix.S_IFCHR, int(unix.Mkdev(uint32(majorDev), uint32(minorDev)))); err != nil {
-		return fmt.Errorf("failed to create TPM device node: %w", err)
-	}
-	device, err := tpm2.OpenTPM("/dev/tpm")
-	if err != nil {
-		return fmt.Errorf("failed to open TPM: %w", err)
-	}
-	tpm = &TPM{
-		device: device,
-		logger: logger,
-	}
-	return nil
-}
-
-// IsInitialized returns true if Initialize was called an at least one
-// TPM 2.0 was found and initialized. Otherwise it returns false.
-func IsInitialized() bool {
-	lock.Lock()
-	defer lock.Unlock()
-	return tpm != nil
-}
-
-// GenerateSafeKey uses two sources of randomness (Kernel & TPM) to generate
-// the key
-func GenerateSafeKey(size uint16) ([]byte, error) {
-	lock.Lock()
-	defer lock.Unlock()
-	if tpm == nil {
-		return nil, ErrNotInitialized
-	}
-	encryptionKeyHost := make([]byte, size)
-	if _, err := io.ReadFull(rand.Reader, encryptionKeyHost); err != nil {
-		return nil, fmt.Errorf("failed to generate host portion of new key: %w", err)
-	}
-	var encryptionKeyTPM []byte
-	for i := 48; i > 0; i-- {
-		tpmKeyPart, err := tpm2.GetRandom(tpm.device, size-uint16(len(encryptionKeyTPM)))
-		if err != nil {
-			return nil, fmt.Errorf("failed to generate TPM portion of new key: %w", err)
-		}
-		encryptionKeyTPM = append(encryptionKeyTPM, tpmKeyPart...)
-		if len(encryptionKeyTPM) >= int(size) {
-			break
-		}
-	}
-
-	if len(encryptionKeyTPM) != int(size) {
-		return nil, fmt.Errorf("got incorrect amount of TPM randomess: %v, requested %v", len(encryptionKeyTPM), size)
-	}
-
-	encryptionKey := make([]byte, size)
-	for i := uint16(0); i < size; i++ {
-		encryptionKey[i] = encryptionKeyHost[i] ^ encryptionKeyTPM[i]
-	}
-	return encryptionKey, nil
-}
-
-// Seal seals sensitive data and only allows access if the current platform
-// configuration in matches the one the data was sealed on.
-func Seal(data []byte, pcrs []int) ([]byte, error) {
-	// Generate a key and use secretbox to encrypt and authenticate the actual
-	// payload as go-tpm2 uses a raw seal operation limiting payload size to
-	// 128 bytes which is insufficient.
-	boxKey, err := GenerateSafeKey(32)
-	if err != nil {
-		return nil, fmt.Errorf("failed to generate boxKey: %w", err)
-	}
-	lock.Lock()
-	defer lock.Unlock()
-	srk, err := tpm2tools.StorageRootKeyRSA(tpm.device)
-	if err != nil {
-		return nil, fmt.Errorf("failed to load TPM SRK: %w", err)
-	}
-	defer srk.Close()
-	var boxKeyArr [32]byte
-	copy(boxKeyArr[:], boxKey)
-	// Nonce is not used as we're generating a new boxKey for every operation,
-	// therefore we can just leave it all-zero.
-	var unusedNonce [24]byte
-	encryptedData := secretbox.Seal(nil, data, &unusedNonce, &boxKeyArr)
-	sealedKey, err := srk.Seal(boxKey, tpm2tools.SealOpts{Current: tpm2.PCRSelection{Hash: tpm2.AlgSHA256, PCRs: pcrs}})
-	if err != nil {
-		return nil, fmt.Errorf("failed to seal boxKey: %w", err)
-	}
-	sealedBytes := tpmpb.ExtendedSealedBytes{
-		SealedKey:        sealedKey,
-		EncryptedPayload: encryptedData,
-	}
-	rawSealedBytes, err := proto.Marshal(&sealedBytes)
-	if err != nil {
-		return nil, fmt.Errorf("failed to marshal sealed data: %w", err)
-	}
-	return rawSealedBytes, nil
-}
-
-// Unseal unseals sensitive data if the current platform configuration allows
-// and sealing constraints allow it.
-func Unseal(data []byte) ([]byte, error) {
-	lock.Lock()
-	defer lock.Unlock()
-	if tpm == nil {
-		return nil, ErrNotInitialized
-	}
-	srk, err := tpm2tools.StorageRootKeyRSA(tpm.device)
-	if err != nil {
-		return nil, fmt.Errorf("failed to load TPM SRK: %w", err)
-	}
-	defer srk.Close()
-
-	var sealedBytes tpmpb.ExtendedSealedBytes
-	if err := proto.Unmarshal(data, &sealedBytes); err != nil {
-		return nil, fmt.Errorf("failed to unmarshal sealed data: %w", err)
-	}
-	if sealedBytes.SealedKey == nil {
-		return nil, fmt.Errorf("sealed data structure is invalid: no sealed key")
-	}
-	// Logging this for auditing purposes
-	var pcrList []string
-	for _, pcr := range sealedBytes.SealedKey.Pcrs {
-		pcrList = append(pcrList, strconv.FormatUint(uint64(pcr), 10))
-	}
-	tpm.logger.Infof("Attempting to unseal key protected with PCRs %s", strings.Join(pcrList, ","))
-	unsealedKey, err := srk.Unseal(sealedBytes.SealedKey, tpm2tools.UnsealOpts{})
-	if err != nil {
-		return nil, fmt.Errorf("failed to unseal key: %w", err)
-	}
-	var key [32]byte
-	if len(unsealedKey) != len(key) {
-		return nil, fmt.Errorf("unsealed key has wrong length: expected %v bytes, got %v", len(key), len(unsealedKey))
-	}
-	copy(key[:], unsealedKey)
-	var unusedNonce [24]byte
-	payload, ok := secretbox.Open(nil, sealedBytes.EncryptedPayload, &unusedNonce, &key)
-	if !ok {
-		return nil, errors.New("payload box cannot be opened")
-	}
-	return payload, nil
-}
-
-// Standard AK template for RSA2048 non-duplicatable restricted signing for
-// attestation
-var akTemplate = tpm2.Public{
-	Type:       tpm2.AlgRSA,
-	NameAlg:    tpm2.AlgSHA256,
-	Attributes: tpm2.FlagSignerDefault,
-	RSAParameters: &tpm2.RSAParams{
-		Sign: &tpm2.SigScheme{
-			Alg:  tpm2.AlgRSASSA,
-			Hash: tpm2.AlgSHA256,
-		},
-		KeyBits: 2048,
-	},
-}
-
-func loadAK() error {
-	var err error
-	// Rationale: The AK is an EK-equivalent key and used only for attestation.
-	// Using a non-primary key here would require us to store the wrapped
-	// version somewhere, which is inconvenient.  This being a primary key in
-	// the Endorsement hierarchy means that it can always be recreated and can
-	// never be "destroyed". Under our security model this is of no concern
-	// since we identify a node by its IK (Identity Key) which we can destroy.
-	tpm.akHandleCache, tpm.akPublicKey, err = tpm2.CreatePrimary(tpm.device, tpm2.HandleEndorsement,
-		tpm2.PCRSelection{}, "", "", akTemplate)
-	return err
-}
-
-// Process documented in TCG EK Credential Profile 2.2.1
-func loadEK() (tpmutil.Handle, crypto.PublicKey, error) {
-	// The EK is a primary key which is supposed to be certified by the
-	// manufacturer of the TPM.  Its public attributes are standardized in TCG
-	// EK Credential Profile 2.0 Table 1. These need to match exactly or we
-	// aren't getting the key the manufacturere signed. tpm2tools contains such
-	// a template already, so we're using that instead of redoing it ourselves.
-	// This ignores the more complicated ways EKs can be specified, the
-	// additional stuff you can do is just absolutely crazy (see 2.2.1.2
-	// onward)
-	return tpm2.CreatePrimary(tpm.device, tpm2.HandleEndorsement,
-		tpm2.PCRSelection{}, "", "", tpm2tools.DefaultEKTemplateRSA())
-}
-
-// GetAKPublic gets the TPM2T_PUBLIC of the AK key
-func GetAKPublic() ([]byte, error) {
-	lock.Lock()
-	defer lock.Unlock()
-	if tpm == nil {
-		return nil, ErrNotInitialized
-	}
-	if tpm.akHandleCache == tpmutil.Handle(0) {
-		if err := loadAK(); err != nil {
-			return nil, fmt.Errorf("failed to load AK primary key: %w", err)
-		}
-	}
-	public, _, _, err := tpm2.ReadPublic(tpm.device, tpm.akHandleCache)
-	if err != nil {
-		return nil, err
-	}
-	return public.Encode()
-}
-
-// TCG TPM v2.0 Provisioning Guidance v1.0 7.8 Table 2 and TCG EK Credential
-// Profile v2.1 2.2.1.4 de-facto Standard for Windows These are both
-// non-normative and reference Windows 10 documentation that's no longer
-// available :( But in practice this is what people are using, so if it's
-// normative or not doesn't really matter
-const ekCertHandle = 0x01c00002
-
-// GetEKPublic gets the public key and (if available) Certificate of the EK
-func GetEKPublic() ([]byte, []byte, error) {
-	lock.Lock()
-	defer lock.Unlock()
-	if tpm == nil {
-		return nil, []byte{}, ErrNotInitialized
-	}
-	ekHandle, publicRaw, err := loadEK()
-	if err != nil {
-		return nil, []byte{}, fmt.Errorf("failed to load EK primary key: %w", err)
-	}
-	defer tpm2.FlushContext(tpm.device, ekHandle)
-	// Don't question the use of HandleOwner, that's the Standard™
-	ekCertRaw, err := tpm2.NVReadEx(tpm.device, ekCertHandle, tpm2.HandleOwner, "", 0)
-	if err != nil {
-		return nil, []byte{}, err
-	}
-
-	publicKey, err := x509.MarshalPKIXPublicKey(publicRaw)
-	if err != nil {
-		return nil, []byte{}, err
-	}
-
-	return publicKey, ekCertRaw, nil
-}
-
-// MakeAKChallenge generates a challenge for TPM residency and attributes of
-// the AK
-func MakeAKChallenge(ekPubKey, akPub []byte, nonce []byte) ([]byte, []byte, error) {
-	ekPubKeyData, err := x509.ParsePKIXPublicKey(ekPubKey)
-	if err != nil {
-		return nil, []byte{}, fmt.Errorf("failed to decode EK pubkey: %w", err)
-	}
-	akPubData, err := tpm2.DecodePublic(akPub)
-	if err != nil {
-		return nil, []byte{}, fmt.Errorf("failed to decode AK public part: %w", err)
-	}
-	// Make sure we're attesting the right attributes (in particular Restricted)
-	if !akPubData.MatchesTemplate(akTemplate) {
-		return nil, []byte{}, errors.New("the key being challenged is not a valid AK")
-	}
-	akName, err := akPubData.Name()
-	if err != nil {
-		return nil, []byte{}, fmt.Errorf("failed to derive AK name: %w", err)
-	}
-	return generateRSA(akName.Digest, ekPubKeyData.(*rsa.PublicKey), 16, nonce, rand.Reader)
-}
-
-// SolveAKChallenge solves a challenge for TPM residency of the AK
-func SolveAKChallenge(credBlob, secretChallenge []byte) ([]byte, error) {
-	lock.Lock()
-	defer lock.Unlock()
-	if tpm == nil {
-		return nil, ErrNotInitialized
-	}
-	if tpm.akHandleCache == tpmutil.Handle(0) {
-		if err := loadAK(); err != nil {
-			return nil, fmt.Errorf("failed to load AK primary key: %w", err)
-		}
-	}
-
-	ekHandle, _, err := loadEK()
-	if err != nil {
-		return nil, fmt.Errorf("failed to load EK: %w", err)
-	}
-	defer tpm2.FlushContext(tpm.device, ekHandle)
-
-	// This is necessary since the EK requires an endorsement handle policy in
-	// its session.  For us this is stupid because we keep all hierarchies open
-	// anyways since a) we cannot safely store secrets on the OS side
-	// pre-global unlock and b) it makes no sense in this security model since
-	// an uncompromised host OS will not let an untrusted entity attest as
-	// itself and a compromised OS can either not pass PCR policy checks or the
-	// game's already over (you successfully runtime-exploited a production
-	// Metropolis node).
-	endorsementSession, _, err := tpm2.StartAuthSession(
-		tpm.device,
-		tpm2.HandleNull,
-		tpm2.HandleNull,
-		make([]byte, 16),
-		nil,
-		tpm2.SessionPolicy,
-		tpm2.AlgNull,
-		tpm2.AlgSHA256)
-	if err != nil {
-		panic(err)
-	}
-	defer tpm2.FlushContext(tpm.device, endorsementSession)
-
-	_, _, err = tpm2.PolicySecret(tpm.device, tpm2.HandleEndorsement, tpm2.AuthCommand{Session: tpm2.HandlePasswordSession, Attributes: tpm2.AttrContinueSession}, endorsementSession, nil, nil, nil, 0)
-	if err != nil {
-		return nil, fmt.Errorf("failed to make a policy secret session: %w", err)
-	}
-
-	for {
-		solution, err := tpm2.ActivateCredentialUsingAuth(tpm.device, []tpm2.AuthCommand{
-			// Use standard no-password authenatication
-			{Session: tpm2.HandlePasswordSession, Attributes: tpm2.AttrContinueSession},
-			// Use a full policy session for the EK
-			{Session: endorsementSession, Attributes: tpm2.AttrContinueSession},
-		}, tpm.akHandleCache, ekHandle, credBlob, secretChallenge)
-		var warn tpm2.Warning
-		if errors.As(err, &warn) && warn.Code == tpm2.RCRetry {
-			time.Sleep(100 * time.Millisecond)
-			continue
-		}
-		return solution, err
-	}
-}
-
-// FlushTransientHandles flushes all sessions and non-persistent handles
-func FlushTransientHandles() error {
-	lock.Lock()
-	defer lock.Unlock()
-	if tpm == nil {
-		return ErrNotInitialized
-	}
-	flushHandleTypes := []tpm2.HandleType{tpm2.HandleTypeTransient, tpm2.HandleTypeLoadedSession, tpm2.HandleTypeSavedSession}
-	for _, handleType := range flushHandleTypes {
-		handles, err := tpm2tools.Handles(tpm.device, handleType)
-		if err != nil {
-			return err
-		}
-		for _, handle := range handles {
-			if err := tpm2.FlushContext(tpm.device, handle); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-// AttestPlatform performs a PCR quote using the AK and returns the quote and
-// its signature
-func AttestPlatform(nonce []byte) ([]byte, []byte, error) {
-	lock.Lock()
-	defer lock.Unlock()
-	if tpm == nil {
-		return nil, []byte{}, ErrNotInitialized
-	}
-	if tpm.akHandleCache == tpmutil.Handle(0) {
-		if err := loadAK(); err != nil {
-			return nil, []byte{}, fmt.Errorf("failed to load AK primary key: %w", err)
-		}
-	}
-	// We only care about SHA256 since SHA1 is weak. This is supported on at
-	// least GCE and Intel / AMD fTPM, which is good enough for now. Alg is
-	// null because that would just hash the nonce, which is dumb.
-	quote, signature, err := tpm2.Quote(tpm.device, tpm.akHandleCache, "", "", nonce, srtmPCRs,
-		tpm2.AlgNull)
-	if err != nil {
-		return nil, []byte{}, fmt.Errorf("failed to quote PCRs: %w", err)
-	}
-	return quote, signature.RSA.Signature, err
-}
-
-// VerifyAttestPlatform verifies a given attestation. You can rely on all data
-// coming back as being from the TPM on which the AK is bound to.
-func VerifyAttestPlatform(nonce, akPub, quote, signature []byte) (*tpm2.AttestationData, error) {
-	hash := crypto.SHA256.New()
-	hash.Write(quote)
-
-	akPubData, err := tpm2.DecodePublic(akPub)
-	if err != nil {
-		return nil, fmt.Errorf("invalid AK: %w", err)
-	}
-	akPublicKey, err := akPubData.Key()
-	if err != nil {
-		return nil, fmt.Errorf("invalid AK: %w", err)
-	}
-	akRSAKey, ok := akPublicKey.(*rsa.PublicKey)
-	if !ok {
-		return nil, errors.New("invalid AK: invalid key type")
-	}
-
-	if err := rsa.VerifyPKCS1v15(akRSAKey, crypto.SHA256, hash.Sum(nil), signature); err != nil {
-		return nil, err
-	}
-
-	quoteData, err := tpm2.DecodeAttestationData(quote)
-	if err != nil {
-		return nil, err
-	}
-	// quoteData.Magic works together with the TPM's Restricted key attribute.
-	// If this attribute is set (which it needs to be for the AK to be
-	// considered valid) the TPM will not sign external data having this prefix
-	// with such a key. Only data that originates inside the TPM like quotes
-	// and key certifications can have this prefix and sill be signed by a
-	// restricted key. This check is thus vital, otherwise somebody can just
-	// feed the TPM an arbitrary attestation to sign with its AK and this
-	// function will happily accept the forged attestation.
-	if quoteData.Magic != tpmGeneratedValue {
-		return nil, errors.New("invalid TPM quote: data marker for internal data not set - forged attestation")
-	}
-	if quoteData.Type != tpm2.TagAttestQuote {
-		return nil, errors.New("invalid TPM qoute: not a TPM quote")
-	}
-	if !bytes.Equal(quoteData.ExtraData, nonce) {
-		return nil, errors.New("invalid TPM quote: wrong nonce")
-	}
-
-	return quoteData, nil
-}
-
-// GetPCRs returns all SRTM PCRs in-order
-func GetPCRs() ([][]byte, error) {
-	lock.Lock()
-	defer lock.Unlock()
-	if tpm == nil {
-		return nil, ErrNotInitialized
-	}
-	pcrs := make([][]byte, numSRTMPCRs)
-
-	// The TPM can (and most do) return partial results. Let's just retry as
-	// many times as we have PCRs since each read should return at least one
-	// PCR.
-readLoop:
-	for i := 0; i < numSRTMPCRs; i++ {
-		sel := tpm2.PCRSelection{Hash: tpm2.AlgSHA256}
-		for pcrN := 0; pcrN < numSRTMPCRs; pcrN++ {
-			if len(pcrs[pcrN]) == 0 {
-				sel.PCRs = append(sel.PCRs, pcrN)
-			}
-		}
-
-		readPCRs, err := tpm2.ReadPCRs(tpm.device, sel)
-		if err != nil {
-			return nil, fmt.Errorf("failed to read PCRs: %w", err)
-		}
-
-		for pcrN, pcr := range readPCRs {
-			pcrs[pcrN] = pcr
-		}
-		for _, pcr := range pcrs {
-			// If at least one PCR is still not read, continue
-			if len(pcr) == 0 {
-				continue readLoop
-			}
-		}
-		break
-	}
-
-	return pcrs, nil
-}
-
-// GetMeasurementLog returns the binary log of all data hashed into PCRs. The
-// result can be parsed by eventlog.  As this library currently doesn't support
-// extending PCRs it just returns the log as supplied by the EFI interface.
-func GetMeasurementLog() ([]byte, error) {
-	return os.ReadFile("/sys/kernel/security/tpm0/binary_bios_measurements")
-}
diff --git a/metropolis/pkg/verity/BUILD.bazel b/metropolis/pkg/verity/BUILD.bazel
deleted file mode 100644
index 7e0e465..0000000
--- a/metropolis/pkg/verity/BUILD.bazel
+++ /dev/null
@@ -1,25 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
-
-go_library(
-    name = "verity",
-    srcs = ["encoder.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/verity",
-    visibility = ["//visibility:public"],
-)
-
-go_test(
-    name = "verity_test",
-    srcs = ["encoder_test.go"],
-    embed = [":verity"],
-    deps = [
-        "//metropolis/pkg/devicemapper",
-        "@com_github_stretchr_testify//require",
-        "@org_golang_x_sys//unix",
-    ],
-)
-
-ktest(
-    cmdline = "ramdisk_size=16384",
-    tester = ":verity_test",
-)
diff --git a/metropolis/pkg/verity/encoder.go b/metropolis/pkg/verity/encoder.go
deleted file mode 100644
index 1b7913b..0000000
--- a/metropolis/pkg/verity/encoder.go
+++ /dev/null
@@ -1,546 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This package implements the minimum of functionality needed to generate and
-// map dm-verity images. It's provided in order to avoid a perceived higher
-// long term cost of packaging, linking against and maintaining the original C
-// veritysetup tool.
-//
-// dm-verity is a Linux device mapper target that allows integrity verification of
-// a read-only block device. The block device whose integrity should be checked
-// (the 'data device') must be first processed by a tool like veritysetup to
-// generate a hash device and root hash.
-// The original data device, hash device and root hash are then set up as a device
-// mapper target, and any read performed from the data device through the verity
-// target will be verified for integrity by Linux using the hash device and root
-// hash.
-//
-// Internally, the hash device is a Merkle tree of all the bytes in the data
-// device, layed out as layers of 'hash blocks'. Starting with data bytes, layers
-// are built recursively, with each layer's output hash blocks becoming the next
-// layer's data input, ending with the single root hash.
-//
-// For more information about the internals, see the Linux and cryptsetup
-// upstream code:
-//
-// https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity
-package verity
-
-import (
-	"bytes"
-	"crypto/rand"
-	"crypto/sha256"
-	"encoding/binary"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"io"
-	"strconv"
-	"strings"
-)
-
-// superblock represents data layout inside of a dm-verity hash block
-// device superblock. It follows a preexisting verity implementation:
-//
-// https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity#verity-superblock-format
-type superblock struct {
-	// signature is the magic signature of a verity hash device superblock,
-	// "verity\0\0".
-	signature [8]byte
-	// version specifies a superblock format. This structure describes version
-	// '1'.
-	version uint32
-	// hashType defaults to '1' outside Chrome OS, according to scarce dm-verity
-	// documentation.
-	hashType uint32
-	// uuid contains a UUID of the hash device.
-	uuid [16]byte
-	// algorithm stores an ASCII-encoded name of the hash function used.
-	algorithm [32]byte
-
-	// dataBlockSize specifies a size of a single data device block, in bytes.
-	dataBlockSize uint32
-	// hashBlockSize specifies a size of a single hash device block, in bytes.
-	hashBlockSize uint32
-	// dataBlocks contains a count of blocks available on the data device.
-	dataBlocks uint64
-
-	// saltSize encodes the size of hash block salt, up to the maximum of 256 bytes.
-	saltSize uint16
-
-	// padding
-	_ [6]byte
-	// exactly saltSize bytes of salt are prepended to data blocks before hashing.
-	saltBuffer [256]byte
-	// padding
-	_ [168]byte
-}
-
-// newSuperblock builds a dm-verity hash device superblock based on
-// hardcoded defaults. dataBlocks is the only field left for later
-// initialization.
-// It returns either a partially initialized superblock, or an error.
-func newSuperblock() (*superblock, error) {
-	// This implementation only handles SHA256-based verity hash images
-	// with a specific 4096-byte block size.
-	// Block sizes can be updated by adjusting the struct literal below.
-	// A change of a hashing algorithm would require a refactor of
-	// saltedDigest, and references to sha256.Size.
-	//
-	// Fill in the defaults (compare with superblock definition).
-	sb := superblock{
-		signature:     [8]byte{'v', 'e', 'r', 'i', 't', 'y', 0, 0},
-		version:       1,
-		hashType:      1,
-		algorithm:     [32]byte{'s', 'h', 'a', '2', '5', '6'},
-		saltSize:      64,
-		dataBlockSize: 4096,
-		hashBlockSize: 4096,
-	}
-
-	// Fill in the superblock UUID and cryptographic salt.
-	if _, err := rand.Read(sb.uuid[:]); err != nil {
-		return nil, fmt.Errorf("when generating UUID: %w", err)
-	}
-	if _, err := rand.Read(sb.saltBuffer[:]); err != nil {
-		return nil, fmt.Errorf("when generating salt: %w", err)
-	}
-
-	return &sb, nil
-}
-
-// salt returns a slice of sb.saltBuffer actually occupied by
-// salt bytes, of sb.saltSize length.
-func (sb *superblock) salt() []byte {
-	return sb.saltBuffer[:int(sb.saltSize)]
-}
-
-// algorithmName returns a name of the algorithm used to hash data block
-// digests.
-func (sb *superblock) algorithmName() string {
-	size := bytes.IndexByte(sb.algorithm[:], 0x00)
-	return string(sb.algorithm[:size])
-}
-
-// saltedDigest computes and returns a SHA256 sum of a block prepended
-// with a Superblock-defined salt.
-func (sb *superblock) saltedDigest(data []byte) (digest [sha256.Size]byte) {
-	h := sha256.New()
-	h.Write(sb.salt())
-	h.Write(data)
-	copy(digest[:], h.Sum(nil))
-	return
-}
-
-// dataBlocksPerHashBlock returns the count of hash operation outputs that
-// fit in a hash device block. This is also the amount of data device
-// blocks it takes to populate a hash device block.
-func (sb *superblock) dataBlocksPerHashBlock() uint64 {
-	return uint64(sb.hashBlockSize) / sha256.Size
-}
-
-// computeHashBlock reads at most sb.dataBlocksPerHashBlock blocks from
-// the given reader object, returning a padded hash block of length
-// defined by sb.hashBlockSize, the count of digests output, and an
-// error, if encountered.
-// In case a non-nil block is returned, it's guaranteed to contain at
-// least one hash. An io.EOF signals that there is no more to be read.
-func (sb *superblock) computeHashBlock(r io.Reader) ([]byte, uint64, error) {
-	// dcnt stores the total count of data blocks processed, which is the
-	// as the count of digests output.
-	var dcnt uint64
-	// Preallocate a whole hash block.
-	hblk := bytes.NewBuffer(make([]byte, 0, sb.hashBlockSize))
-
-	// For every data block, compute a hash and place it in hblk. Continue
-	// till EOF.
-	for b := uint64(0); b < sb.dataBlocksPerHashBlock(); b++ {
-		dbuf := make([]byte, sb.dataBlockSize)
-		// Attempt to read enough data blocks to make a complete hash block.
-		n, err := io.ReadFull(r, dbuf)
-		// If any data was read, make a hash and add it to the hash buffer.
-		if n != 0 {
-			hash := sb.saltedDigest(dbuf)
-			hblk.Write(hash[:])
-			dcnt++
-		}
-		// Handle the read errors.
-		switch {
-		case err == nil:
-		case errors.Is(err, io.ErrUnexpectedEOF), errors.Is(err, io.EOF):
-			// io.ReadFull returns io.ErrUnexpectedEOF after a partial read,
-			// and io.EOF if no bytes were read. In both cases it's possible
-			// to end up with a partially filled hash block.
-			if hblk.Len() != 0 {
-				// Return a zero-padded hash block if any hashes were written
-				// to it, and signal that no more blocks can be built.
-				res := hblk.Bytes()
-				return res[:cap(res)], dcnt, io.EOF
-			}
-			// Return nil if the block doesn't contain any hashes.
-			return nil, 0, io.EOF
-		default:
-			// Wrap unhandled read errors.
-			return nil, 0, fmt.Errorf("while computing a hash block: %w", err)
-		}
-	}
-	// Return a completely filled hash block.
-	res := hblk.Bytes()
-	return res[:cap(res)], dcnt, nil
-}
-
-// WriteTo writes a verity superblock to a given writer object.
-// It returns the count of bytes written, and a write error, if
-// encountered.
-func (sb *superblock) WriteTo(w io.Writer) (int64, error) {
-	// Write the superblock.
-	if err := binary.Write(w, binary.LittleEndian, sb); err != nil {
-		return -1, fmt.Errorf("while writing a header: %w", err)
-	}
-
-	// Get the padding size by substracting current offset from a hash block
-	// size.
-	co := binary.Size(sb)
-	pbc := int(sb.hashBlockSize) - co
-	if pbc <= 0 {
-		return int64(co), fmt.Errorf("hash device block size smaller than dm-verity superblock")
-	}
-
-	// Write the padding bytes at the end of the block.
-	n, err := w.Write(bytes.Repeat([]byte{0}, pbc))
-	co += n
-	if err != nil {
-		return int64(co), fmt.Errorf("while writing padding: %w", err)
-	}
-	return int64(co), nil
-}
-
-// computeLevel produces a verity hash tree level based on data read from
-// a given reader object.
-// It returns a byte slice containing one or more hash blocks, or an
-// error.
-// BUG(mz): Current implementation requires a 1/128th of the data image
-// size to be allocatable on the heap.
-func (sb *superblock) computeLevel(r io.Reader) ([]byte, error) {
-	// hbuf will store all the computed hash blocks.
-	var hbuf bytes.Buffer
-	// Compute one or more hash blocks, reading all data available in the
-	// 'r' reader object, and write them into hbuf.
-	for {
-		hblk, _, err := sb.computeHashBlock(r)
-		if err != nil && err != io.EOF {
-			return nil, fmt.Errorf("while building a hash tree level: %w", err)
-		}
-		if hblk != nil {
-			_, err := hbuf.Write(hblk)
-			if err != nil {
-				return nil, fmt.Errorf("while writing to hash block buffer: %w", err)
-			}
-		}
-		if err == io.EOF {
-			break
-		}
-	}
-	return hbuf.Bytes(), nil
-}
-
-// hashTree stores hash tree levels, each level comprising one or more
-// Verity hash blocks. Levels are ordered from bottom to top.
-type hashTree [][]byte
-
-// push appends a level to the hash tree.
-func (ht *hashTree) push(nl []byte) {
-	*ht = append(*ht, nl)
-}
-
-// top returns the topmost level of the hash tree.
-func (ht *hashTree) top() []byte {
-	if len(*ht) == 0 {
-		return nil
-	}
-	return (*ht)[len(*ht)-1]
-}
-
-// WriteTo writes a verity-formatted hash tree to the given writer
-// object.
-// It returns a write error, if encountered.
-func (ht *hashTree) WriteTo(w io.Writer) (int64, error) {
-	// t keeps the count of bytes written to w.
-	var t int64
-	// Write the hash tree levels from top to bottom.
-	for l := len(*ht) - 1; l >= 0; l-- {
-		level := (*ht)[l]
-		// Call w.Write until a whole level is written.
-		for len(level) != 0 {
-			n, err := w.Write(level)
-			if err != nil {
-				return t, fmt.Errorf("while writing a level: %w", err)
-			}
-			level = level[n:]
-			t += int64(n)
-		}
-	}
-	return t, nil
-}
-
-// MappingTable aggregates data needed to generate a complete Verity
-// mapping table.
-type MappingTable struct {
-	// superblock defines the following elements of the mapping table:
-	// - data device block size
-	// - hash device block size
-	// - total count of data blocks
-	// - hash algorithm used
-	// - cryptographic salt used
-	superblock *superblock
-	// DataDevicePath is the filesystem path of the data device used as part
-	// of the Verity Device Mapper target.
-	DataDevicePath string
-	// HashDevicePath is the filesystem path of the hash device used as part
-	// of the Verity Device Mapper target.
-	HashDevicePath string
-	// HashStart marks the starting block of the Verity hash tree.
-	HashStart int64
-	// rootHash stores a cryptographic hash of the top hash tree block.
-	rootHash []byte
-}
-
-// VerityParameterList returns a list of Verity target parameters, ordered
-// as they would appear in a parameter string.
-func (t *MappingTable) VerityParameterList() []string {
-	return []string{
-		"1",
-		t.DataDevicePath,
-		t.HashDevicePath,
-		strconv.FormatUint(uint64(t.superblock.dataBlockSize), 10),
-		strconv.FormatUint(uint64(t.superblock.hashBlockSize), 10),
-		strconv.FormatUint(t.superblock.dataBlocks, 10),
-		strconv.FormatInt(t.HashStart, 10),
-		t.superblock.algorithmName(),
-		hex.EncodeToString(t.rootHash),
-		hex.EncodeToString(t.superblock.salt()),
-	}
-}
-
-// TargetParameters returns the mapping table as a list of Device Mapper
-// target parameters, ordered as they would appear in a parameter string
-// (see: String).
-func (t *MappingTable) TargetParameters() []string {
-	return append(
-		[]string{
-			"0",
-			strconv.FormatUint(t.Length(), 10),
-			"verity",
-		},
-		t.VerityParameterList()...,
-	)
-}
-
-// String returns a string-formatted mapping table for use with Device
-// Mapper.
-// BUG(mz): unescaped whitespace can appear in block device paths
-func (t *MappingTable) String() string {
-	return strings.Join(t.TargetParameters(), " ")
-}
-
-// Length returns the data device length, represented as a number of
-// 512-byte sectors.
-func (t *MappingTable) Length() uint64 {
-	return t.superblock.dataBlocks * uint64(t.superblock.dataBlockSize) / 512
-}
-
-// encoder transforms data blocks written into it into a verity hash
-// tree. It writes out the hash tree only after Close is called on it.
-type encoder struct {
-	// out is the writer object Encoder will write to.
-	out io.Writer
-	// writeSb, if true, will cause a Verity superblock to be written to the
-	// writer object.
-	writeSb bool
-	// sb contains the most of information needed to build a mapping table.
-	sb *superblock
-	// bottom stands for the bottom level of the hash tree. It contains
-	// complete hash blocks of data written to the encoder.
-	bottom bytes.Buffer
-	// dataBuffer stores incoming data for later processing.
-	dataBuffer bytes.Buffer
-	// rootHash stores the verity root hash set on Close.
-	rootHash []byte
-}
-
-// computeHashTree builds a complete hash tree based on the encoder's
-// state. Levels are appended to the returned hash tree starting from the
-// bottom, with the top level written last.
-// e.sb.dataBlocks is set according to the bottom level's length, which
-// must be divisible by e.sb.hashBlockSize.
-// e.rootHash is set on success.
-// It returns an error, if encountered.
-func (e *encoder) computeHashTree() (*hashTree, error) {
-	// Put b at the bottom of the tree. Don't perform a deep copy.
-	ht := hashTree{e.bottom.Bytes()}
-
-	// Other levels are built by hashing the hash blocks comprising a level
-	// below.
-	for {
-		if len(ht.top()) == int(e.sb.hashBlockSize) {
-			// The last level to compute has a size of exactly one hash block.
-			// That's the root level. Its hash serves as a cryptographic root of
-			// trust and is saved into a encoder for later use.
-			// In case the bottom level consists of only one hash block, no more
-			// levels are computed.
-			sd := e.sb.saltedDigest(ht.top())
-			e.rootHash = sd[:]
-			return &ht, nil
-		}
-
-		// Create the next level by hashing the previous one.
-		nl, err := e.sb.computeLevel(bytes.NewReader(ht.top()))
-		if err != nil {
-			return nil, fmt.Errorf("while computing a level: %w", err)
-		}
-		// Append the resulting next level to a tree.
-		ht.push(nl)
-	}
-}
-
-// processDataBuffer processes data blocks contained in e.dataBuffer
-// until no more data is available to form a completely filled hash block.
-// If 'incomplete' is true, all remaining data in e.dataBuffer will be
-// processed, producing a terminating incomplete block.
-// It returns the count of data blocks processed, or an error, if
-// encountered.
-func (e *encoder) processDataBuffer(incomplete bool) (uint64, error) {
-	// tdcnt stores the total count of data blocks processed.
-	var tdcnt uint64
-	// Compute the count of bytes needed to produce a complete hash block.
-	bph := e.sb.dataBlocksPerHashBlock() * uint64(e.sb.dataBlockSize)
-
-	// Iterate until no more data is available in e.dbuf.
-	for uint64(e.dataBuffer.Len()) >= bph || incomplete && e.dataBuffer.Len() != 0 {
-		hb, dcnt, err := e.sb.computeHashBlock(&e.dataBuffer)
-		if err != nil && err != io.EOF {
-			return 0, fmt.Errorf("while processing a data buffer: %w", err)
-		}
-		// Increment the total count of data blocks processed.
-		tdcnt += dcnt
-		// Write the resulting hash block into the level-zero buffer.
-		e.bottom.Write(hb[:])
-	}
-	return tdcnt, nil
-}
-
-// NewEncoder returns a fully initialized encoder, or an error. The
-// encoder will write to the given io.Writer object.
-// A verity superblock will be written, preceding the hash tree, if
-// writeSb is true.
-func NewEncoder(out io.Writer, dataBlockSize, hashBlockSize uint32, writeSb bool) (*encoder, error) {
-	sb, err := newSuperblock()
-	if err != nil {
-		return nil, fmt.Errorf("while creating a superblock: %w", err)
-	}
-	sb.dataBlockSize = dataBlockSize
-	sb.hashBlockSize = hashBlockSize
-
-	e := encoder{
-		out:     out,
-		writeSb: writeSb,
-		sb:      sb,
-	}
-	return &e, nil
-}
-
-// Write hashes raw data to form the bottom hash tree level.
-// It returns the number of bytes written, and an error, if encountered.
-func (e *encoder) Write(data []byte) (int, error) {
-	// Copy the input into the data buffer.
-	n, _ := e.dataBuffer.Write(data)
-	// Process only enough data to form a complete hash block. This may
-	// leave excess data in e.dbuf to be processed later on.
-	dcnt, err := e.processDataBuffer(false)
-	if err != nil {
-		return n, fmt.Errorf("while processing the data buffer: %w", err)
-	}
-	// Update the superblock with the count of data blocks written.
-	e.sb.dataBlocks += dcnt
-	return n, nil
-}
-
-// Close builds a complete hash tree based on cached bottom level blocks,
-// then writes it to a preconfigured io.Writer object. A Verity superblock
-// is written, if e.writeSb is true. No data, nor the superblock is written
-// if the encoder is empty.
-// It returns an error, if one was encountered.
-func (e *encoder) Close() error {
-	// Process all buffered data, including data blocks that may not form
-	// a complete hash block.
-	dcnt, err := e.processDataBuffer(true)
-	if err != nil {
-		return fmt.Errorf("while processing the data buffer: %w", err)
-	}
-	// Update the superblock with the count of data blocks written.
-	e.sb.dataBlocks += dcnt
-
-	// Don't write anything if nothing was written to the encoder.
-	if e.bottom.Len() == 0 {
-		return nil
-	}
-
-	// Compute remaining hash tree levels based on the bottom level: e.bottom.
-	ht, err := e.computeHashTree()
-	if err != nil {
-		return fmt.Errorf("while encoding a hash tree: %w", err)
-	}
-
-	// Write the Verity superblock if the encoder was configured to do so.
-	if e.writeSb {
-		if _, err = e.sb.WriteTo(e.out); err != nil {
-			return fmt.Errorf("while writing a superblock: %w", err)
-		}
-	}
-	// Write the hash tree.
-	_, err = ht.WriteTo(e.out)
-	if err != nil {
-		return fmt.Errorf("while writing a hash tree: %w", err)
-	}
-
-	return nil
-}
-
-// MappingTable returns a complete, string-convertible Verity target mapping
-// table for use with Device Mapper, or an error. Close must be called on the
-// encoder before calling this function. dataDevicePath, hashDevicePath, and
-// hashStart parameters are parts of the mapping table. See:
-// https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html
-func (e *encoder) MappingTable(dataDevicePath, hashDevicePath string, hashStart int64) (*MappingTable, error) {
-	if e.rootHash == nil {
-		if e.bottom.Len() != 0 {
-			return nil, fmt.Errorf("encoder wasn't closed")
-		}
-		return nil, fmt.Errorf("encoder is empty")
-	}
-
-	if e.writeSb {
-		// Account for the superblock.
-		hashStart += 1
-	}
-	return &MappingTable{
-		superblock:     e.sb,
-		DataDevicePath: dataDevicePath,
-		HashDevicePath: hashDevicePath,
-		HashStart:      hashStart,
-		rootHash:       e.rootHash,
-	}, nil
-}
diff --git a/metropolis/pkg/verity/encoder_test.go b/metropolis/pkg/verity/encoder_test.go
deleted file mode 100644
index 116c6a4..0000000
--- a/metropolis/pkg/verity/encoder_test.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package verity
-
-import (
-	"bytes"
-	"crypto/aes"
-	"crypto/cipher"
-	"fmt"
-	"io"
-	"os"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-	"golang.org/x/sys/unix"
-
-	dm "source.monogon.dev/metropolis/pkg/devicemapper"
-)
-
-const (
-	// testDataSize configures the size of Verity-protected data devices.
-	testDataSize int64 = 2 * 1024 * 1024
-	// accessMode configures new files' permission bits.
-	accessMode = 0600
-)
-
-// getRamdisk creates a device file pointing to an unused ramdisk.
-// Returns a filesystem path.
-func getRamdisk() (string, error) {
-	for i := 0; ; i++ {
-		path := fmt.Sprintf("/dev/ram%d", i)
-		dn := unix.Mkdev(1, uint32(i))
-		err := unix.Mknod(path, accessMode|unix.S_IFBLK, int(dn))
-		if os.IsExist(err) {
-			continue
-		}
-		if err != nil {
-			return "", err
-		}
-		return path, nil
-	}
-}
-
-// verityDMTarget returns a dm.Target based on a Verity mapping table.
-func verityDMTarget(mt *MappingTable) *dm.Target {
-	return &dm.Target{
-		Type:        "verity",
-		StartSector: 0,
-		Length:      mt.Length(),
-		Parameters:  mt.VerityParameterList(),
-	}
-}
-
-// devZeroReader is a helper type used by writeRandomBytes.
-type devZeroReader struct{}
-
-// Read implements io.Reader on devZeroReader, making it a source of zero
-// bytes.
-func (devZeroReader) Read(b []byte) (int, error) {
-	for i := range b {
-		b[i] = 0
-	}
-	return len(b), nil
-}
-
-// writeRandomBytes writes length pseudorandom bytes to a given io.Writer.
-func writeRandomBytes(w io.Writer, length int64) error {
-	keyiv := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
-	blkCipher, err := aes.NewCipher(keyiv)
-	if err != nil {
-		return err
-	}
-	var z devZeroReader
-	c := cipher.StreamReader{S: cipher.NewCTR(blkCipher, keyiv), R: z}
-	_, err = io.CopyN(w, c, length)
-	return err
-}
-
-// fillVerityRamdisks fills a block device at dataDevPath with
-// pseudorandom data and writes a complementary Verity hash device to
-// a block device at hashDevPath. Returns a dm.Target configuring a
-// resulting Verity device, and a buffer containing random data written
-// the data device.
-func fillVerityRamdisks(t *testing.T, dataDevPath, hashDevPath string) (*dm.Target, bytes.Buffer) {
-	// Open the data device for writing.
-	dfd, err := os.OpenFile(dataDevPath, os.O_WRONLY, accessMode)
-	require.NoError(t, err, "while opening the data device at %s", dataDevPath)
-	// Open the hash device for writing.
-	hfd, err := os.OpenFile(hashDevPath, os.O_WRONLY, accessMode)
-	require.NoError(t, err, "while opening the hash device at %s", hashDevPath)
-
-	// Create a Verity encoder, backed with hfd. Configure it to write the
-	// Verity superblock. Use 4096-byte blocks.
-	bs := uint32(4096)
-	verityEnc, err := NewEncoder(hfd, bs, bs, true)
-	require.NoError(t, err, "while creating a Verity encoder")
-
-	// Write pseudorandom data both to the Verity-protected data device, and
-	// into the Verity encoder, which in turn will write a resulting hash
-	// tree to hfd on Close().
-	var testData bytes.Buffer
-	tdw := io.MultiWriter(dfd, verityEnc, &testData)
-	err = writeRandomBytes(tdw, testDataSize)
-	require.NoError(t, err, "while writing test data")
-
-	// Close the file descriptors.
-	err = verityEnc.Close()
-	require.NoError(t, err, "while closing the Verity encoder")
-	err = hfd.Close()
-	require.NoError(t, err, "while closing the hash device descriptor")
-	err = dfd.Close()
-	require.NoError(t, err, "while closing the data device descriptor")
-
-	// Generate the Verity mapping table based on the encoder state, device
-	// file paths and the metadata starting block, then return it along with
-	// the test data buffer.
-	mt, err := verityEnc.MappingTable(dataDevPath, hashDevPath, 0)
-	require.NoError(t, err, "while building a Verity mapping table")
-	return verityDMTarget(mt), testData
-}
-
-// createVerityDevice maps a Verity device described by dmt while
-// assigning it a name equal to devName. It returns a Verity device path.
-func createVerityDevice(t *testing.T, dmt *dm.Target, devName string) string {
-	devNum, err := dm.CreateActiveDevice(devName, true, []dm.Target{*dmt})
-	require.NoError(t, err, "while creating a Verity device")
-
-	devPath := fmt.Sprintf("/dev/%s", devName)
-	err = unix.Mknod(devPath, accessMode|unix.S_IFBLK, int(devNum))
-	require.NoError(t, err, "while creating a Verity device file at %s", devPath)
-	return devPath
-}
-
-// cleanupVerityDevice deactivates a Verity device previously mapped by
-// createVerityDevice, and removes an associated device file.
-func cleanupVerityDevice(t *testing.T, devName string) {
-	err := dm.RemoveDevice(devName)
-	require.NoError(t, err, "while removing a Verity device %s", devName)
-
-	devPath := fmt.Sprintf("/dev/%s", devName)
-	err = os.Remove(devPath)
-	require.NoError(t, err, "while removing a Verity device file at %s", devPath)
-}
-
-// testRead compares contents of a block device at devPath with
-// expectedData. The length of data read is equal to the length
-// of expectedData.
-// It returns 'false', if either data could not be read or it does not
-// match expectedData, and 'true' otherwise.
-func testRead(t *testing.T, devPath string, expectedData []byte) bool {
-	// Open the Verity device.
-	verityDev, err := os.Open(devPath)
-	require.NoError(t, err, "while opening a Verity device at %s", devPath)
-	defer verityDev.Close()
-
-	// Attempt to read the test data. Abort on read errors.
-	readData := make([]byte, len(expectedData))
-	_, err = io.ReadFull(verityDev, readData)
-	if err != nil {
-		return false
-	}
-
-	// Return true, if read data matches expectedData.
-	if bytes.Equal(expectedData, readData) {
-		return true
-	}
-	return false
-}
-
-// TestMakeAndRead attempts to create a Verity device, then verifies the
-// integrity of its contents.
-func TestMakeAndRead(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-
-	// Allocate block devices backing the Verity target.
-	dataDevPath, err := getRamdisk()
-	require.NoError(t, err, "while allocating a data device ramdisk")
-	hashDevPath, err := getRamdisk()
-	require.NoError(t, err, "while allocating a hash device ramdisk")
-
-	// Fill the data device with test data and write a corresponding Verity
-	// hash tree to the hash device.
-	dmTarget, expectedDataBuf := fillVerityRamdisks(t, dataDevPath, hashDevPath)
-
-	// Create a Verity device using dmTarget. Use the test name as a device
-	// handle. verityPath will point to a resulting new block device.
-	verityPath := createVerityDevice(t, dmTarget, t.Name())
-	defer cleanupVerityDevice(t, t.Name())
-
-	// Use testRead to compare Verity target device contents with test data
-	// written to the data block device at dataDevPath by fillVerityRamdisks.
-	if !testRead(t, verityPath, expectedDataBuf.Bytes()) {
-		t.Error("data read from the verity device doesn't match the source")
-	}
-}
-
-// TestMalformed checks whenever Verity would prevent reading from a
-// target whose hash device contents have been corrupted, as is expected.
-func TestMalformed(t *testing.T) {
-	if os.Getenv("IN_KTEST") != "true" {
-		t.Skip("Not in ktest")
-	}
-
-	// Allocate block devices backing the Verity target.
-	dataDevPath, err := getRamdisk()
-	require.NoError(t, err, "while allocating a data device ramdisk")
-	hashDevPath, err := getRamdisk()
-	require.NoError(t, err, "while allocating a hash device ramdisk")
-
-	// Fill the data device with test data and write a corresponding Verity
-	// hash tree to the hash device.
-	dmTarget, expectedDataBuf := fillVerityRamdisks(t, dataDevPath, hashDevPath)
-
-	// Corrupt the first hash device block before mapping the Verity target.
-	hfd, err := os.OpenFile(hashDevPath, os.O_RDWR, accessMode)
-	require.NoError(t, err, "while opening a hash device at %s", hashDevPath)
-	// Place an odd byte at the 256th byte of the first hash block, skipping
-	// a 4096-byte Verity superblock.
-	hfd.Seek(4096+256, io.SeekStart)
-	hfd.Write([]byte{'F'})
-	hfd.Close()
-
-	// Create a Verity device using dmTarget. Use the test name as a device
-	// handle. verityPath will point to a resulting new block device.
-	verityPath := createVerityDevice(t, dmTarget, t.Name())
-	defer cleanupVerityDevice(t, t.Name())
-
-	// Use testRead to compare Verity target device contents with test data
-	// written to the data block device at dataDevPath by fillVerityRamdisks.
-	// This step is expected to fail after an incomplete read.
-	if testRead(t, verityPath, expectedDataBuf.Bytes()) {
-		t.Error("data matches the source when it shouldn't")
-	}
-}
diff --git a/metropolis/pkg/watchdog/BUILD.bazel b/metropolis/pkg/watchdog/BUILD.bazel
deleted file mode 100644
index 0363883..0000000
--- a/metropolis/pkg/watchdog/BUILD.bazel
+++ /dev/null
@@ -1,9 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "watchdog",
-    srcs = ["watchdog.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/watchdog",
-    visibility = ["//visibility:public"],
-    deps = ["@org_golang_x_sys//unix"],
-)
diff --git a/metropolis/pkg/watchdog/watchdog.go b/metropolis/pkg/watchdog/watchdog.go
deleted file mode 100644
index d9a14bf..0000000
--- a/metropolis/pkg/watchdog/watchdog.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Package watchdog provides access to hardware watchdogs. These can be used to
-// automatically reset/reboot a system if they are no longer pinged.
-package watchdog
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"math"
-	"os"
-	"syscall"
-	"time"
-
-	"golang.org/x/sys/unix"
-)
-
-// Device represents a handle to a hardware watchdog.
-type Device struct {
-	// Type identifies the type of watchdog device. It corresponds to the Linux
-	// driver's watchdog_info.identity value.
-	Type string
-	// HasConfiguratbleTimeout indicates if the device supports the SetTimeout
-	// call.
-	HasConfigurableTimeout bool
-	// HasPretimeout indicates if the device supports notifying the system of
-	// an impending reset and the functions to control this
-	// (Get/SetPreTimeout).
-	HasPretimeout bool
-	// Indicates if the watchdog is capable of reporting that it is responsible
-	// for the last system reset.
-	ReportsWatchdogReset bool
-
-	raw syscall.RawConn
-	f   *os.File
-}
-
-// Open opens a watchdog device identified by the path to its device inode.
-func Open(name string) (*Device, error) {
-	f, err := os.Open(name)
-	if err != nil {
-		// Already wrapped by PathError
-		return nil, err
-	}
-	raw, err := f.SyscallConn()
-	if err != nil {
-		f.Close()
-		return nil, fmt.Errorf("while obtaining RawConn: %w", err)
-	}
-	var wdInfo *unix.WatchdogInfo
-	ctrlErr := raw.Control(func(fd uintptr) {
-		wdInfo, err = unix.IoctlGetWatchdogInfo(int(fd))
-	})
-	if ctrlErr != nil {
-		f.Close()
-		return nil, fmt.Errorf("when calling RawConn.Control: %w", err)
-	}
-	if errors.Is(err, unix.ENOTTY) {
-		f.Close()
-		return nil, errors.New("device is not a watchdog")
-	}
-	if err != nil {
-		return nil, fmt.Errorf("while getting watchdog metadata: %w", err)
-	}
-	w := &Device{
-		Type:                   string(bytes.Trim(wdInfo.Identity[:], "\x00")),
-		f:                      f,
-		raw:                    raw,
-		HasConfigurableTimeout: wdInfo.Options&unix.WDIOF_SETTIMEOUT != 0,
-		HasPretimeout:          wdInfo.Options&unix.WDIOF_PRETIMEOUT != 0,
-		ReportsWatchdogReset:   wdInfo.Options&unix.WDIOF_CARDRESET != 0,
-	}
-	return w, nil
-}
-
-// SetTimeout sets the duration since the last ping after which it performs
-// a recovery actions (usually a reset or reboot).
-// Due to hardware limitations this function may approximate the set duration
-// or not be a available at all. GetTimeout returns the active timeout.
-func (w *Device) SetTimeout(t time.Duration) error {
-	if !w.HasConfigurableTimeout {
-		return errors.New("watchdog does not have a configurable timeout, check HasConfigurableTimeout")
-	}
-	var err error
-	ctrlErr := w.raw.Control(func(fd uintptr) {
-		err = unix.IoctlSetInt(int(fd), unix.WDIOC_SETTIMEOUT, int(math.Ceil(t.Seconds())))
-	})
-	if ctrlErr != nil {
-		return fmt.Errorf("when calling RawConn.Control: %w", err)
-	}
-	if err != nil {
-		return fmt.Errorf("ioctl(WDIOC_SETTIMEOUT): %w", err)
-	}
-	return nil
-}
-
-// GetTimeout returns the configured timeout duration.
-func (w *Device) GetTimeout() (time.Duration, error) {
-	var err error
-	var t int
-	ctrlErr := w.raw.Control(func(fd uintptr) {
-		t, err = unix.IoctlGetInt(int(fd), unix.WDIOC_GETTIMEOUT)
-	})
-	if ctrlErr != nil {
-		return 0, fmt.Errorf("when calling RawConn.Control: %w", err)
-	}
-	if err != nil {
-		return 0, fmt.Errorf("ioctl(WDIOC_GETTIMEOUT): %w", err)
-	}
-	return time.Duration(t) * time.Second, nil
-}
-
-// SetPreTimeout sets the minimum duration left on the expiry timer where when
-// it drops below that, the system is notified (via some high-priority
-// interrupt, usually an NMI). This is only available if HasPretimeout is true.
-// This can be used by the system (if it's still in a sem-working state) to
-// recover or dump diagnostic information before it gets forcibly reset by the
-// watchdog. To disable this functionality, set the duration to zero.
-func (w *Device) SetPreTimeout(t time.Duration) error {
-	if !w.HasPretimeout {
-		return errors.New("watchdog does not have a pretimeout, check HasPretimeout")
-	}
-	var err error
-	ctrlErr := w.raw.Control(func(fd uintptr) {
-		err = unix.IoctlSetInt(int(fd), unix.WDIOC_SETPRETIMEOUT, int(math.Ceil(t.Seconds())))
-	})
-	if ctrlErr != nil {
-		return fmt.Errorf("when calling RawConn.Control: %w", err)
-	}
-	if err != nil {
-		return fmt.Errorf("ioctl(WDIOC_SETPRETIMEOUT): %w", err)
-	}
-	return nil
-}
-
-// GetPreTimeout gets the current pre-timeout (see SetPreTimeout for more).
-func (w *Device) GetPreTimeout() (time.Duration, error) {
-	if !w.HasPretimeout {
-		return 0, errors.New("watchdog does not have a pretimeout, check HasPretimeout")
-	}
-	var err error
-	var t int
-	ctrlErr := w.raw.Control(func(fd uintptr) {
-		t, err = unix.IoctlGetInt(int(fd), unix.WDIOC_GETPRETIMEOUT)
-	})
-	if ctrlErr != nil {
-		return 0, fmt.Errorf("when calling RawConn.Control: %w", err)
-	}
-	if err != nil {
-		return 0, fmt.Errorf("ioctl(WDIOC_GETPRETIMEOUT): %w", err)
-	}
-	return time.Duration(t) * time.Second, nil
-
-}
-
-// Ping the watchdog. This needs to be called regularly before the
-// watchdog timeout expires, otherwise the system resets.
-func (w *Device) Ping() error {
-	var err error
-	ctrlErr := w.raw.Control(func(fd uintptr) {
-		err = unix.IoctlWatchdogKeepalive(int(fd))
-	})
-	if ctrlErr != nil {
-		return fmt.Errorf("when calling RawConn.Control: %w", err)
-	}
-	if err != nil {
-		return fmt.Errorf("ioctl(WDIOC_KEEPALIVE): %w", err)
-	}
-	return nil
-}
-
-// LastResetByWatchdog returns true if the last system reset was caused by
-// this watchdog. Not all watchdogs report this accurately.
-func (w *Device) LastResetByWatchdog() (bool, error) {
-	if !w.ReportsWatchdogReset {
-		return false, errors.New("watchdog does not report resets, check ReportsWatchdogReset")
-	}
-	var err error
-	var flags int
-	ctrlErr := w.raw.Control(func(fd uintptr) {
-		flags, err = unix.IoctlGetInt(int(fd), unix.WDIOC_GETBOOTSTATUS)
-	})
-	if ctrlErr != nil {
-		return false, fmt.Errorf("when calling RawConn.Control: %w", err)
-	}
-	if err != nil {
-		return false, fmt.Errorf("ioctl(WDIOC_GETBOOTSTATUS): %w", err)
-	}
-	return flags&unix.WDIOF_CARDRESET != 0, nil
-}
-
-// Close disables the watchdog and releases all associated resources.
-func (w *Device) Close() error {
-	if w.f != nil {
-		_, err := w.f.Write([]byte{'V'})
-		errClose := w.f.Close()
-		w.f = nil
-		if err != nil {
-			return err
-		}
-		return errClose
-	}
-	return nil
-}
-
-// CloseActive releases all resources and file handles, but keeps the
-// watchdog active. Another system must reopen it and ping it before
-// it expires to avoid a reset.
-func (w *Device) CloseActive() error {
-	if w.f != nil {
-		err := w.f.Close()
-		w.f = nil
-		return err
-	}
-	return nil
-}
diff --git a/metropolis/proto/api/BUILD.bazel b/metropolis/proto/api/BUILD.bazel
index 66ab06a..f573105 100644
--- a/metropolis/proto/api/BUILD.bazel
+++ b/metropolis/proto/api/BUILD.bazel
@@ -12,10 +12,10 @@
     ],
     visibility = ["//visibility:public"],
     deps = [
-        "//metropolis/pkg/logtree/proto:proto_proto",
         "//metropolis/proto/common:common_proto",
         "//metropolis/proto/ext:ext_proto",
         "//net/proto:net_proto_proto",
+        "//osbase/logtree/proto:proto_proto",
         "@com_google_protobuf//:duration_proto",
     ],
 )
@@ -27,10 +27,10 @@
     proto = ":api_proto",
     visibility = ["//visibility:public"],
     deps = [
-        "//metropolis/pkg/logtree/proto",
         "//metropolis/proto/common",
         "//metropolis/proto/ext",
         "//net/proto",
+        "//osbase/logtree/proto",
     ],
 )
 
diff --git a/metropolis/proto/api/management.proto b/metropolis/proto/api/management.proto
index 421898a..cd95a8e 100644
--- a/metropolis/proto/api/management.proto
+++ b/metropolis/proto/api/management.proto
@@ -4,7 +4,7 @@
 
 import "google/protobuf/duration.proto";
 
-import "metropolis/pkg/logtree/proto/logtree.proto";
+import "osbase/logtree/proto/logtree.proto";
 import "metropolis/proto/common/common.proto";
 import "metropolis/proto/ext/authorization.proto";
 
@@ -334,7 +334,7 @@
   // GetLogs Returns historical and/or streaming logs for a given DN with given
   // filters from the system global LogTree.
   //
-  // For more information about this API, see //metropolis/pkg/logtree. But, in
+  // For more information about this API, see //osbase/logtree. But, in
   // summary:
   //   - All logging is performed to a DN (distinguished name), which is a
   //     dot-delimited string like foo.bar.baz.
@@ -398,11 +398,11 @@
 message GetLogsResponse {
   // Entries from the requested historical entries (via WithBackLog). They will
   // all be served before the first stream_entries are served (if any).
-  repeated metropolis.pkg.logtree.proto.LogEntry backlog_entries = 1;
+  repeated osbase.pkg.logtree.proto.LogEntry backlog_entries = 1;
   // Entries streamed as they arrive. Currently no server-side buffering is
   // enabled, instead every line is served as early as it arrives. However, this
   // might change in the future, so this behaviour cannot be depended upon.
-  repeated metropolis.pkg.logtree.proto.LogEntry stream_entries = 2;
+  repeated osbase.pkg.logtree.proto.LogEntry stream_entries = 2;
 }
 
 enum ActivationMode {
diff --git a/metropolis/proto/common/BUILD.bazel b/metropolis/proto/common/BUILD.bazel
index 10f7f84..f895f44 100644
--- a/metropolis/proto/common/BUILD.bazel
+++ b/metropolis/proto/common/BUILD.bazel
@@ -7,7 +7,7 @@
     srcs = ["common.proto"],
     visibility = ["//metropolis:__subpackages__"],
     deps = [
-        "//metropolis/pkg/logtree/proto:proto_proto",
+        "//osbase/logtree/proto:proto_proto",
         "//version/spec:spec_proto",
         "@com_google_protobuf//:timestamp_proto",
     ],
@@ -19,7 +19,7 @@
     proto = ":common_proto",
     visibility = ["//metropolis:__subpackages__"],
     deps = [
-        "//metropolis/pkg/logtree/proto",
+        "//osbase/logtree/proto",
         "//version/spec",
     ],
 )
diff --git a/metropolis/proto/common/common.proto b/metropolis/proto/common/common.proto
index a0e8c73..758631f 100644
--- a/metropolis/proto/common/common.proto
+++ b/metropolis/proto/common/common.proto
@@ -21,7 +21,7 @@
 import "google/protobuf/timestamp.proto";
 import "version/spec/spec.proto";
 
-import "metropolis/pkg/logtree/proto/logtree.proto";
+import "osbase/logtree/proto/logtree.proto";
 
 // NodeRoles are the possible roles that a Metropolis Node should run within the
 // cluster. These are configured by the cluster and can be retrieved through the
@@ -252,7 +252,7 @@
     // If leveled logs are returned, all entries at severity lower than `minimum`
     // will be discarded.
     message LeveledWithMinimumSeverity {
-        metropolis.pkg.logtree.proto.LeveledLogSeverity minimum = 1;
+        osbase.pkg.logtree.proto.LeveledLogSeverity minimum = 1;
     }
     oneof filter {
         WithChildren with_children = 1;
diff --git a/metropolis/test/e2e/BUILD.bazel b/metropolis/test/e2e/BUILD.bazel
index cb8645b..565223e 100644
--- a/metropolis/test/e2e/BUILD.bazel
+++ b/metropolis/test/e2e/BUILD.bazel
@@ -1,4 +1,4 @@
-load("//metropolis/pkg/localregistry:def.bzl", "localregistry_manifest")
+load("//metropolis/test/localregistry:def.bzl", "localregistry_manifest")
 
 localregistry_manifest(
     name = "testimages_manifest",
diff --git a/metropolis/test/e2e/k8s_cts/BUILD.bazel b/metropolis/test/e2e/k8s_cts/BUILD.bazel
index 8458b2d..6593a73 100644
--- a/metropolis/test/e2e/k8s_cts/BUILD.bazel
+++ b/metropolis/test/e2e/k8s_cts/BUILD.bazel
@@ -6,7 +6,7 @@
     importpath = "source.monogon.dev/metropolis/test/e2e/k8s_cts",
     visibility = ["//visibility:private"],
     deps = [
-        "//metropolis/test/launch/cluster",
+        "//metropolis/test/launch",
         "@io_k8s_api//core/v1:core",
         "@io_k8s_api//rbac/v1:rbac",
         "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
diff --git a/metropolis/test/e2e/k8s_cts/main.go b/metropolis/test/e2e/k8s_cts/main.go
index 6383cb8..6250a16 100644
--- a/metropolis/test/e2e/k8s_cts/main.go
+++ b/metropolis/test/e2e/k8s_cts/main.go
@@ -34,7 +34,7 @@
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	"source.monogon.dev/metropolis/test/launch/cluster"
+	mlaunch "source.monogon.dev/metropolis/test/launch"
 )
 
 // makeCTSPodSpec generates a spec for a standalone pod running the Kubernetes
@@ -100,7 +100,7 @@
 	}()
 
 	// TODO(q3k): bump up number of nodes after multi-node workflow gets reimplemented.
-	cl, err := cluster.LaunchCluster(ctx, cluster.ClusterOptions{NumNodes: 1})
+	cl, err := mlaunch.LaunchCluster(ctx, mlaunch.ClusterOptions{NumNodes: 1})
 	if err != nil {
 		log.Fatalf("Failed to launch cluster: %v", err)
 	}
diff --git a/metropolis/test/e2e/suites/core/BUILD.bazel b/metropolis/test/e2e/suites/core/BUILD.bazel
index 45ce438..223d7c7 100644
--- a/metropolis/test/e2e/suites/core/BUILD.bazel
+++ b/metropolis/test/e2e/suites/core/BUILD.bazel
@@ -17,12 +17,12 @@
     deps = [
         "//metropolis/node",
         "//metropolis/node/core/rpc",
-        "//metropolis/pkg/localregistry",
         "//metropolis/proto/api",
         "//metropolis/proto/common",
         "//metropolis/test/launch",
-        "//metropolis/test/launch/cluster",
+        "//metropolis/test/localregistry",
         "//metropolis/test/util",
+        "//osbase/test/launch",
         "@io_bazel_rules_go//go/runfiles:go_default_library",
         "@org_golang_google_grpc//:go_default_library",
     ],
diff --git a/metropolis/test/e2e/suites/core/run_test.go b/metropolis/test/e2e/suites/core/run_test.go
index 8bbbf52..21640a6 100644
--- a/metropolis/test/e2e/suites/core/run_test.go
+++ b/metropolis/test/e2e/suites/core/run_test.go
@@ -19,10 +19,10 @@
 
 	common "source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/rpc"
-	"source.monogon.dev/metropolis/pkg/localregistry"
-	"source.monogon.dev/metropolis/test/launch"
-	"source.monogon.dev/metropolis/test/launch/cluster"
+	mlaunch "source.monogon.dev/metropolis/test/launch"
+	"source.monogon.dev/metropolis/test/localregistry"
 	"source.monogon.dev/metropolis/test/util"
+	"source.monogon.dev/osbase/test/launch"
 
 	apb "source.monogon.dev/metropolis/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
@@ -62,7 +62,7 @@
 		t.Fatalf("Creating test image registry failed: %v", err)
 	}
 	// Launch cluster.
-	clusterOptions := cluster.ClusterOptions{
+	clusterOptions := mlaunch.ClusterOptions{
 		NumNodes:      2,
 		LocalRegistry: lr,
 		InitialClusterConfiguration: &cpb.ClusterConfiguration{
@@ -70,7 +70,7 @@
 			StorageSecurityPolicy: cpb.ClusterConfiguration_STORAGE_SECURITY_POLICY_NEEDS_INSECURE,
 		},
 	}
-	cluster, err := cluster.LaunchCluster(ctx, clusterOptions)
+	cluster, err := mlaunch.LaunchCluster(ctx, clusterOptions)
 	if err != nil {
 		t.Fatalf("LaunchCluster failed: %v", err)
 	}
diff --git a/metropolis/test/e2e/suites/ha/BUILD.bazel b/metropolis/test/e2e/suites/ha/BUILD.bazel
index 0a89977..5a2a4dd 100644
--- a/metropolis/test/e2e/suites/ha/BUILD.bazel
+++ b/metropolis/test/e2e/suites/ha/BUILD.bazel
@@ -15,10 +15,10 @@
         "resources:ram:7000",
     ],
     deps = [
-        "//metropolis/pkg/localregistry",
         "//metropolis/test/launch",
-        "//metropolis/test/launch/cluster",
+        "//metropolis/test/localregistry",
         "//metropolis/test/util",
+        "//osbase/test/launch",
         "@io_bazel_rules_go//go/runfiles:go_default_library",
     ],
 )
diff --git a/metropolis/test/e2e/suites/ha/run_test.go b/metropolis/test/e2e/suites/ha/run_test.go
index 63a2acd..cc02df4 100644
--- a/metropolis/test/e2e/suites/ha/run_test.go
+++ b/metropolis/test/e2e/suites/ha/run_test.go
@@ -9,10 +9,10 @@
 
 	"github.com/bazelbuild/rules_go/go/runfiles"
 
-	"source.monogon.dev/metropolis/pkg/localregistry"
-	"source.monogon.dev/metropolis/test/launch"
-	"source.monogon.dev/metropolis/test/launch/cluster"
+	mlaunch "source.monogon.dev/metropolis/test/launch"
+	"source.monogon.dev/metropolis/test/localregistry"
 	"source.monogon.dev/metropolis/test/util"
+	"source.monogon.dev/osbase/test/launch"
 )
 
 const (
@@ -48,12 +48,12 @@
 		t.Fatalf("Creating test image registry failed: %v", err)
 	}
 	// Launch cluster.
-	clusterOptions := cluster.ClusterOptions{
+	clusterOptions := mlaunch.ClusterOptions{
 		NumNodes:        3,
 		LocalRegistry:   lr,
 		NodeLogsToFiles: true,
 	}
-	cluster, err := cluster.LaunchCluster(ctx, clusterOptions)
+	cluster, err := mlaunch.LaunchCluster(ctx, clusterOptions)
 	if err != nil {
 		t.Fatalf("LaunchCluster failed: %v", err)
 	}
diff --git a/metropolis/test/e2e/suites/ha_cold/BUILD.bazel b/metropolis/test/e2e/suites/ha_cold/BUILD.bazel
index d358f86..c2cfa0d 100644
--- a/metropolis/test/e2e/suites/ha_cold/BUILD.bazel
+++ b/metropolis/test/e2e/suites/ha_cold/BUILD.bazel
@@ -17,7 +17,7 @@
     deps = [
         "//metropolis/proto/common",
         "//metropolis/test/launch",
-        "//metropolis/test/launch/cluster",
         "//metropolis/test/util",
+        "//osbase/test/launch",
     ],
 )
diff --git a/metropolis/test/e2e/suites/ha_cold/run_test.go b/metropolis/test/e2e/suites/ha_cold/run_test.go
index 6670b8f..419d290 100644
--- a/metropolis/test/e2e/suites/ha_cold/run_test.go
+++ b/metropolis/test/e2e/suites/ha_cold/run_test.go
@@ -6,9 +6,9 @@
 	"testing"
 	"time"
 
-	"source.monogon.dev/metropolis/test/launch"
-	"source.monogon.dev/metropolis/test/launch/cluster"
+	mlaunch "source.monogon.dev/metropolis/test/launch"
 	"source.monogon.dev/metropolis/test/util"
+	"source.monogon.dev/osbase/test/launch"
 
 	cpb "source.monogon.dev/metropolis/proto/common"
 )
@@ -36,7 +36,7 @@
 	defer cancel()
 
 	// Launch cluster.
-	clusterOptions := cluster.ClusterOptions{
+	clusterOptions := mlaunch.ClusterOptions{
 		NumNodes:        3,
 		NodeLogsToFiles: true,
 		InitialClusterConfiguration: &cpb.ClusterConfiguration{
@@ -44,7 +44,7 @@
 			StorageSecurityPolicy: cpb.ClusterConfiguration_STORAGE_SECURITY_POLICY_NEEDS_INSECURE,
 		},
 	}
-	cluster, err := cluster.LaunchCluster(ctx, clusterOptions)
+	cluster, err := mlaunch.LaunchCluster(ctx, clusterOptions)
 	if err != nil {
 		t.Fatalf("LaunchCluster failed: %v", err)
 	}
diff --git a/metropolis/test/e2e/suites/kubernetes/BUILD.bazel b/metropolis/test/e2e/suites/kubernetes/BUILD.bazel
index 191ba35..6234e94 100644
--- a/metropolis/test/e2e/suites/kubernetes/BUILD.bazel
+++ b/metropolis/test/e2e/suites/kubernetes/BUILD.bazel
@@ -33,8 +33,8 @@
     ],
     deps = [
         "//metropolis/node",
-        "//metropolis/pkg/localregistry",
-        "//metropolis/test/launch/cluster",
+        "//metropolis/test/launch",
+        "//metropolis/test/localregistry",
         "//metropolis/test/util",
         "@io_bazel_rules_go//go/runfiles:go_default_library",
         "@io_k8s_api//core/v1:core",
diff --git a/metropolis/test/e2e/suites/kubernetes/run_test.go b/metropolis/test/e2e/suites/kubernetes/run_test.go
index ec38aa3..f15fafd 100644
--- a/metropolis/test/e2e/suites/kubernetes/run_test.go
+++ b/metropolis/test/e2e/suites/kubernetes/run_test.go
@@ -23,8 +23,8 @@
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	podv1 "k8s.io/kubernetes/pkg/api/v1/pod"
 
-	"source.monogon.dev/metropolis/pkg/localregistry"
-	"source.monogon.dev/metropolis/test/launch/cluster"
+	mlaunch "source.monogon.dev/metropolis/test/launch"
+	"source.monogon.dev/metropolis/test/localregistry"
 	"source.monogon.dev/metropolis/test/util"
 
 	common "source.monogon.dev/metropolis/node"
@@ -64,11 +64,11 @@
 	}
 
 	// Launch cluster.
-	clusterOptions := cluster.ClusterOptions{
+	clusterOptions := mlaunch.ClusterOptions{
 		NumNodes:      2,
 		LocalRegistry: lr,
 	}
-	cluster, err := cluster.LaunchCluster(ctx, clusterOptions)
+	cluster, err := mlaunch.LaunchCluster(ctx, clusterOptions)
 	if err != nil {
 		t.Fatalf("LaunchCluster failed: %v", err)
 	}
diff --git a/metropolis/test/ktest/BUILD.bazel b/metropolis/test/ktest/BUILD.bazel
deleted file mode 100644
index 952d7e8..0000000
--- a/metropolis/test/ktest/BUILD.bazel
+++ /dev/null
@@ -1,59 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
-load("//metropolis/node/build/kconfig-patcher:kconfig-patcher.bzl", "kconfig_patch")
-load("//third_party/linux:def.bzl", "linux_image")
-
-go_library(
-    name = "ktest_lib",
-    srcs = ["main.go"],
-    importpath = "source.monogon.dev/metropolis/test/ktest",
-    visibility = ["//visibility:private"],
-    deps = ["//metropolis/test/launch"],
-)
-
-go_binary(
-    name = "ktest",
-    embed = [":ktest_lib"],
-    pure = "on",
-    visibility = [
-        "//go/net/psample:__pkg__",
-        "//metropolis:__subpackages__",
-    ],
-)
-
-kconfig_patch(
-    name = "testing-config",
-    src = "//third_party/linux:linux-metropolis.config",
-    out = "testing.config",
-    override_configs = {
-        # Unlock command line
-        "CONFIG_CMDLINE_OVERRIDE": "n",
-        "CONFIG_CMDLINE_BOOL": "n",
-        # Shave off 1 second from boot time
-        "CONFIG_SERIO_I8042": "",
-        "CONFIG_KEYBOARD_ATKBD": "",
-        "CONFIG_RTC_DRV_CMOS": "",
-        # Shave off an additional 18ms (half of the boot time)
-        "CONFIG_DEBUG_WX": "",
-    },
-)
-
-linux_image(
-    name = "linux-testing",
-    image_format = "vmlinux",
-    kernel_config = ":testing-config",
-    # This image is directly used by the ktest macro, thus it needs a pretty
-    # wide visibility.
-    visibility = [
-        "//go/net/psample:__pkg__",
-        "//metropolis:__subpackages__",
-    ],
-)
-
-filegroup(
-    name = "test-script",
-    srcs = ["run_ktest.sh"],
-    visibility = [
-        "//go/net/psample:__pkg__",
-        "//metropolis:__subpackages__",
-    ],
-)
diff --git a/metropolis/test/ktest/init/BUILD.bazel b/metropolis/test/ktest/init/BUILD.bazel
deleted file mode 100644
index e48bcb2..0000000
--- a/metropolis/test/ktest/init/BUILD.bazel
+++ /dev/null
@@ -1,19 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
-
-go_library(
-    name = "init_lib",
-    srcs = ["main.go"],
-    importpath = "source.monogon.dev/metropolis/test/ktest/init",
-    visibility = ["//visibility:private"],
-    deps = ["@org_golang_x_sys//unix"],
-)
-
-go_binary(
-    name = "init",
-    embed = [":init_lib"],
-    pure = "on",
-    visibility = [
-        "//go/net/psample:__pkg__",
-        "//metropolis:__subpackages__",
-    ],
-)
diff --git a/metropolis/test/ktest/init/main.go b/metropolis/test/ktest/init/main.go
deleted file mode 100644
index 16ffa29..0000000
--- a/metropolis/test/ktest/init/main.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// ktestinit is an init designed to run inside a lightweight VM for running
-// tests in there.  It performs basic platform initialization like mounting
-// kernel filesystems and launches the test executable at /tester, passes the
-// exit code back out over the control socket to ktest and then terminates the
-// default VM kernel.
-package main
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"os/exec"
-
-	"golang.org/x/sys/unix"
-)
-
-func mountInit() error {
-	for _, el := range []struct {
-		dir   string
-		fs    string
-		flags uintptr
-	}{
-		{"/sys", "sysfs", unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV},
-		{"/sys/kernel/debug", "debugfs", unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV},
-		{"/proc", "proc", unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV},
-		{"/dev", "devtmpfs", unix.MS_NOEXEC | unix.MS_NOSUID},
-		{"/dev/pts", "devpts", unix.MS_NOEXEC | unix.MS_NOSUID},
-		{"/tmp", "tmpfs", 0},
-	} {
-		if err := os.Mkdir(el.dir, 0755); err != nil && !os.IsExist(err) {
-			return fmt.Errorf("could not make %s: %w", el.dir, err)
-		}
-		if err := unix.Mount(el.fs, el.dir, el.fs, el.flags, ""); err != nil {
-			return fmt.Errorf("could not mount %s on %s: %w", el.fs, el.dir, err)
-		}
-	}
-	return nil
-}
-
-func main() {
-	if err := mountInit(); err != nil {
-		panic(err)
-	}
-
-	// First virtual serial is always stdout, second is control
-	ioConn, err := os.OpenFile("/dev/vport1p1", os.O_RDWR, 0)
-	if err != nil {
-		fmt.Printf("Failed to open communication device: %v\n", err)
-		return
-	}
-	cmd := exec.Command("/tester", "-test.v")
-	cmd.Stderr = os.Stderr
-	cmd.Stdout = os.Stdout
-	cmd.Env = append(cmd.Env, "IN_KTEST=true")
-	if err := cmd.Run(); err != nil {
-		var exerr *exec.ExitError
-		if errors.As(err, &exerr) {
-			if _, err := ioConn.Write([]byte{uint8(exerr.ExitCode())}); err != nil {
-				panic(err)
-			}
-		}
-		fmt.Printf("Failed to execute tests (tests didn't run): %v", err)
-	} else {
-		ioConn.Write([]byte{0})
-	}
-	ioConn.Close()
-
-	unix.Reboot(unix.LINUX_REBOOT_CMD_RESTART)
-}
diff --git a/metropolis/test/ktest/ktest.bzl b/metropolis/test/ktest/ktest.bzl
deleted file mode 100644
index 8cb5257..0000000
--- a/metropolis/test/ktest/ktest.bzl
+++ /dev/null
@@ -1,58 +0,0 @@
-#  Copyright 2020 The Monogon Project Authors.
-#
-#  SPDX-License-Identifier: Apache-2.0
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
-"""
-Ktest provides a macro to run tests under a normal Metropolis node kernel
-"""
-
-load("//metropolis/node/build:def.bzl", "node_initramfs")
-
-def _dict_union(x, y):
-    z = {}
-    z.update(x)
-    z.update(y)
-    return z
-
-def ktest(tester, cmdline = "", files = {}, fsspecs = [], files_cc = {}):
-    node_initramfs(
-        name = "test_initramfs",
-        fsspecs = [
-            "//metropolis/node/build:earlydev.fsspec",
-        ] + fsspecs,
-        files = _dict_union({
-            "//metropolis/test/ktest/init": "/init",
-            tester: "/tester",
-        }, files),
-        files_cc = files_cc,
-        testonly = True,
-    )
-
-    native.sh_test(
-        name = "ktest",
-        args = [
-            "$(location //metropolis/test/ktest)",
-            "$(location :test_initramfs)",
-            "$(location //metropolis/test/ktest:linux-testing)",
-            cmdline,
-        ],
-        size = "small",
-        srcs = ["//metropolis/test/ktest:test-script"],
-        data = [
-            "//metropolis/test/ktest",
-            ":test_initramfs",
-            "//metropolis/test/ktest:linux-testing",
-        ],
-    )
diff --git a/metropolis/test/ktest/main.go b/metropolis/test/ktest/main.go
deleted file mode 100644
index 27cd919..0000000
--- a/metropolis/test/ktest/main.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// ktest is a test launcher for running tests inside a custom kernel and passes
-// the results back out.
-package main
-
-import (
-	"context"
-	"flag"
-	"io"
-	"log"
-	"os"
-	"time"
-
-	"source.monogon.dev/metropolis/test/launch"
-)
-
-var (
-	kernelPath = flag.String("kernel-path", "", "Path of the Kernel ELF file")
-	initrdPath = flag.String("initrd-path", "", "Path of the initrd image")
-	cmdline    = flag.String("cmdline", "", "Additional kernel command line options")
-)
-
-func main() {
-	flag.Parse()
-
-	hostFeedbackConn, vmFeedbackConn, err := launch.NewSocketPair()
-	if err != nil {
-		log.Fatalf("Failed to create socket pair: %v", err)
-	}
-
-	exitCodeChan := make(chan uint8, 1)
-
-	go func() {
-		defer hostFeedbackConn.Close()
-
-		returnCode := make([]byte, 1)
-		if _, err := io.ReadFull(hostFeedbackConn, returnCode); err != nil {
-			log.Fatalf("Failed to read socket: %v", err)
-		}
-		exitCodeChan <- returnCode[0]
-	}()
-
-	if err := launch.RunMicroVM(context.Background(), &launch.MicroVMOptions{
-		Name:                        "ktest",
-		KernelPath:                  *kernelPath,
-		InitramfsPath:               *initrdPath,
-		Cmdline:                     *cmdline,
-		SerialPort:                  os.Stdout,
-		ExtraChardevs:               []*os.File{vmFeedbackConn},
-		DisableHostNetworkInterface: true,
-	}); err != nil {
-		log.Fatalf("Failed to run ktest VM: %v", err)
-	}
-
-	select {
-	case exitCode := <-exitCodeChan:
-		os.Exit(int(exitCode))
-	case <-time.After(1 * time.Second):
-		log.Fatal("Failed to get an error code back (test runtime probably crashed)")
-	}
-}
diff --git a/metropolis/test/ktest/run_ktest.sh b/metropolis/test/ktest/run_ktest.sh
deleted file mode 100755
index 02920a1..0000000
--- a/metropolis/test/ktest/run_ktest.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-exec "$1" -initrd-path "$2" -kernel-path "$3" -cmdline "$4"
\ No newline at end of file
diff --git a/metropolis/test/lacp/BUILD.bazel b/metropolis/test/lacp/BUILD.bazel
index d044de2..b5a0f7a 100644
--- a/metropolis/test/lacp/BUILD.bazel
+++ b/metropolis/test/lacp/BUILD.bazel
@@ -1,5 +1,5 @@
 load("@io_bazel_rules_go//go:def.bzl", "go_test")
-load("//metropolis/test/ktest:ktest.bzl", "ktest")
+load("//osbase/test/ktest:ktest.bzl", "ktest")
 
 go_test(
     name = "lacptest_test",
diff --git a/metropolis/test/launch/BUILD.bazel b/metropolis/test/launch/BUILD.bazel
index cc5ef6c..24296d1 100644
--- a/metropolis/test/launch/BUILD.bazel
+++ b/metropolis/test/launch/BUILD.bazel
@@ -3,17 +3,50 @@
 go_library(
     name = "launch",
     srcs = [
-        "launch.go",
-        "log.go",
+        "cluster.go",
+        "insecure_key.go",
+        "metroctl.go",
+        "prefixed_stdio.go",
+        "swtpm.go",
     ],
     data = [
+        "//metropolis/node:image",
+        "//metropolis/test/nanoswitch:initramfs",
+        "//metropolis/test/swtpm/certtool",
+        "//metropolis/test/swtpm/swtpm_cert",
+        "//osbase/test/ktest:linux-testing",
+        "//third_party/edk2:firmware",
         "@com_github_bonzini_qboot//:qboot-bin",
+        "@swtpm",
+        "@swtpm//:swtpm_localca",
+        "@swtpm//:swtpm_setup",
     ],
     importpath = "source.monogon.dev/metropolis/test/launch",
-    visibility = ["//metropolis:__subpackages__"],
+    visibility = ["//visibility:public"],
     deps = [
-        "//metropolis/pkg/freeport",
+        "//go/qcow2",
+        "//metropolis/cli/metroctl/core",
+        "//metropolis/node",
+        "//metropolis/node/core/curator/proto/api",
+        "//metropolis/node/core/identity",
+        "//metropolis/node/core/rpc",
+        "//metropolis/node/core/rpc/resolver",
+        "//metropolis/proto/api",
+        "//metropolis/proto/common",
+        "//metropolis/test/localregistry",
+        "//osbase/logbuffer",
+        "//osbase/test/launch",
+        "@com_github_cenkalti_backoff_v4//:backoff",
+        "@com_github_kballard_go_shellquote//:go-shellquote",
         "@io_bazel_rules_go//go/runfiles:go_default_library",
+        "@io_k8s_client_go//kubernetes",
+        "@io_k8s_client_go//rest",
+        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//codes",
+        "@org_golang_google_grpc//status",
+        "@org_golang_google_protobuf//proto",
+        "@org_golang_x_net//proxy",
         "@org_golang_x_sys//unix",
+        "@org_uber_go_multierr//:multierr",
     ],
 )
diff --git a/metropolis/test/launch/cli/launch-cluster/BUILD.bazel b/metropolis/test/launch/cli/launch-cluster/BUILD.bazel
index 688228e..9e48795 100644
--- a/metropolis/test/launch/cli/launch-cluster/BUILD.bazel
+++ b/metropolis/test/launch/cli/launch-cluster/BUILD.bazel
@@ -8,7 +8,7 @@
     visibility = ["//visibility:private"],
     deps = [
         "//metropolis/cli/metroctl/core",
-        "//metropolis/test/launch/cluster",
+        "//metropolis/test/launch",
     ],
 )
 
diff --git a/metropolis/test/launch/cli/launch-cluster/main.go b/metropolis/test/launch/cli/launch-cluster/main.go
index c9b9dec..1529396 100644
--- a/metropolis/test/launch/cli/launch-cluster/main.go
+++ b/metropolis/test/launch/cli/launch-cluster/main.go
@@ -23,12 +23,12 @@
 	"os/signal"
 
 	metroctl "source.monogon.dev/metropolis/cli/metroctl/core"
-	"source.monogon.dev/metropolis/test/launch/cluster"
+	mlaunch "source.monogon.dev/metropolis/test/launch"
 )
 
 func main() {
 	ctx, _ := signal.NotifyContext(context.Background(), os.Interrupt)
-	cl, err := cluster.LaunchCluster(ctx, cluster.ClusterOptions{
+	cl, err := mlaunch.LaunchCluster(ctx, mlaunch.ClusterOptions{
 		NumNodes:        3,
 		NodeLogsToFiles: true,
 	})
@@ -36,7 +36,7 @@
 		log.Fatalf("LaunchCluster: %v", err)
 	}
 
-	mpath, err := cluster.MetroctlRunfilePath()
+	mpath, err := mlaunch.MetroctlRunfilePath()
 	if err != nil {
 		log.Fatalf("MetroctlRunfilePath: %v", err)
 	}
diff --git a/metropolis/test/launch/cli/launch/BUILD.bazel b/metropolis/test/launch/cli/launch/BUILD.bazel
index 49df994..f0edefc 100644
--- a/metropolis/test/launch/cli/launch/BUILD.bazel
+++ b/metropolis/test/launch/cli/launch/BUILD.bazel
@@ -9,7 +9,7 @@
     deps = [
         "//metropolis/proto/api",
         "//metropolis/test/launch",
-        "//metropolis/test/launch/cluster",
+        "//osbase/test/launch",
     ],
 )
 
diff --git a/metropolis/test/launch/cli/launch/main.go b/metropolis/test/launch/cli/launch/main.go
index 2ae3a0c..71c7aa2 100644
--- a/metropolis/test/launch/cli/launch/main.go
+++ b/metropolis/test/launch/cli/launch/main.go
@@ -24,8 +24,9 @@
 	"path/filepath"
 
 	apb "source.monogon.dev/metropolis/proto/api"
-	"source.monogon.dev/metropolis/test/launch"
-	"source.monogon.dev/metropolis/test/launch/cluster"
+
+	mlaunch "source.monogon.dev/metropolis/test/launch"
+	"source.monogon.dev/osbase/test/launch"
 )
 
 func main() {
@@ -45,22 +46,22 @@
 	defer os.RemoveAll(sd)
 
 	var ports []uint16
-	for _, p := range cluster.NodePorts {
+	for _, p := range mlaunch.NodePorts {
 		ports = append(ports, uint16(p))
 	}
 	ctx, _ := signal.NotifyContext(context.Background(), os.Interrupt)
 	doneC := make(chan error)
-	tpmf, err := cluster.NewTPMFactory(filepath.Join(ld, "tpm"))
+	tpmf, err := mlaunch.NewTPMFactory(filepath.Join(ld, "tpm"))
 	if err != nil {
 		log.Fatalf("NewTPMFactory: %v", err)
 	}
-	err = cluster.LaunchNode(ctx, ld, sd, tpmf, &cluster.NodeOptions{
+	err = mlaunch.LaunchNode(ctx, ld, sd, tpmf, &mlaunch.NodeOptions{
 		Name:       "test-node",
 		Ports:      launch.IdentityPortMap(ports),
 		SerialPort: os.Stdout,
 		NodeParameters: &apb.NodeParameters{
 			Cluster: &apb.NodeParameters_ClusterBootstrap_{
-				ClusterBootstrap: cluster.InsecureClusterBootstrap,
+				ClusterBootstrap: mlaunch.InsecureClusterBootstrap,
 			},
 		},
 	}, doneC)
diff --git a/metropolis/test/launch/cluster/cluster.go b/metropolis/test/launch/cluster.go
similarity index 99%
rename from metropolis/test/launch/cluster/cluster.go
rename to metropolis/test/launch/cluster.go
index dfeb457..7ae5f83 100644
--- a/metropolis/test/launch/cluster/cluster.go
+++ b/metropolis/test/launch/cluster.go
@@ -2,7 +2,7 @@
 // nodes and clusters in a virtualized environment using qemu. It's kept in a
 // separate package as it depends on a Metropolis node image, which might not be
 // required for some use of the launch library.
-package cluster
+package launch
 
 import (
 	"bytes"
@@ -48,8 +48,8 @@
 	"source.monogon.dev/metropolis/node/core/identity"
 	"source.monogon.dev/metropolis/node/core/rpc"
 	"source.monogon.dev/metropolis/node/core/rpc/resolver"
-	"source.monogon.dev/metropolis/pkg/localregistry"
-	"source.monogon.dev/metropolis/test/launch"
+	"source.monogon.dev/metropolis/test/localregistry"
+	"source.monogon.dev/osbase/test/launch"
 )
 
 const (
@@ -854,7 +854,7 @@
 		} else {
 			serialPort = newPrefixedStdio(99)
 		}
-		kernelPath, err := runfiles.Rlocation("_main/metropolis/test/ktest/vmlinux")
+		kernelPath, err := runfiles.Rlocation("_main/osbase/test/ktest/vmlinux")
 		if err != nil {
 			launch.Fatal("Failed to resolved nanoswitch kernel: %v", err)
 		}
diff --git a/metropolis/test/launch/cluster/BUILD.bazel b/metropolis/test/launch/cluster/BUILD.bazel
deleted file mode 100644
index efa04cb..0000000
--- a/metropolis/test/launch/cluster/BUILD.bazel
+++ /dev/null
@@ -1,52 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "cluster",
-    srcs = [
-        "cluster.go",
-        "insecure_key.go",
-        "metroctl.go",
-        "prefixed_stdio.go",
-        "swtpm.go",
-    ],
-    data = [
-        "//metropolis/node:image",
-        "//metropolis/test/ktest:linux-testing",
-        "//metropolis/test/nanoswitch:initramfs",
-        "//metropolis/test/swtpm/certtool",
-        "//metropolis/test/swtpm/swtpm_cert",
-        "//third_party/edk2:firmware",
-        "@com_github_bonzini_qboot//:qboot-bin",
-        "@swtpm",
-        "@swtpm//:swtpm_localca",
-        "@swtpm//:swtpm_setup",
-    ],
-    importpath = "source.monogon.dev/metropolis/test/launch/cluster",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//go/qcow2",
-        "//metropolis/cli/metroctl/core",
-        "//metropolis/node",
-        "//metropolis/node/core/curator/proto/api",
-        "//metropolis/node/core/identity",
-        "//metropolis/node/core/rpc",
-        "//metropolis/node/core/rpc/resolver",
-        "//metropolis/pkg/localregistry",
-        "//metropolis/pkg/logbuffer",
-        "//metropolis/proto/api",
-        "//metropolis/proto/common",
-        "//metropolis/test/launch",
-        "@com_github_cenkalti_backoff_v4//:backoff",
-        "@com_github_kballard_go_shellquote//:go-shellquote",
-        "@io_bazel_rules_go//go/runfiles:go_default_library",
-        "@io_k8s_client_go//kubernetes",
-        "@io_k8s_client_go//rest",
-        "@org_golang_google_grpc//:go_default_library",
-        "@org_golang_google_grpc//codes",
-        "@org_golang_google_grpc//status",
-        "@org_golang_google_protobuf//proto",
-        "@org_golang_x_net//proxy",
-        "@org_golang_x_sys//unix",
-        "@org_uber_go_multierr//:multierr",
-    ],
-)
diff --git a/metropolis/test/launch/cluster/insecure_key.go b/metropolis/test/launch/insecure_key.go
similarity index 98%
rename from metropolis/test/launch/cluster/insecure_key.go
rename to metropolis/test/launch/insecure_key.go
index 48cd6d8..72af26f 100644
--- a/metropolis/test/launch/cluster/insecure_key.go
+++ b/metropolis/test/launch/insecure_key.go
@@ -14,7 +14,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package cluster
+package launch
 
 import (
 	"crypto/ed25519"
diff --git a/metropolis/test/launch/launch.go b/metropolis/test/launch/launch.go
deleted file mode 100644
index 953025d..0000000
--- a/metropolis/test/launch/launch.go
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// launch implements test harnesses for running qemu VMs from tests.
-package launch
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"net"
-	"os"
-	"os/exec"
-	"strconv"
-	"strings"
-	"syscall"
-
-	"github.com/bazelbuild/rules_go/go/runfiles"
-	"golang.org/x/sys/unix"
-
-	"source.monogon.dev/metropolis/pkg/freeport"
-)
-
-type QemuValue map[string][]string
-
-// ToOption encodes structured data into a QEMU option. Example: "test", {"key1":
-// {"val1"}, "key2": {"val2", "val3"}} returns "test,key1=val1,key2=val2,key2=val3"
-func (value QemuValue) ToOption(name string) string {
-	var optionValues []string
-	if name != "" {
-		optionValues = append(optionValues, name)
-	}
-	for name, values := range value {
-		if len(values) == 0 {
-			optionValues = append(optionValues, name)
-		}
-		for _, val := range values {
-			optionValues = append(optionValues, fmt.Sprintf("%v=%v", name, val))
-		}
-	}
-	return strings.Join(optionValues, ",")
-}
-
-// PrettyPrintQemuArgs prints the given QEMU arguments to stderr.
-func PrettyPrintQemuArgs(name string, args []string) {
-	var argsFmt string
-	for _, arg := range args {
-		argsFmt += arg
-		if !strings.HasPrefix(arg, "-") {
-			argsFmt += "\n  "
-		} else {
-			argsFmt += " "
-		}
-	}
-	Log("Running %s:\n  %s\n", name, argsFmt)
-}
-
-// PortMap represents where VM ports are mapped to on the host. It maps from the VM
-// port number to the host port number.
-type PortMap map[uint16]uint16
-
-// ToQemuForwards generates QEMU hostfwd values (https://qemu.weilnetz.de/doc/qemu-
-// doc.html#:~:text=hostfwd=) for all mapped ports.
-func (p PortMap) ToQemuForwards() []string {
-	var hostfwdOptions []string
-	for vmPort, hostPort := range p {
-		hostfwdOptions = append(hostfwdOptions, fmt.Sprintf("tcp::%d-:%d", hostPort, vmPort))
-	}
-	return hostfwdOptions
-}
-
-// IdentityPortMap returns a port map where each given port is mapped onto itself
-// on the host. This is mainly useful for development against Metropolis. The dbg
-// command requires this mapping.
-func IdentityPortMap(ports []uint16) PortMap {
-	portMap := make(PortMap)
-	for _, port := range ports {
-		portMap[port] = port
-	}
-	return portMap
-}
-
-// ConflictFreePortMap returns a port map where each given port is mapped onto a
-// random free port on the host. This is intended for automated testing where
-// multiple instances of Metropolis nodes might be running. Please call this
-// function for each Launch command separately and as close to it as possible since
-// it cannot guarantee that the ports will remain free.
-func ConflictFreePortMap(ports []uint16) (PortMap, error) {
-	portMap := make(PortMap)
-	for _, port := range ports {
-		mappedPort, listenCloser, err := freeport.AllocateTCPPort()
-		if err != nil {
-			return portMap, fmt.Errorf("failed to get free host port: %w", err)
-		}
-		// Defer closing of the listening port until the function is done and all ports are
-		// allocated
-		defer listenCloser.Close()
-		portMap[port] = mappedPort
-	}
-	return portMap, nil
-}
-
-// GuestServiceMap maps an IP/port combination inside the virtual guest network
-// to a TCPAddr reachable by the host. If the guest connects to the virtual
-// address/port, this connection gets forwarded to the host.
-type GuestServiceMap map[*net.TCPAddr]net.TCPAddr
-
-// ToQemuForwards generates QEMU guestfwd values (https://qemu.weilnetz.de/doc/qemu-
-// doc.html#:~:text=guestfwd=) for all mapped addresses.
-func (p GuestServiceMap) ToQemuForwards() []string {
-	var guestfwdOptions []string
-	for guestAddr, hostAddr := range p {
-		guestfwdOptions = append(guestfwdOptions, fmt.Sprintf("tcp:%s-tcp:%s", guestAddr.String(), hostAddr.String()))
-	}
-	return guestfwdOptions
-}
-
-// NewSocketPair creates a new socket pair. By connecting both ends to different
-// instances you can connect them with a virtual "network cable". The ends can be
-// passed into the ConnectToSocket option.
-func NewSocketPair() (*os.File, *os.File, error) {
-	fds, err := unix.Socketpair(unix.AF_UNIX, syscall.SOCK_STREAM, 0)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to call socketpair: %w", err)
-	}
-
-	fd1 := os.NewFile(uintptr(fds[0]), "network0")
-	fd2 := os.NewFile(uintptr(fds[1]), "network1")
-	return fd1, fd2, nil
-}
-
-// HostInterfaceMAC is the MAC address the host SLIRP network interface has if it
-// is not disabled (see DisableHostNetworkInterface in MicroVMOptions)
-var HostInterfaceMAC = net.HardwareAddr{0x02, 0x72, 0x82, 0xbf, 0xc3, 0x56}
-
-// MicroVMOptions contains all options to start a MicroVM
-type MicroVMOptions struct {
-	// Name is a human-readable identifier to be used in debug output.
-	Name string
-
-	// Path to the ELF kernel binary
-	KernelPath string
-
-	// Path to the Initramfs
-	InitramfsPath string
-
-	// Cmdline contains additional kernel commandline options
-	Cmdline string
-
-	// SerialPort is a File(descriptor) over which you can communicate with the serial
-	// port of the machine It can be set to an existing file descriptor (like
-	// os.Stdout/os.Stderr) or you can use NewSocketPair() to get one end to talk to
-	// from Go.
-	SerialPort io.Writer
-
-	// ExtraChardevs can be used similar to SerialPort, but can contain an arbitrary
-	// number of additional serial ports
-	ExtraChardevs []*os.File
-
-	// ExtraNetworkInterfaces can contain an arbitrary number of file descriptors which
-	// are mapped into the VM as virtio network interfaces. The first interface is
-	// always a SLIRP-backed interface for communicating with the host.
-	ExtraNetworkInterfaces []*os.File
-
-	// PortMap contains ports that are mapped to the host through the built-in SLIRP
-	// network interface.
-	PortMap PortMap
-
-	// GuestServiceMap contains TCP services made available in the guest virtual
-	// network which are running on the host.
-	GuestServiceMap GuestServiceMap
-
-	// DisableHostNetworkInterface disables the SLIRP-backed host network interface
-	// that is normally the first network interface. If this is set PortMap is ignored.
-	// Mostly useful for speeding up QEMU's startup time for tests.
-	DisableHostNetworkInterface bool
-
-	// PcapDump can be used to dump all network traffic to a pcap file.
-	// If unset, no dump is created.
-	PcapDump string
-}
-
-// RunMicroVM launches a tiny VM mostly intended for testing. Very quick to boot
-// (<40ms).
-func RunMicroVM(ctx context.Context, opts *MicroVMOptions) error {
-	// Generate options for all the file descriptors we'll be passing as virtio "serial
-	// ports"
-	var extraArgs []string
-	for idx := range opts.ExtraChardevs {
-		idxStr := strconv.Itoa(idx)
-		id := "extra" + idxStr
-		// That this works is pretty much a hack, but upstream QEMU doesn't have a
-		// bidirectional chardev backend not based around files/sockets on the disk which
-		// are a giant pain to work with. We're using QEMU's fdset functionality to make
-		// FDs available as pseudo-files and then "ab"using the pipe backend's fallback
-		// functionality to get a single bidirectional chardev backend backed by a passed-
-		// down RDWR fd. Ref https://lists.gnu.org/archive/html/qemu-devel/2015-
-		// 12/msg01256.html
-		addFdConf := QemuValue{
-			"set": {idxStr},
-			"fd":  {strconv.Itoa(idx + 3)},
-		}
-		chardevConf := QemuValue{
-			"id":   {id},
-			"path": {"/dev/fdset/" + idxStr},
-		}
-		deviceConf := QemuValue{
-			"chardev": {id},
-		}
-		extraArgs = append(extraArgs, "-add-fd", addFdConf.ToOption(""),
-			"-chardev", chardevConf.ToOption("pipe"), "-device", deviceConf.ToOption("virtserialport"))
-	}
-
-	for idx := range opts.ExtraNetworkInterfaces {
-		id := fmt.Sprintf("net%v", idx)
-		netdevConf := QemuValue{
-			"id": {id},
-			"fd": {strconv.Itoa(idx + 3 + len(opts.ExtraChardevs))},
-		}
-		extraArgs = append(extraArgs, "-netdev", netdevConf.ToOption("socket"), "-device", "virtio-net-device,netdev="+id)
-	}
-
-	// This sets up a minimum viable environment for our Linux kernel. It clears all
-	// standard QEMU configuration and sets up a MicroVM machine
-	// (https://github.com/qemu/qemu/blob/master/docs/microvm.rst) with all legacy
-	// emulation turned off. This means the only "hardware" the Linux kernel inside can
-	// communicate with is a single virtio-mmio region. Over that MMIO interface we run
-	// a paravirtualized RNG (since the kernel in there has nothing to gather that from
-	// and it delays booting), a single paravirtualized console and an arbitrary number
-	// of extra serial ports for talking to various things that might run inside. The
-	// kernel, initramfs and command line are mapped into VM memory at boot time and
-	// not loaded from any sort of disk. Booting and shutting off one of these VMs
-	// takes <100ms.
-	biosPath, err := runfiles.Rlocation("com_github_bonzini_qboot/bios.bin")
-	if err != nil {
-		return fmt.Errorf("while searching bios: %w", err)
-	}
-
-	baseArgs := []string{
-		"-nodefaults", "-no-user-config", "-nographic", "-no-reboot",
-		"-accel", "kvm", "-cpu", "host",
-		"-m", "1G",
-		// Needed until QEMU updates their bundled qboot version (needs
-		// https://github.com/bonzini/qboot/pull/28)
-		"-bios", biosPath,
-		"-M", "microvm,x-option-roms=off,pic=off,pit=off,rtc=off,isa-serial=off",
-		"-kernel", opts.KernelPath,
-		// We force using a triple-fault reboot strategy since otherwise the kernel first
-		// tries others (like ACPI) which are not available in this very restricted
-		// environment. Similarly we need to override the boot console since there's
-		// nothing on the ISA bus that the kernel could talk to. We also force quiet for
-		// performance reasons.
-		"-append", "reboot=t console=hvc0 quiet " + opts.Cmdline,
-		"-initrd", opts.InitramfsPath,
-		"-device", "virtio-rng-device,max-bytes=1024,period=1000",
-		"-device", "virtio-serial-device,max_ports=16",
-		"-chardev", "stdio,id=con0", "-device", "virtconsole,chardev=con0",
-	}
-
-	if !opts.DisableHostNetworkInterface {
-		qemuNetType := "user"
-		qemuNetConfig := QemuValue{
-			"id":        {"usernet0"},
-			"net":       {"10.42.0.0/24"},
-			"dhcpstart": {"10.42.0.10"},
-		}
-		if opts.PortMap != nil {
-			qemuNetConfig["hostfwd"] = opts.PortMap.ToQemuForwards()
-		}
-		if opts.GuestServiceMap != nil {
-			qemuNetConfig["guestfwd"] = opts.GuestServiceMap.ToQemuForwards()
-		}
-
-		baseArgs = append(baseArgs, "-netdev", qemuNetConfig.ToOption(qemuNetType),
-			"-device", "virtio-net-device,netdev=usernet0,mac="+HostInterfaceMAC.String())
-	}
-
-	if !opts.DisableHostNetworkInterface && opts.PcapDump != "" {
-		qemuNetDump := QemuValue{
-			"id":     {"usernet0"},
-			"netdev": {"usernet0"},
-			"file":   {opts.PcapDump},
-		}
-		extraArgs = append(extraArgs, "-object", qemuNetDump.ToOption("filter-dump"))
-	}
-
-	var stdErrBuf bytes.Buffer
-	cmd := exec.CommandContext(ctx, "qemu-system-x86_64", append(baseArgs, extraArgs...)...)
-	cmd.Stdout = opts.SerialPort
-	cmd.Stderr = &stdErrBuf
-
-	cmd.ExtraFiles = append(cmd.ExtraFiles, opts.ExtraChardevs...)
-	cmd.ExtraFiles = append(cmd.ExtraFiles, opts.ExtraNetworkInterfaces...)
-
-	PrettyPrintQemuArgs(opts.Name, cmd.Args)
-
-	err = cmd.Run()
-	// If it's a context error, just quit. There's no way to tell a
-	// killed-due-to-context vs killed-due-to-external-reason error returned by Run,
-	// so we approximate by looking at the context's status.
-	if err != nil && ctx.Err() != nil {
-		return ctx.Err()
-	}
-
-	var exerr *exec.ExitError
-	if err != nil && errors.As(err, &exerr) {
-		exerr.Stderr = stdErrBuf.Bytes()
-		newErr := QEMUError(*exerr)
-		return &newErr
-	}
-	return err
-}
-
-// QEMUError is a special type of ExitError used when QEMU fails. In addition to
-// normal ExitError features it prints stderr for debugging.
-type QEMUError exec.ExitError
-
-func (e *QEMUError) Error() string {
-	return fmt.Sprintf("%v: %v", e.String(), string(e.Stderr))
-}
diff --git a/metropolis/test/launch/log.go b/metropolis/test/launch/log.go
deleted file mode 100644
index 2637e24..0000000
--- a/metropolis/test/launch/log.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package launch
-
-import (
-	"fmt"
-	"os"
-	"strings"
-)
-
-// Log is compatible with the output of ConciseString as used in the Metropolis
-// console log, making the output more readable in unified test logs.
-func Log(f string, args ...any) {
-	formatted := fmt.Sprintf(f, args...)
-	for i, line := range strings.Split(formatted, "\n") {
-		if len(line) == 0 {
-			continue
-		}
-		if i == 0 {
-			fmt.Printf("TT| %20s ! %s\n", "test launch", line)
-		} else {
-			fmt.Printf("TT| %20s | %s\n", "", line)
-		}
-	}
-}
-
-func Fatal(f string, args ...any) {
-	Log(f, args...)
-	os.Exit(1)
-}
diff --git a/metropolis/test/launch/cluster/metroctl.go b/metropolis/test/launch/metroctl.go
similarity index 99%
rename from metropolis/test/launch/cluster/metroctl.go
rename to metropolis/test/launch/metroctl.go
index e985a64..e3196a6 100644
--- a/metropolis/test/launch/cluster/metroctl.go
+++ b/metropolis/test/launch/metroctl.go
@@ -1,4 +1,4 @@
-package cluster
+package launch
 
 import (
 	"context"
diff --git a/metropolis/test/launch/cluster/prefixed_stdio.go b/metropolis/test/launch/prefixed_stdio.go
similarity index 94%
rename from metropolis/test/launch/cluster/prefixed_stdio.go
rename to metropolis/test/launch/prefixed_stdio.go
index 3ea3e18..c851c44 100644
--- a/metropolis/test/launch/cluster/prefixed_stdio.go
+++ b/metropolis/test/launch/prefixed_stdio.go
@@ -1,11 +1,11 @@
-package cluster
+package launch
 
 import (
 	"fmt"
 	"io"
 	"strings"
 
-	"source.monogon.dev/metropolis/pkg/logbuffer"
+	"source.monogon.dev/osbase/logbuffer"
 )
 
 // prefixedStdio is a io.ReadWriter which splits written bytes into lines,
diff --git a/metropolis/test/launch/cluster/swtpm.go b/metropolis/test/launch/swtpm.go
similarity index 98%
rename from metropolis/test/launch/cluster/swtpm.go
rename to metropolis/test/launch/swtpm.go
index 0f9b5c5..fa5cb78 100644
--- a/metropolis/test/launch/cluster/swtpm.go
+++ b/metropolis/test/launch/swtpm.go
@@ -1,4 +1,4 @@
-package cluster
+package launch
 
 import (
 	"context"
@@ -12,7 +12,7 @@
 
 	"github.com/bazelbuild/rules_go/go/runfiles"
 
-	"source.monogon.dev/metropolis/test/launch"
+	"source.monogon.dev/osbase/test/launch"
 )
 
 // A TPMFactory manufactures virtual TPMs using swtpm.
diff --git a/metropolis/pkg/localregistry/BUILD.bazel b/metropolis/test/localregistry/BUILD.bazel
similarity index 85%
rename from metropolis/pkg/localregistry/BUILD.bazel
rename to metropolis/test/localregistry/BUILD.bazel
index ae8c384..548960a 100644
--- a/metropolis/pkg/localregistry/BUILD.bazel
+++ b/metropolis/test/localregistry/BUILD.bazel
@@ -3,10 +3,10 @@
 go_library(
     name = "localregistry",
     srcs = ["localregistry.go"],
-    importpath = "source.monogon.dev/metropolis/pkg/localregistry",
+    importpath = "source.monogon.dev/metropolis/test/localregistry",
     visibility = ["//visibility:public"],
     deps = [
-        "//metropolis/pkg/localregistry/spec",
+        "//metropolis/test/localregistry/spec",
         "@com_github_docker_distribution//:distribution",
         "@com_github_docker_distribution//manifest/manifestlist",
         "@com_github_docker_distribution//manifest/ocischema",
diff --git a/metropolis/pkg/localregistry/def.bzl b/metropolis/test/localregistry/def.bzl
similarity index 100%
rename from metropolis/pkg/localregistry/def.bzl
rename to metropolis/test/localregistry/def.bzl
diff --git a/metropolis/pkg/localregistry/localregistry.go b/metropolis/test/localregistry/localregistry.go
similarity index 98%
rename from metropolis/pkg/localregistry/localregistry.go
rename to metropolis/test/localregistry/localregistry.go
index dc0fdde..120eb61 100644
--- a/metropolis/pkg/localregistry/localregistry.go
+++ b/metropolis/test/localregistry/localregistry.go
@@ -23,7 +23,7 @@
 	"github.com/opencontainers/go-digest"
 	"google.golang.org/protobuf/encoding/prototext"
 
-	"source.monogon.dev/metropolis/pkg/localregistry/spec"
+	"source.monogon.dev/metropolis/test/localregistry/spec"
 )
 
 type Server struct {
diff --git a/metropolis/pkg/localregistry/spec/BUILD.bazel b/metropolis/test/localregistry/spec/BUILD.bazel
similarity index 77%
rename from metropolis/pkg/localregistry/spec/BUILD.bazel
rename to metropolis/test/localregistry/spec/BUILD.bazel
index 3d6b734..71253d1 100644
--- a/metropolis/pkg/localregistry/spec/BUILD.bazel
+++ b/metropolis/test/localregistry/spec/BUILD.bazel
@@ -10,7 +10,7 @@
 
 go_proto_library(
     name = "spec_go_proto",
-    importpath = "source.monogon.dev/metropolis/pkg/localregistry/spec",
+    importpath = "source.monogon.dev/metropolis/test/localregistry/spec",
     proto = ":spec_proto",
     visibility = ["//visibility:public"],
 )
@@ -18,6 +18,6 @@
 go_library(
     name = "spec",
     embed = [":spec_go_proto"],
-    importpath = "source.monogon.dev/metropolis/pkg/localregistry/spec",
+    importpath = "source.monogon.dev/metropolis/test/localregistry/spec",
     visibility = ["//visibility:public"],
 )
diff --git a/metropolis/pkg/localregistry/spec/gomod-generated-placeholder.go b/metropolis/test/localregistry/spec/gomod-generated-placeholder.go
similarity index 100%
rename from metropolis/pkg/localregistry/spec/gomod-generated-placeholder.go
rename to metropolis/test/localregistry/spec/gomod-generated-placeholder.go
diff --git a/metropolis/pkg/localregistry/spec/manifest.proto b/metropolis/test/localregistry/spec/manifest.proto
similarity index 82%
rename from metropolis/pkg/localregistry/spec/manifest.proto
rename to metropolis/test/localregistry/spec/manifest.proto
index b28c8b7..bb53581 100644
--- a/metropolis/pkg/localregistry/spec/manifest.proto
+++ b/metropolis/test/localregistry/spec/manifest.proto
@@ -2,7 +2,7 @@
 
 package monogon.metropolis.pkg.localregistry;
 
-option go_package = "source.monogon.dev/metropolis/pkg/localregistry/spec";
+option go_package = "source.monogon.dev/metropolis/test/localregistry/spec";
 
 // Single image metadata
 message Image {
diff --git a/metropolis/test/nanoswitch/BUILD.bazel b/metropolis/test/nanoswitch/BUILD.bazel
index a3163f5..4954480 100644
--- a/metropolis/test/nanoswitch/BUILD.bazel
+++ b/metropolis/test/nanoswitch/BUILD.bazel
@@ -13,10 +13,10 @@
         "//metropolis/node",
         "//metropolis/node/core/network/dhcp4c",
         "//metropolis/node/core/network/dhcp4c/callback",
-        "//metropolis/pkg/logtree",
-        "//metropolis/pkg/socksproxy",
-        "//metropolis/pkg/supervisor",
-        "//metropolis/test/launch",
+        "//osbase/logtree",
+        "//osbase/socksproxy",
+        "//osbase/supervisor",
+        "//osbase/test/launch",
         "@com_github_google_nftables//:nftables",
         "@com_github_google_nftables//expr",
         "@com_github_insomniacslk_dhcp//dhcpv4",
diff --git a/metropolis/test/nanoswitch/nanoswitch.go b/metropolis/test/nanoswitch/nanoswitch.go
index b4ced56..ca1d77d 100644
--- a/metropolis/test/nanoswitch/nanoswitch.go
+++ b/metropolis/test/nanoswitch/nanoswitch.go
@@ -45,9 +45,9 @@
 	common "source.monogon.dev/metropolis/node"
 	"source.monogon.dev/metropolis/node/core/network/dhcp4c"
 	dhcpcb "source.monogon.dev/metropolis/node/core/network/dhcp4c/callback"
-	"source.monogon.dev/metropolis/pkg/logtree"
-	"source.monogon.dev/metropolis/pkg/supervisor"
-	"source.monogon.dev/metropolis/test/launch"
+	"source.monogon.dev/osbase/logtree"
+	"source.monogon.dev/osbase/supervisor"
+	"source.monogon.dev/osbase/test/launch"
 )
 
 var switchIP = net.IP{10, 1, 0, 1}
diff --git a/metropolis/test/nanoswitch/socks.go b/metropolis/test/nanoswitch/socks.go
index b8e7107..1230903 100644
--- a/metropolis/test/nanoswitch/socks.go
+++ b/metropolis/test/nanoswitch/socks.go
@@ -5,13 +5,13 @@
 	"fmt"
 	"net"
 
-	"source.monogon.dev/metropolis/pkg/socksproxy"
-	"source.monogon.dev/metropolis/pkg/supervisor"
+	"source.monogon.dev/osbase/socksproxy"
+	"source.monogon.dev/osbase/supervisor"
 )
 
 // SOCKSPort is the port at which nanoswitch listens for SOCKS conenctions.
 //
-// ONCHANGE(//metropolis/test/launch/cluster:cluster.go): port must be kept in sync
+// ONCHANGE(//metropolis/test/launch:cluster.go): port must be kept in sync
 const SOCKSPort uint16 = 1080
 
 // socksHandler implements a socksproxy.Handler which permits and logs
diff --git a/metropolis/test/swtpm/swtpm_cert/BUILD.bazel b/metropolis/test/swtpm/swtpm_cert/BUILD.bazel
index dce12e6..f0a8050 100644
--- a/metropolis/test/swtpm/swtpm_cert/BUILD.bazel
+++ b/metropolis/test/swtpm/swtpm_cert/BUILD.bazel
@@ -9,7 +9,7 @@
     importpath = "source.monogon.dev/metropolis/test/swtpm/swtpm_cert",
     visibility = ["//visibility:private"],
     deps = [
-        "//metropolis/pkg/pki",
+        "//osbase/pki",
         "@com_github_spf13_pflag//:pflag",
     ],
 )
diff --git a/metropolis/test/swtpm/swtpm_cert/main.go b/metropolis/test/swtpm/swtpm_cert/main.go
index 7b14f3a..8d5663b 100644
--- a/metropolis/test/swtpm/swtpm_cert/main.go
+++ b/metropolis/test/swtpm/swtpm_cert/main.go
@@ -24,7 +24,7 @@
 
 	"github.com/spf13/pflag"
 
-	"source.monogon.dev/metropolis/pkg/pki"
+	"source.monogon.dev/osbase/pki"
 )
 
 func getSignkey() *rsa.PrivateKey {
diff --git a/metropolis/test/util/BUILD.bazel b/metropolis/test/util/BUILD.bazel
index 00790a2..3603daf 100644
--- a/metropolis/test/util/BUILD.bazel
+++ b/metropolis/test/util/BUILD.bazel
@@ -12,10 +12,10 @@
     deps = [
         "//metropolis/node/core/curator/proto/api",
         "//metropolis/node/core/identity",
-        "//metropolis/pkg/event/memory",
-        "//metropolis/pkg/pki",
         "//metropolis/proto/common",
-        "//metropolis/test/launch",
+        "//osbase/event/memory",
+        "//osbase/pki",
+        "//osbase/test/launch",
         "@com_zx2c4_golang_wireguard_wgctrl//wgtypes",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//credentials/insecure",
diff --git a/metropolis/test/util/curator.go b/metropolis/test/util/curator.go
index 39b2610..28fa7b1 100644
--- a/metropolis/test/util/curator.go
+++ b/metropolis/test/util/curator.go
@@ -13,7 +13,7 @@
 	apb "source.monogon.dev/metropolis/node/core/curator/proto/api"
 	cpb "source.monogon.dev/metropolis/proto/common"
 
-	"source.monogon.dev/metropolis/pkg/event/memory"
+	"source.monogon.dev/osbase/event/memory"
 )
 
 // TestCurator is a shim Curator implementation that serves pending Watch
diff --git a/metropolis/test/util/rpc.go b/metropolis/test/util/rpc.go
index e4caece..030d8d0 100644
--- a/metropolis/test/util/rpc.go
+++ b/metropolis/test/util/rpc.go
@@ -9,7 +9,7 @@
 	"testing"
 
 	"source.monogon.dev/metropolis/node/core/identity"
-	"source.monogon.dev/metropolis/pkg/pki"
+	"source.monogon.dev/osbase/pki"
 )
 
 // NewEphemeralClusterCredentials creates a set of TLS certificates for use in a
diff --git a/metropolis/test/util/runners.go b/metropolis/test/util/runners.go
index bb7aa91..95de02a 100644
--- a/metropolis/test/util/runners.go
+++ b/metropolis/test/util/runners.go
@@ -9,7 +9,7 @@
 	"testing"
 	"time"
 
-	"source.monogon.dev/metropolis/test/launch"
+	"source.monogon.dev/osbase/test/launch"
 )
 
 // TestEventual creates a new subtest looping the given function until it either
diff --git a/metropolis/vm/smoketest/BUILD.bazel b/metropolis/vm/smoketest/BUILD.bazel
index 7264bcf..3c01a4a 100644
--- a/metropolis/vm/smoketest/BUILD.bazel
+++ b/metropolis/vm/smoketest/BUILD.bazel
@@ -23,7 +23,7 @@
     name = "smoketest",
     data = [
         ":initramfs",
-        "//metropolis/test/ktest:linux-testing",
+        "//osbase/test/ktest:linux-testing",
         "@qemu//:qemu-x86_64-softmmu",
     ],
     embed = [":smoketest_lib"],
diff --git a/metropolis/vm/smoketest/main.go b/metropolis/vm/smoketest/main.go
index d22a578..3f0b8ca 100644
--- a/metropolis/vm/smoketest/main.go
+++ b/metropolis/vm/smoketest/main.go
@@ -65,7 +65,7 @@
 		"-accel", "kvm", "-cpu", "host",
 		"-bios", biosPath,
 		"-M", "microvm,x-option-roms=off,pic=off,pit=off,rtc=off,isa-serial=off",
-		"-kernel", "metropolis/test/ktest/linux-testing.elf",
+		"-kernel", "osbase/test/ktest/linux-testing.elf",
 		"-append", "reboot=t console=hvc0 quiet",
 		"-initrd", "metropolis/vm/smoketest/initramfs.cpio.lz4",
 		"-device", "virtio-rng-device,max-bytes=1024,period=1000",