cloud: split shepherd up

Change-Id: I8e386d9eaaf17543743e1e8a37a8d71426910d59
Reviewed-on: https://review.monogon.dev/c/monogon/+/2213
Reviewed-by: Serge Bazanski <serge@monogon.tech>
Tested-by: Jenkins CI
diff --git a/cloud/shepherd/BUILD.bazel b/cloud/shepherd/BUILD.bazel
new file mode 100644
index 0000000..512ebed
--- /dev/null
+++ b/cloud/shepherd/BUILD.bazel
@@ -0,0 +1,12 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "shepherd",
+    srcs = ["shepherd.go"],
+    importpath = "source.monogon.dev/cloud/shepherd",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//cloud/bmaas/bmdb",
+        "//cloud/bmaas/bmdb/model",
+    ],
+)
diff --git a/cloud/shepherd/equinix/cli/BUILD.bazel b/cloud/shepherd/equinix/cli/BUILD.bazel
deleted file mode 100644
index d47afbd..0000000
--- a/cloud/shepherd/equinix/cli/BUILD.bazel
+++ /dev/null
@@ -1,27 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
-
-go_library(
-    name = "cli_lib",
-    srcs = [
-        "cmd_delete.go",
-        "cmd_move.go",
-        "cmd_reboot.go",
-        "cmd_yoink.go",
-        "main.go",
-    ],
-    importpath = "source.monogon.dev/cloud/shepherd/equinix/cli",
-    visibility = ["//visibility:private"],
-    deps = [
-        "//cloud/shepherd/equinix/wrapngo",
-        "//metropolis/cli/pkg/context",
-        "@com_github_packethost_packngo//:packngo",
-        "@com_github_spf13_cobra//:cobra",
-        "@io_k8s_klog_v2//:klog",
-    ],
-)
-
-go_binary(
-    name = "cli",
-    embed = [":cli_lib"],
-    visibility = ["//visibility:public"],
-)
diff --git a/cloud/shepherd/equinix/cli/cmd_delete.go b/cloud/shepherd/equinix/cli/cmd_delete.go
deleted file mode 100644
index 07b4dc7..0000000
--- a/cloud/shepherd/equinix/cli/cmd_delete.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package main
-
-import (
-	"context"
-	"time"
-
-	"github.com/packethost/packngo"
-	"github.com/spf13/cobra"
-	"k8s.io/klog/v2"
-
-	"source.monogon.dev/cloud/shepherd/equinix/wrapngo"
-	clicontext "source.monogon.dev/metropolis/cli/pkg/context"
-)
-
-var deleteCmd = &cobra.Command{
-	Use:   "delete [target]",
-	Short: "Delete all devices from one project",
-	Args:  cobra.ExactArgs(1),
-	Run:   doDelete,
-}
-
-func init() {
-	rootCmd.AddCommand(deleteCmd)
-}
-
-func doDelete(cmd *cobra.Command, args []string) {
-	ctx := clicontext.WithInterrupt(context.Background())
-	api := wrapngo.New(&c)
-
-	klog.Infof("Listing devices for %q", args[0])
-
-	devices, err := api.ListDevices(ctx, args[0])
-	if err != nil {
-		klog.Exitf("failed listing devices: %v", err)
-	}
-
-	if len(devices) == 0 {
-		klog.Infof("No devices found in %s", args[0])
-		return
-	}
-
-	klog.Infof("Deleting %d Devices in %s. THIS WILL DELETE SERVERS! You have five seconds to cancel!", len(devices), args[0])
-	time.Sleep(5 * time.Second)
-
-	for _, d := range devices {
-		h := "deleted-" + d.Hostname
-		_, err := api.UpdateDevice(ctx, d.ID, &packngo.DeviceUpdateRequest{
-			Hostname: &h,
-		})
-		if err != nil {
-			klog.Infof("failed updating device %s (%s): %v", d.ID, d.Hostname, err)
-			continue
-		}
-
-		klog.Infof("deleting %s (%s)...", d.ID, d.Hostname)
-		if err := api.DeleteDevice(ctx, d.ID); err != nil {
-			klog.Infof("failed deleting device %s (%s): %v", d.ID, d.Hostname, err)
-			continue
-		}
-	}
-}
diff --git a/cloud/shepherd/equinix/cli/cmd_move.go b/cloud/shepherd/equinix/cli/cmd_move.go
deleted file mode 100644
index fa93501..0000000
--- a/cloud/shepherd/equinix/cli/cmd_move.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package main
-
-import (
-	"context"
-
-	"github.com/spf13/cobra"
-	"k8s.io/klog/v2"
-
-	"source.monogon.dev/cloud/shepherd/equinix/wrapngo"
-	clicontext "source.monogon.dev/metropolis/cli/pkg/context"
-)
-
-var moveCmd = &cobra.Command{
-	Use:   "move [source] [target]",
-	Short: "Move all reserved hardware from one to another project",
-	Args:  cobra.ExactArgs(2),
-	Run:   doMove,
-}
-
-func init() {
-	rootCmd.AddCommand(moveCmd)
-}
-
-func doMove(cmd *cobra.Command, args []string) {
-	ctx := clicontext.WithInterrupt(context.Background())
-	api := wrapngo.New(&c)
-
-	klog.Infof("Listing reservations for %q", args[0])
-	reservations, err := api.ListReservations(ctx, args[0])
-	if err != nil {
-		klog.Exitf("failed listing reservations: %v", err)
-	}
-
-	klog.Infof("Got %d reservations. Moving machines", len(reservations))
-	for _, r := range reservations {
-		_, err := api.MoveReservation(ctx, r.ID, args[1])
-		if err != nil {
-			klog.Errorf("failed moving reservation: %v", err)
-			continue
-		}
-		klog.Infof("Moved Device %s", r.ID)
-	}
-}
diff --git a/cloud/shepherd/equinix/cli/cmd_reboot.go b/cloud/shepherd/equinix/cli/cmd_reboot.go
deleted file mode 100644
index 528cd2e..0000000
--- a/cloud/shepherd/equinix/cli/cmd_reboot.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package main
-
-import (
-	"context"
-
-	"github.com/spf13/cobra"
-	"k8s.io/klog/v2"
-
-	"source.monogon.dev/cloud/shepherd/equinix/wrapngo"
-	clicontext "source.monogon.dev/metropolis/cli/pkg/context"
-)
-
-var rebootCmd = &cobra.Command{
-	Use:   "reboot [project] [id]",
-	Short: "Reboots all or one specific node",
-	Args:  cobra.MaximumNArgs(1),
-	Run:   doReboot,
-}
-
-func init() {
-	rootCmd.AddCommand(rebootCmd)
-}
-
-func doReboot(cmd *cobra.Command, args []string) {
-	ctx := clicontext.WithInterrupt(context.Background())
-	api := wrapngo.New(&c)
-
-	klog.Infof("Requesting device list...")
-	devices, err := api.ListDevices(ctx, args[0])
-	if err != nil {
-		klog.Fatal(err)
-	}
-
-	for _, d := range devices {
-		if len(args) == 2 && args[1] != d.ID {
-			continue
-		}
-
-		err := api.RebootDevice(ctx, d.ID)
-		if err != nil {
-			klog.Error(err)
-			continue
-		}
-		klog.Infof("rebooted %s", d.ID)
-	}
-}
diff --git a/cloud/shepherd/equinix/cli/cmd_yoink.go b/cloud/shepherd/equinix/cli/cmd_yoink.go
deleted file mode 100644
index fc8973e..0000000
--- a/cloud/shepherd/equinix/cli/cmd_yoink.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package main
-
-import (
-	"bufio"
-	"context"
-	"os"
-	"sort"
-	"strconv"
-	"strings"
-
-	"github.com/packethost/packngo"
-	"github.com/spf13/cobra"
-	"k8s.io/klog/v2"
-
-	"source.monogon.dev/cloud/shepherd/equinix/wrapngo"
-	clicontext "source.monogon.dev/metropolis/cli/pkg/context"
-)
-
-var yoinkCmd = &cobra.Command{
-	Use: "yoink",
-	Long: `This moves a specified amount of servers that match the given spec to a different metro.
-While spec is a easy to find argument that matches the equinix system spec e.g. w3amd.75xx24c.512.8160.x86, 
-metro does not represent the public facing name. Instead it is the acutal datacenter name e.g. fr2"`,
-	Short: "Move a server base on the spec from one to another project",
-	Args:  cobra.NoArgs,
-	Run:   doYoink,
-}
-
-func init() {
-	yoinkCmd.Flags().Int("count", 1, "how many machines should be moved")
-	yoinkCmd.Flags().String("equinix_source_project", "", "from which project should the machine be yoinked")
-	yoinkCmd.Flags().String("equinix_target_project", "", "to which project should the machine be moved")
-	yoinkCmd.Flags().String("spec", "", "which device spec should be moved")
-	yoinkCmd.Flags().String("metro", "", "to which metro should be moved")
-	rootCmd.AddCommand(yoinkCmd)
-}
-
-func doYoink(cmd *cobra.Command, args []string) {
-	srcProject, err := cmd.Flags().GetString("equinix_source_project")
-	if err != nil {
-		klog.Exitf("flag: %v", err)
-	}
-
-	dstProject, err := cmd.Flags().GetString("equinix_target_project")
-	if err != nil {
-		klog.Exitf("flag: %v", err)
-	}
-
-	if srcProject == "" || dstProject == "" {
-		klog.Exitf("missing project flags")
-	}
-
-	count, err := cmd.Flags().GetInt("count")
-	if err != nil {
-		klog.Exitf("flag: %v", err)
-	}
-
-	spec, err := cmd.Flags().GetString("spec")
-	if err != nil {
-		klog.Exitf("flag: %v", err)
-	}
-
-	if spec == "" {
-		klog.Exitf("missing spec flag")
-	}
-
-	metro, err := cmd.Flags().GetString("metro")
-	if err != nil {
-		klog.Exitf("flag: %v", err)
-	}
-
-	if metro == "" {
-		klog.Exitf("missing metro flag")
-	}
-
-	ctx := clicontext.WithInterrupt(context.Background())
-	api := wrapngo.New(&c)
-
-	klog.Infof("Listing reservations for %q", srcProject)
-	reservations, err := api.ListReservations(ctx, srcProject)
-	if err != nil {
-		klog.Exitf("Failed to list reservations: %v", err)
-	}
-
-	type configDC struct {
-		config string
-		dc     string
-	}
-	mtypes := make(map[configDC]int)
-
-	var matchingReservations []packngo.HardwareReservation
-	reqType := configDC{config: strings.ToLower(spec), dc: strings.ToLower(metro)}
-
-	klog.Infof("Got %d reservations", len(reservations))
-	for _, r := range reservations {
-		curType := configDC{config: strings.ToLower(r.Plan.Name), dc: strings.ToLower(r.Facility.Metro.Code)}
-
-		mtypes[curType]++
-		if curType == reqType {
-			matchingReservations = append(matchingReservations, r)
-		}
-	}
-
-	klog.Infof("Found the following configurations:")
-	for dc, c := range mtypes {
-		klog.Infof("%s | %s | %d", dc.dc, dc.config, c)
-	}
-
-	if len(matchingReservations) == 0 {
-		klog.Exitf("Configuration not found: %s - %s", reqType.dc, reqType.config)
-	}
-
-	if len(matchingReservations)-count < 0 {
-		klog.Exitf("Not enough machines with matching configuration found ")
-	}
-
-	// prefer hosts that are not deployed
-	sort.Slice(matchingReservations, func(i, j int) bool {
-		return matchingReservations[i].Device == nil && matchingReservations[j].Device != nil
-	})
-
-	toMove := matchingReservations[:count]
-	var toDelete []string
-	for _, r := range toMove {
-		if r.Device != nil {
-			toDelete = append(toDelete, r.Device.Hostname)
-		}
-	}
-
-	stdInReader := bufio.NewReader(os.Stdin)
-	klog.Infof("Will move %d machines with spec %s in %s from %s to %s.", count, spec, metro, srcProject, dstProject)
-	if len(toDelete) > 0 {
-		klog.Warningf("Not enough free machines found. This will delete %d provisioned hosts! Hosts scheduled for deletion: ", len(toDelete))
-		klog.Warningf("%s", strings.Join(toDelete, ", "))
-		klog.Warningf("Please confirm by inputting in the number of machines that will be moved.")
-
-		read, err := stdInReader.ReadString('\n')
-		if err != nil {
-			klog.Exitf("failed reading input: %v", err)
-		}
-
-		atoi, err := strconv.Atoi(strings.TrimSpace(read))
-		if err != nil {
-			klog.Exitf("failed parsing number: %v", err)
-		}
-
-		if atoi != len(toDelete) {
-			klog.Exitf("Confirmation failed! Wanted \"%q\" got \"%d\"", len(toDelete), atoi)
-		} else {
-			klog.Infof("Thanks for the confirmation! continuing...")
-		}
-	}
-
-	klog.Infof("Note: It can be normal for a device move to fail for project validation issues. This is a known issue and can be ignored")
-	for _, r := range matchingReservations[:count] {
-		if r.Device != nil {
-			klog.Warningf("Deleting server %s (%s) on %s", r.Device.ID, r.Device.Hostname, r.ID)
-
-			if err := api.DeleteDevice(ctx, r.Device.ID); err != nil {
-				klog.Errorf("failed deleting device %s (%s): %v", r.Device.ID, r.Device.Hostname, err)
-				continue
-			}
-		}
-
-		_, err := api.MoveReservation(ctx, r.ID, dstProject)
-		if err != nil {
-			klog.Errorf("failed moving device %s: %v", r.ID, err)
-		}
-	}
-}
diff --git a/cloud/shepherd/equinix/cli/main.go b/cloud/shepherd/equinix/cli/main.go
deleted file mode 100644
index b4fe1dd..0000000
--- a/cloud/shepherd/equinix/cli/main.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package main
-
-import (
-	"flag"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/klog/v2"
-
-	"source.monogon.dev/cloud/shepherd/equinix/wrapngo"
-)
-
-// rootCmd represents the base command when called without any subcommands
-var rootCmd = &cobra.Command{
-	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
-		if c.APIKey == "" || c.User == "" {
-			klog.Exitf("-equinix_api_username and -equinix_api_key must be set")
-		}
-		return nil
-	},
-}
-
-var c wrapngo.Opts
-
-func init() {
-	c.RegisterFlags()
-	rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
-}
-
-func main() {
-	cobra.CheckErr(rootCmd.Execute())
-}
diff --git a/cloud/shepherd/equinix/manager/provisioner.go b/cloud/shepherd/equinix/manager/provisioner.go
deleted file mode 100644
index 03dffa6..0000000
--- a/cloud/shepherd/equinix/manager/provisioner.go
+++ /dev/null
@@ -1,449 +0,0 @@
-package manager
-
-import (
-	"context"
-	"errors"
-	"flag"
-	"fmt"
-	"sort"
-	"time"
-
-	"github.com/google/uuid"
-	"github.com/packethost/packngo"
-	"golang.org/x/time/rate"
-	"k8s.io/klog/v2"
-
-	"source.monogon.dev/cloud/bmaas/bmdb"
-	"source.monogon.dev/cloud/bmaas/bmdb/metrics"
-	"source.monogon.dev/cloud/bmaas/bmdb/model"
-	"source.monogon.dev/cloud/lib/sinbin"
-	ecl "source.monogon.dev/cloud/shepherd/equinix/wrapngo"
-)
-
-// ProvisionerConfig configures the provisioning process.
-type ProvisionerConfig struct {
-	// OS defines the operating system new devices are created with. Its format
-	// is specified by Equinix API.
-	OS string
-	// MaxCount is the maximum count of managed servers. No new devices will be
-	// created after reaching the limit. No attempt will be made to reduce the
-	// server count.
-	MaxCount uint
-
-	// ReconcileLoopLimiter limits the rate of the main reconciliation loop
-	// iterating. As new machines are being provisioned, each loop will cause one
-	// 'long' ListHardwareReservations call to Equinix.
-	ReconcileLoopLimiter *rate.Limiter
-
-	// DeviceCreation limits the rate at which devices are created within
-	// Equinix through use of appropriate API calls.
-	DeviceCreationLimiter *rate.Limiter
-
-	// Assimilate Equinix machines that match the configured device prefix into the
-	// BMDB as Provided. This should only be used for manual testing with
-	// -bmdb_eat_my_data.
-	Assimilate bool
-
-	// ReservationChunkSize is how many Equinix machines will try to be spawned in a
-	// single reconciliation loop. Higher numbers allow for faster initial
-	// provisioning, but lower numbers decrease potential raciness with other systems
-	// and make sure that other parts of the reconciliation logic are ran regularly.
-	//
-	// 20 is decent starting point.
-	ReservationChunkSize uint
-
-	// UseProjectKeys defines if the provisioner adds all ssh keys defined inside
-	// the used project to every new machine. This is only used for debug purposes.
-	UseProjectKeys bool
-}
-
-func (p *ProvisionerConfig) RegisterFlags() {
-	flag.StringVar(&p.OS, "provisioner_os", "ubuntu_20_04", "OS that provisioner will deploy on Equinix machines. Not the target OS for cluster customers.")
-	flag.UintVar(&p.MaxCount, "provisioner_max_machines", 50, "Limit of machines that the provisioner will attempt to pull into the BMDB. Zero for no limit.")
-	flagLimiter(&p.ReconcileLoopLimiter, "provisioner_reconciler_rate", "1m,1", "Rate limiting for main provisioner reconciliation loop")
-	flagLimiter(&p.DeviceCreationLimiter, "provisioner_device_creation_rate", "5s,1", "Rate limiting for Equinix device/machine creation")
-	flag.BoolVar(&p.Assimilate, "provisioner_assimilate", false, "Assimilate matching machines in Equinix project into BMDB as Provided. Only to be used when manually testing.")
-	flag.UintVar(&p.ReservationChunkSize, "provisioner_reservation_chunk_size", 20, "How many machines will the provisioner attempt to create in a single reconciliation loop iteration")
-	flag.BoolVar(&p.UseProjectKeys, "provisioner_use_project_keys", false, "Add all Equinix project keys to newly provisioned machines, not just the provisioner's managed key. Debug/development only.")
-}
-
-// Provisioner implements the server provisioning logic. Provisioning entails
-// bringing all available hardware reservations (subject to limits) into BMDB as
-// machines provided by Equinix.
-type Provisioner struct {
-	config       *ProvisionerConfig
-	sharedConfig *SharedConfig
-
-	// cl is the wrapngo client instance used.
-	cl ecl.Client
-
-	// badReservations is a holiday resort for Equinix hardware reservations which
-	// failed to be provisioned for some reason or another. We keep a list of them in
-	// memory just so that we don't repeatedly try to provision the same known bad
-	// machines.
-	badReservations sinbin.Sinbin[string]
-}
-
-// New creates a Provisioner instance, checking ProvisionerConfig and
-// SharedConfig for errors.
-func (c *ProvisionerConfig) New(cl ecl.Client, sc *SharedConfig) (*Provisioner, error) {
-	// If these are unset, it's probably because someone is using us as a library.
-	// Provide error messages useful to code users instead of flag names.
-	if c.OS == "" {
-		return nil, fmt.Errorf("OS must be set")
-	}
-	if c.ReconcileLoopLimiter == nil {
-		return nil, fmt.Errorf("ReconcileLoopLimiter must be set")
-	}
-	if c.DeviceCreationLimiter == nil {
-		return nil, fmt.Errorf("DeviceCreationLimiter must be set")
-	}
-	if c.ReservationChunkSize == 0 {
-		return nil, fmt.Errorf("ReservationChunkSize must be set")
-	}
-	return &Provisioner{
-		config:       c,
-		sharedConfig: sc,
-
-		cl: cl,
-	}, nil
-}
-
-// Run the provisioner blocking the current goroutine until the given context
-// expires.
-func (p *Provisioner) Run(ctx context.Context, conn *bmdb.Connection) error {
-
-	var sess *bmdb.Session
-	var err error
-	for {
-		if sess == nil {
-			sess, err = conn.StartSession(ctx, bmdb.SessionOption{Processor: metrics.ProcessorShepherdProvisioner})
-			if err != nil {
-				return fmt.Errorf("could not start BMDB session: %w", err)
-			}
-		}
-		err = p.runInSession(ctx, sess)
-
-		switch {
-		case err == nil:
-		case errors.Is(err, ctx.Err()):
-			return err
-		case errors.Is(err, bmdb.ErrSessionExpired):
-			klog.Errorf("Session expired, restarting...")
-			sess = nil
-			time.Sleep(time.Second)
-		case err != nil:
-			klog.Errorf("Processing failed: %v", err)
-			// TODO(q3k): close session
-			time.Sleep(time.Second)
-		}
-	}
-}
-
-type machineListing struct {
-	machines []uuid.UUID
-	err      error
-}
-
-// runInSession executes one iteration of the provisioner's control loop within a
-// BMDB session. This control loop attempts to bring all Equinix hardware
-// reservations into machines in the BMDB, subject to limits.
-func (p *Provisioner) runInSession(ctx context.Context, sess *bmdb.Session) error {
-	if err := p.config.ReconcileLoopLimiter.Wait(ctx); err != nil {
-		return err
-	}
-
-	providerC := make(chan *machineListing, 1)
-	bmdbC := make(chan *machineListing, 1)
-
-	klog.Infof("Getting provider and bmdb machines...")
-
-	// Make sub-context for two parallel operations, and so that we can cancel one
-	// immediately if the other fails.
-	subCtx, subCtxC := context.WithCancel(ctx)
-	defer subCtxC()
-
-	go func() {
-		machines, err := p.listInProvider(subCtx)
-		providerC <- &machineListing{
-			machines: machines,
-			err:      err,
-		}
-	}()
-	go func() {
-		machines, err := p.listInBMDB(subCtx, sess)
-		bmdbC <- &machineListing{
-			machines: machines,
-			err:      err,
-		}
-	}()
-	var inProvider, inBMDB *machineListing
-	for {
-		select {
-		case inProvider = <-providerC:
-			if err := inProvider.err; err != nil {
-				return fmt.Errorf("listing provider machines failed: %w", err)
-			}
-			klog.Infof("Got %d machines managed in provider.", len(inProvider.machines))
-		case inBMDB = <-bmdbC:
-			if err := inBMDB.err; err != nil {
-				return fmt.Errorf("listing BMDB machines failed: %w", err)
-			}
-			klog.Infof("Got %d machines in BMDB.", len(inBMDB.machines))
-		}
-		if inProvider != nil && inBMDB != nil {
-			break
-		}
-	}
-
-	subCtxC()
-	if err := p.reconcile(ctx, sess, inProvider.machines, inBMDB.machines); err != nil {
-		return fmt.Errorf("reconciliation failed: %w", err)
-	}
-	return nil
-}
-
-// listInProviders returns all machines that the provider thinks we should be
-// managing.
-func (p *Provisioner) listInProvider(ctx context.Context) ([]uuid.UUID, error) {
-	devices, err := p.sharedConfig.managedDevices(ctx, p.cl)
-	if err != nil {
-		return nil, fmt.Errorf("while fetching managed machines: %w", err)
-	}
-	var pvr []uuid.UUID
-	for _, dev := range devices {
-		id, err := uuid.Parse(dev.ID)
-		if err != nil {
-			klog.Errorf("Device ID %q is not UUID, skipping", dev.ID)
-		} else {
-			pvr = append(pvr, id)
-		}
-	}
-	sort.Slice(pvr, func(i, j int) bool {
-		return pvr[i].String() < pvr[j].String()
-	})
-	return pvr, nil
-}
-
-// listInBMDB returns all the machines that the BMDB thinks we should be managing.
-func (p *Provisioner) listInBMDB(ctx context.Context, sess *bmdb.Session) ([]uuid.UUID, error) {
-	var res []uuid.UUID
-	err := sess.Transact(ctx, func(q *model.Queries) error {
-		machines, err := q.GetProvidedMachines(ctx, model.ProviderEquinix)
-		if err != nil {
-			return err
-		}
-		res = make([]uuid.UUID, len(machines))
-		for i, machine := range machines {
-			id, err := uuid.Parse(machine.ProviderID)
-			if err != nil {
-				klog.Errorf("BMDB machine %s has unparseable provider ID %q", machine.MachineID, machine.ProviderID)
-			} else {
-				res[i] = id
-			}
-		}
-		return nil
-	})
-	if err != nil {
-		return nil, err
-	}
-	sort.Slice(res, func(i, j int) bool {
-		return res[i].String() < res[j].String()
-	})
-	return res, nil
-}
-
-// reconcile takes a list of machines that the provider thinks we should be
-// managing and that the BMDB thinks we should be managing, and tries to make
-// sense of that. First, some checks are performed across the two lists to make
-// sure we haven't dropped anything. Then, additional machines are deployed from
-// hardware reservations as needed.
-func (p *Provisioner) reconcile(ctx context.Context, sess *bmdb.Session, inProvider, inBMDB []uuid.UUID) error {
-	klog.Infof("Reconciling...")
-
-	bmdb := make(map[string]bool)
-	provider := make(map[string]bool)
-	for _, machine := range inProvider {
-		provider[machine.String()] = true
-	}
-	for _, machine := range inBMDB {
-		bmdb[machine.String()] = true
-	}
-
-	managed := make(map[string]bool)
-
-	// Some desynchronization between the BMDB and Provider point of view might be so
-	// bad we shouldn't attempt to do any work, at least not any time soon.
-	badbadnotgood := false
-
-	// Find any machines supposedly managed by us in the provider, but not in the
-	// BMDB, and assimilate them if so configured.
-	for machine, _ := range provider {
-		if bmdb[machine] {
-			managed[machine] = true
-			continue
-		}
-		if p.config.Assimilate {
-			klog.Warningf("Provider machine %s has no corresponding machine in BMDB. Assimilating it.", machine)
-			if err := p.assimilate(ctx, sess, machine); err != nil {
-				klog.Errorf("Failed to assimilate: %v", err)
-			} else {
-				managed[machine] = true
-			}
-		} else {
-			klog.Errorf("Provider machine %s has no corresponding machine in BMDB.", machine)
-			badbadnotgood = true
-		}
-	}
-
-	// Find any machines in the BMDB but not in the provider.
-	for machine, _ := range bmdb {
-		if !provider[machine] {
-			klog.Errorf("Provider device ID %s referred to in BMDB (from TODO) but missing in provider.", machine)
-			badbadnotgood = true
-		}
-	}
-
-	// Bail if things are weird.
-	if badbadnotgood {
-		klog.Errorf("Something's very wrong. Bailing early and refusing to do any work.")
-		return fmt.Errorf("fatal discrepency between BMDB and provider")
-	}
-
-	// Summarize all managed machines, which is the intersection of BMDB and
-	// Provisioner machines, usually both of these sets being equal.
-	nmanaged := len(managed)
-	klog.Infof("Total managed machines: %d", nmanaged)
-
-	if p.config.MaxCount != 0 && p.config.MaxCount <= uint(nmanaged) {
-		klog.Infof("Not bringing up more machines (at limit of %d machines)", p.config.MaxCount)
-		return nil
-	}
-
-	limitName := "no limit"
-	if p.config.MaxCount != 0 {
-		limitName = fmt.Sprintf("%d", p.config.MaxCount)
-	}
-	klog.Infof("Below managed machine limit (%s), bringing up more...", limitName)
-	klog.Infof("Retrieving hardware reservations, this will take a while...")
-	reservations, err := p.cl.ListReservations(ctx, p.sharedConfig.ProjectId)
-	if err != nil {
-		return fmt.Errorf("failed to list reservations: %w", err)
-	}
-
-	// Collect all reservations.
-	var toProvision []packngo.HardwareReservation
-	var inUse, notProvisionable, penalized int
-	for _, reservation := range reservations {
-		if reservation.Device != nil {
-			inUse++
-			continue
-		}
-		if !reservation.Provisionable {
-			notProvisionable++
-			continue
-		}
-		if p.badReservations.Penalized(reservation.ID) {
-			penalized++
-			continue
-		}
-		toProvision = append(toProvision, reservation)
-	}
-	klog.Infof("Retrieved hardware reservations: %d (total), %d (available), %d (in use), %d (not provisionable), %d (penalized)", len(reservations), len(toProvision), inUse, notProvisionable, penalized)
-
-	// Limit them to MaxCount, if applicable.
-	if p.config.MaxCount != 0 {
-		needed := int(p.config.MaxCount) - nmanaged
-		if len(toProvision) < needed {
-			needed = len(toProvision)
-		}
-		toProvision = toProvision[:needed]
-	}
-
-	// Limit them to an arbitrary 'chunk' size so that we don't do too many things in
-	// a single reconciliation operation.
-	if uint(len(toProvision)) > p.config.ReservationChunkSize {
-		toProvision = toProvision[:p.config.ReservationChunkSize]
-	}
-
-	if len(toProvision) == 0 {
-		klog.Infof("No more hardware reservations available, or all filtered out.")
-		return nil
-	}
-
-	klog.Infof("Bringing up %d machines...", len(toProvision))
-	for _, res := range toProvision {
-		p.config.DeviceCreationLimiter.Wait(ctx)
-		if err := p.provision(ctx, sess, res); err != nil {
-			klog.Errorf("Failed to provision reservation %s: %v", res.ID, err)
-			until := time.Now().Add(time.Hour)
-			klog.Errorf("Adding hardware reservation %s to sinbin until %s", res.ID, until)
-			p.badReservations.Add(res.ID, until)
-		}
-	}
-
-	return nil
-}
-
-// provision attempts to create a device within Equinix using given Hardware
-// Reservation rsv. The resulting device is registered with BMDB, and tagged as
-// "provided" in the process.
-func (pr *Provisioner) provision(ctx context.Context, sess *bmdb.Session, rsv packngo.HardwareReservation) error {
-	klog.Infof("Creating a new device using reservation ID %s.", rsv.ID)
-	hostname := pr.sharedConfig.DevicePrefix + rsv.ID[:18]
-	kid, err := pr.sharedConfig.sshEquinixId(ctx, pr.cl)
-	if err != nil {
-		return err
-	}
-	req := &packngo.DeviceCreateRequest{
-		Hostname:              hostname,
-		OS:                    pr.config.OS,
-		Plan:                  rsv.Plan.Slug,
-		ProjectID:             pr.sharedConfig.ProjectId,
-		HardwareReservationID: rsv.ID,
-		ProjectSSHKeys:        []string{kid},
-	}
-	if pr.config.UseProjectKeys {
-		klog.Warningf("INSECURE: Machines will be created with ALL PROJECT SSH KEYS!")
-		req.ProjectSSHKeys = nil
-	}
-
-	nd, err := pr.cl.CreateDevice(ctx, req)
-	if err != nil {
-		return fmt.Errorf("while creating new device within Equinix: %w", err)
-	}
-	klog.Infof("Created a new device within Equinix (RID: %s, PID: %s, HOST: %s)", rsv.ID, nd.ID, hostname)
-
-	err = pr.assimilate(ctx, sess, nd.ID)
-	if err != nil {
-		// TODO(mateusz@monogon.tech) at this point the device at Equinix isn't
-		// matched by a BMDB record. Schedule device deletion or make sure this
-		// case is being handled elsewhere.
-		return err
-	}
-	return nil
-}
-
-// assimilate brings in an already existing machine from Equinix into the BMDB.
-// This is only used in manual testing.
-func (pr *Provisioner) assimilate(ctx context.Context, sess *bmdb.Session, deviceID string) error {
-	return sess.Transact(ctx, func(q *model.Queries) error {
-		// Create a new machine record within BMDB.
-		m, err := q.NewMachine(ctx)
-		if err != nil {
-			return fmt.Errorf("while creating a new machine record in BMDB: %w", err)
-		}
-
-		// Link the new machine with the Equinix device, and tag it "provided".
-		p := model.MachineAddProvidedParams{
-			MachineID:  m.MachineID,
-			ProviderID: deviceID,
-			Provider:   model.ProviderEquinix,
-		}
-		klog.Infof("Setting \"provided\" tag (ID: %s, PID: %s, Provider: %s).", p.MachineID, p.ProviderID, p.Provider)
-		if err := q.MachineAddProvided(ctx, p); err != nil {
-			return fmt.Errorf("while tagging machine active: %w", err)
-		}
-		return nil
-	})
-}
diff --git a/cloud/shepherd/equinix/manager/server/BUILD.bazel b/cloud/shepherd/equinix/manager/server/BUILD.bazel
deleted file mode 100644
index 55172bd..0000000
--- a/cloud/shepherd/equinix/manager/server/BUILD.bazel
+++ /dev/null
@@ -1,42 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
-load("@io_bazel_rules_docker//container:container.bzl", "container_image")
-load("//build/static_binary_tarball:def.bzl", "static_binary_tarball")
-
-go_library(
-    name = "server_lib",
-    srcs = ["main.go"],
-    importpath = "source.monogon.dev/cloud/shepherd/equinix/manager/server",
-    visibility = ["//visibility:private"],
-    deps = [
-        "//cloud/bmaas/bmdb",
-        "//cloud/bmaas/bmdb/webug",
-        "//cloud/lib/component",
-        "//cloud/shepherd/equinix/manager",
-        "//cloud/shepherd/equinix/wrapngo",
-        "//metropolis/cli/pkg/context",
-        "@io_k8s_klog_v2//:klog",
-    ],
-)
-
-go_binary(
-    name = "server",
-    embed = [":server_lib"],
-    visibility = ["//visibility:public"],
-)
-
-static_binary_tarball(
-    name = "server_layer",
-    executable = ":server",
-)
-
-container_image(
-    name = "server_container",
-    base = "@go_image_base//image",
-    entrypoint = ["/app/cloud/shepherd/equinix/manager/server/server_/server"],
-    tars = [
-        ":server_layer",
-        "//cloud/takeover:takeover_layer",
-    ],
-    visibility = ["//visibility:public"],
-    workdir = "/app",
-)
diff --git a/cloud/shepherd/equinix/manager/shared_config.go b/cloud/shepherd/equinix/manager/shared_config.go
deleted file mode 100644
index 6ece4ce..0000000
--- a/cloud/shepherd/equinix/manager/shared_config.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package manager
-
-import (
-	"context"
-	"crypto/ed25519"
-	"crypto/rand"
-	"errors"
-	"flag"
-	"fmt"
-	"os"
-	"strings"
-	"sync"
-
-	"github.com/packethost/packngo"
-	"golang.org/x/crypto/ssh"
-	"k8s.io/klog/v2"
-
-	ecl "source.monogon.dev/cloud/shepherd/equinix/wrapngo"
-)
-
-var (
-	NoSuchKey = errors.New("no such key")
-)
-
-// SharedConfig contains configuration options used by both the Initializer and
-// Provisioner components of the Shepherd. In CLI scenarios, RegisterFlags should
-// be called to configure this struct from CLI flags. Otherwise, this structure
-// should be explicitly configured, as the default values are not valid.
-type SharedConfig struct {
-	// ProjectId is the Equinix project UUID used by the manager. See Equinix API
-	// documentation for details. Must be set.
-	ProjectId string
-
-	// Label specifies the ID to use when handling the Equinix-registered SSH key
-	// used to authenticate to newly created servers. Must be set.
-	KeyLabel string
-
-	// myKey guards Key.
-	muKey sync.Mutex
-
-	// SSH key to use when creating machines and then connecting to them. If not
-	// provided, it will be automatically loaded from KeyPersistPath, and if that
-	// doesn't exist either, it will be first generated and persisted there.
-	Key ed25519.PrivateKey
-
-	// Path at which the SSH key will be loaded from and persisted to, if Key is not
-	// explicitly set. Either KeyPersistPath or Key must be set.
-	KeyPersistPath string
-
-	// Prefix applied to all devices (machines) created by the Provisioner, and used
-	// by the Provisioner to identify machines which it managed. Must be set.
-	DevicePrefix string
-
-	// configPrefix will be set to the prefix of the latest RegisterFlags call and
-	// will be then used by various methods to display the full name of a
-	// misconfigured flag.
-	configPrefix string
-}
-
-func (c *SharedConfig) check() error {
-	if c.ProjectId == "" {
-		return fmt.Errorf("-%sequinix_project_id must be set", c.configPrefix)
-	}
-	if c.KeyLabel == "" {
-		return fmt.Errorf("-%sequinix_ssh_key_label must be set", c.configPrefix)
-	}
-	if c.DevicePrefix == "" {
-		return fmt.Errorf("-%sequinix_device_prefix must be set", c.configPrefix)
-	}
-	return nil
-}
-
-func (k *SharedConfig) RegisterFlags(prefix string) {
-	k.configPrefix = prefix
-
-	flag.StringVar(&k.ProjectId, prefix+"equinix_project_id", "", "Equinix project ID where resources will be managed")
-	flag.StringVar(&k.KeyLabel, prefix+"equinix_ssh_key_label", "shepherd-FIXME", "Label used to identify managed SSH key in Equinix project")
-	flag.StringVar(&k.KeyPersistPath, prefix+"ssh_key_path", "shepherd-key.priv", "Local filesystem path to read SSH key from, and save generated key to")
-	flag.StringVar(&k.DevicePrefix, prefix+"equinix_device_prefix", "shepherd-FIXME-", "Prefix applied to all devices (machines) in Equinix project, used to identify managed machines")
-}
-
-// sshKey returns the SSH key as defined by the Key and KeyPersistPath options,
-// loading/generating/persisting it as necessary.
-func (c *SharedConfig) sshKey() (ed25519.PrivateKey, error) {
-	c.muKey.Lock()
-	defer c.muKey.Unlock()
-
-	if c.Key != nil {
-		return c.Key, nil
-	}
-	if c.KeyPersistPath == "" {
-		return nil, fmt.Errorf("-%sequinix_ssh_key_path must be set", c.configPrefix)
-	}
-
-	data, err := os.ReadFile(c.KeyPersistPath)
-	switch {
-	case err == nil:
-		if len(data) != ed25519.PrivateKeySize {
-			return nil, fmt.Errorf("%s is not a valid ed25519 private key", c.KeyPersistPath)
-		}
-		c.Key = data
-		klog.Infof("Loaded SSH key from %s", c.KeyPersistPath)
-		return c.Key, nil
-	case os.IsNotExist(err):
-		if err := c.sshGenerateUnlocked(); err != nil {
-			return nil, err
-		}
-		if err := os.WriteFile(c.KeyPersistPath, c.Key, 0400); err != nil {
-			return nil, fmt.Errorf("could not persist key: %w", err)
-		}
-		return c.Key, nil
-	default:
-		return nil, fmt.Errorf("could not load peristed key: %w", err)
-	}
-}
-
-// sshPub returns the SSH public key marshaled for use, based on sshKey.
-func (c *SharedConfig) sshPub() (string, error) {
-	private, err := c.sshKey()
-	if err != nil {
-		return "", err
-	}
-	// Marshal the public key part in OpenSSH authorized_keys format that will be
-	// registered with Equinix Metal.
-	sshpub, err := ssh.NewPublicKey(private.Public())
-	if err != nil {
-		return "", fmt.Errorf("while building SSH public key: %w", err)
-	}
-	return string(ssh.MarshalAuthorizedKey(sshpub)), nil
-}
-
-// sshSigner builds an ssh.Signer (for use in SSH connections) based on sshKey.
-func (c *SharedConfig) sshSigner() (ssh.Signer, error) {
-	private, err := c.sshKey()
-	if err != nil {
-		return nil, err
-	}
-	// Set up the internal ssh.Signer to be later used to initiate SSH
-	// connections with newly provided hosts.
-	signer, err := ssh.NewSignerFromKey(private)
-	if err != nil {
-		return nil, fmt.Errorf("while building SSH signer: %w", err)
-	}
-	return signer, nil
-}
-
-// sshGenerateUnlocked saves a new private key into SharedConfig.Key.
-func (c *SharedConfig) sshGenerateUnlocked() error {
-	if c.Key != nil {
-		return nil
-	}
-	_, priv, err := ed25519.GenerateKey(rand.Reader)
-	if err != nil {
-		return fmt.Errorf("while generating SSH key: %w", err)
-	}
-	c.Key = priv
-	return nil
-}
-
-// sshEquinixGet looks up the Equinix key matching SharedConfig.KeyLabel,
-// returning its packngo.SSHKey instance.
-func (c *SharedConfig) sshEquinix(ctx context.Context, cl ecl.Client) (*packngo.SSHKey, error) {
-	ks, err := cl.ListSSHKeys(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("while listing SSH keys: %w", err)
-	}
-
-	for _, k := range ks {
-		if k.Label == c.KeyLabel {
-			return &k, nil
-		}
-	}
-	return nil, NoSuchKey
-}
-
-// sshEquinixId looks up the Equinix key identified by SharedConfig.KeyLabel,
-// returning its Equinix-assigned UUID.
-func (c *SharedConfig) sshEquinixId(ctx context.Context, cl ecl.Client) (string, error) {
-	k, err := c.sshEquinix(ctx, cl)
-	if err != nil {
-		return "", err
-	}
-	return k.ID, nil
-}
-
-// sshEquinixUpdate makes sure the existing SSH key registered with Equinix
-// matches the one from sshPub.
-func (c *SharedConfig) sshEquinixUpdate(ctx context.Context, cl ecl.Client, kid string) error {
-	pub, err := c.sshPub()
-	if err != nil {
-		return err
-	}
-	_, err = cl.UpdateSSHKey(ctx, kid, &packngo.SSHKeyUpdateRequest{
-		Key: &pub,
-	})
-	if err != nil {
-		return fmt.Errorf("while updating the SSH key: %w", err)
-	}
-	return nil
-}
-
-// sshEquinixUpload registers a new SSH key from sshPub.
-func (c *SharedConfig) sshEquinixUpload(ctx context.Context, cl ecl.Client) error {
-	pub, err := c.sshPub()
-	if err != nil {
-		return fmt.Errorf("while generating public key: %w", err)
-	}
-	_, err = cl.CreateSSHKey(ctx, &packngo.SSHKeyCreateRequest{
-		Label:     c.KeyLabel,
-		Key:       pub,
-		ProjectID: c.ProjectId,
-	})
-	if err != nil {
-		return fmt.Errorf("while creating an SSH key: %w", err)
-	}
-	return nil
-}
-
-// SSHEquinixEnsure initializes the locally managed SSH key (from a persistence
-// path or explicitly set key) and updates or uploads it to Equinix. The key is
-// generated as needed The key is generated as needed
-func (c *SharedConfig) SSHEquinixEnsure(ctx context.Context, cl ecl.Client) error {
-	k, err := c.sshEquinix(ctx, cl)
-	switch err {
-	case NoSuchKey:
-		if err := c.sshEquinixUpload(ctx, cl); err != nil {
-			return fmt.Errorf("while uploading key: %w", err)
-		}
-		return nil
-	case nil:
-		if err := c.sshEquinixUpdate(ctx, cl, k.ID); err != nil {
-			return fmt.Errorf("while updating key: %w", err)
-		}
-		return nil
-	default:
-		return err
-	}
-}
-
-// managedDevices provides a map of device provider IDs to matching
-// packngo.Device instances. It calls Equinix API's ListDevices. The returned
-// devices are filtered according to DevicePrefix provided through Opts. The
-// returned error value, if not nil, will originate in wrapngo.
-func (c *SharedConfig) managedDevices(ctx context.Context, cl ecl.Client) (map[string]packngo.Device, error) {
-	ds, err := cl.ListDevices(ctx, c.ProjectId)
-	if err != nil {
-		return nil, err
-	}
-	dm := map[string]packngo.Device{}
-	for _, d := range ds {
-		if strings.HasPrefix(d.Hostname, c.DevicePrefix) {
-			dm[d.ID] = d
-		}
-	}
-	return dm, nil
-}
diff --git a/cloud/shepherd/equinix/wrapngo/BUILD.bazel b/cloud/shepherd/equinix/wrapngo/BUILD.bazel
deleted file mode 100644
index 521e1ca..0000000
--- a/cloud/shepherd/equinix/wrapngo/BUILD.bazel
+++ /dev/null
@@ -1,31 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "wrapngo",
-    srcs = [
-        "duct_tape.go",
-        "metrics.go",
-        "wrapn.go",
-    ],
-    importpath = "source.monogon.dev/cloud/shepherd/equinix/wrapngo",
-    visibility = ["//visibility:public"],
-    deps = [
-        "@com_github_cenkalti_backoff_v4//:backoff",
-        "@com_github_google_uuid//:uuid",
-        "@com_github_packethost_packngo//:packngo",
-        "@com_github_prometheus_client_golang//prometheus",
-        "@io_k8s_klog_v2//:klog",
-    ],
-)
-
-go_test(
-    name = "wrapngo_test",
-    timeout = "eternal",
-    srcs = ["wrapngo_live_test.go"],
-    args = ["-test.v"],
-    embed = [":wrapngo"],
-    deps = [
-        "@com_github_packethost_packngo//:packngo",
-        "@org_golang_x_crypto//ssh",
-    ],
-)
diff --git a/cloud/shepherd/equinix/wrapngo/duct_tape.go b/cloud/shepherd/equinix/wrapngo/duct_tape.go
deleted file mode 100644
index d5dab7c..0000000
--- a/cloud/shepherd/equinix/wrapngo/duct_tape.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package wrapngo
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net/http"
-	"time"
-
-	"github.com/cenkalti/backoff/v4"
-	"github.com/packethost/packngo"
-	"k8s.io/klog/v2"
-)
-
-// wrap a given fn in some reliability-increasing duct tape: context support and
-// exponential backoff retries for intermittent connectivity issues. This allows
-// us to use packngo code instead of writing our own API stub for Equinix Metal.
-//
-// The given fn will be retried until it returns a 'permanent' Equinix error (see
-// isPermanentEquinixError) or the given context expires. Additionally, fn will
-// be called with a brand new packngo client tied to the context of the wrap
-// call. Finally, the given client will also have some logging middleware
-// attached to it which can be activated by setting verbosity 5 (or greater) on
-// this file.
-//
-// The wrapped fn can be either just a plain packngo method or some complicated
-// idempotent logic, as long as it cooperates with the above contract.
-func wrap[U any](ctx context.Context, cl *client, fn func(*packngo.Client) (U, error)) (U, error) {
-	var zero U
-	if err := cl.serializer.up(ctx); err != nil {
-		return zero, err
-	}
-	defer cl.serializer.down()
-
-	bc := backoff.WithContext(cl.o.BackOff(), ctx)
-	pngo, err := cl.clientForContext(ctx)
-	if err != nil {
-		// Generally this shouldn't happen other than with programming errors, so we
-		// don't back this off.
-		return zero, fmt.Errorf("could not crate equinix client: %w", err)
-	}
-
-	var res U
-	err = backoff.Retry(func() error {
-		res, err = fn(pngo)
-		if isPermanentEquinixError(err) {
-			return backoff.Permanent(err)
-		}
-		return err
-	}, bc)
-	if err != nil {
-		return zero, err
-	}
-	return res, nil
-}
-
-type injectContextRoundTripper struct {
-	ctx      context.Context
-	original http.RoundTripper
-	metrics  *metricsSet
-}
-
-func (r *injectContextRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
-	klog.V(5).Infof("Request -> %v", req.URL.String())
-	start := time.Now()
-	res, err := r.original.RoundTrip(req.WithContext(r.ctx))
-	latency := time.Since(start)
-	r.metrics.onAPIRequestDone(req, res, err, latency)
-
-	if err != nil {
-		klog.V(5).Infof("HTTP error <- %v", err)
-	} else {
-		klog.V(5).Infof("Response <- %v", res.Status)
-	}
-	return res, err
-}
-
-func (c *client) clientForContext(ctx context.Context) (*packngo.Client, error) {
-	httpcl := &http.Client{
-		Transport: &injectContextRoundTripper{
-			ctx:      ctx,
-			original: http.DefaultTransport,
-			metrics:  c.metrics,
-		},
-	}
-	return packngo.NewClient(packngo.WithAuth(c.username, c.token), packngo.WithHTTPClient(httpcl))
-}
-
-// httpStatusCode extracts the status code from error values returned by
-// packngo methods.
-func httpStatusCode(err error) int {
-	var er *packngo.ErrorResponse
-	if err != nil && errors.As(err, &er) {
-		return er.Response.StatusCode
-	}
-	return -1
-}
-
-// IsNotFound returns true if the given error is an Equinix packngo/wrapngo 'not
-// found' error.
-func IsNotFound(err error) bool {
-	return httpStatusCode(err) == http.StatusNotFound
-}
-
-func isPermanentEquinixError(err error) bool {
-	// Invalid argument/state errors from wrapping.
-	if errors.Is(err, ErrRaceLost) {
-		return true
-	}
-	if errors.Is(err, ErrNoReservationProvided) {
-		return true
-	}
-	// Real errors returned from equinix.
-	st := httpStatusCode(err)
-	switch st {
-	case http.StatusUnauthorized:
-		return true
-	case http.StatusForbidden:
-		return true
-	case http.StatusNotFound:
-		return true
-	case http.StatusUnprocessableEntity:
-		return true
-	}
-	return false
-}
diff --git a/cloud/shepherd/equinix/wrapngo/metrics.go b/cloud/shepherd/equinix/wrapngo/metrics.go
deleted file mode 100644
index fef506b..0000000
--- a/cloud/shepherd/equinix/wrapngo/metrics.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package wrapngo
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net/http"
-	"regexp"
-	"strings"
-	"time"
-
-	"github.com/prometheus/client_golang/prometheus"
-	"k8s.io/klog/v2"
-)
-
-// metricsSet contains all the Prometheus metrics collected by wrapngo.
-type metricsSet struct {
-	requestLatencies *prometheus.HistogramVec
-	waiting          prometheus.GaugeFunc
-	inFlight         prometheus.GaugeFunc
-}
-
-func newMetricsSet(ser *serializer) *metricsSet {
-	return &metricsSet{
-		requestLatencies: prometheus.NewHistogramVec(
-			prometheus.HistogramOpts{
-				Name: "equinix_api_latency",
-				Help: "Equinix API request latency in seconds, partitioned by endpoint status code",
-			},
-			[]string{"endpoint", "status_code"},
-		),
-		waiting: prometheus.NewGaugeFunc(
-			prometheus.GaugeOpts{
-				Name: "equinix_api_waiting",
-				Help: "Number of API requests pending to be sent to Equinix but waiting on semaphore",
-			},
-			func() float64 {
-				_, waiting := ser.stats()
-				return float64(waiting)
-			},
-		),
-		inFlight: prometheus.NewGaugeFunc(
-			prometheus.GaugeOpts{
-				Name: "equinix_api_in_flight",
-				Help: "Number of API requests currently being processed by Equinix",
-			},
-			func() float64 {
-				inFlight, _ := ser.stats()
-				return float64(inFlight)
-			},
-		),
-	}
-}
-
-// getEndpointForPath converts from an Equinix API method and path (eg.
-// /metal/v1/devices/deadbeef) into an 'endpoint' name, which is an imaginary,
-// Monogon-specific name for the API endpoint accessed by this call.
-//
-// If the given path is unknown and thus cannot be converted to an endpoint name,
-// 'Unknown' is return and a warning is logged.
-//
-// We use this function to partition request statistics per API 'endpoint'. An
-// alternative to this would be to record high-level packngo function names, but
-// one packngo function call might actually emit multiple HTTP API requests - so
-// we're stuck recording the low-level requests and gathering statistics from
-// there instead.
-func getEndpointForPath(method, path string) string {
-	path = strings.TrimPrefix(path, "/metal/v1")
-	for name, match := range endpointNames {
-		if match.matches(method, path) {
-			return name
-		}
-	}
-	klog.Warningf("Unknown Equinix API %s %s - cannot determine metric endpoint name", method, path)
-	return "Unknown"
-}
-
-// requestMatch is used to match a HTTP request method/path.
-type requestMatch struct {
-	method string
-	regexp *regexp.Regexp
-}
-
-func (r *requestMatch) matches(method, path string) bool {
-	if r.method != method {
-		return false
-	}
-	return r.regexp.MatchString(path)
-}
-
-var (
-	endpointNames = map[string]requestMatch{
-		"GetDevice":           {"GET", regexp.MustCompile(`^/devices/[^/]+$`)},
-		"ListDevices":         {"GET", regexp.MustCompile(`^/(organizations|projects)/[^/]+/devices$`)},
-		"CreateDevice":        {"POST", regexp.MustCompile(`^/projects/[^/]+/devices$`)},
-		"ListReservations":    {"GET", regexp.MustCompile(`^/projects/[^/]+/hardware-reservations$`)},
-		"ListSSHKeys":         {"GET", regexp.MustCompile(`^/ssh-keys$`)},
-		"CreateSSHKey":        {"POST", regexp.MustCompile(`^/project/[^/]+/ssh-keys$`)},
-		"GetSSHKey":           {"GET", regexp.MustCompile(`^/ssh-keys/[^/]+$`)},
-		"UpdateSSHKey":        {"PATCH", regexp.MustCompile(`^/ssh-keys/[^/]+$`)},
-		"PerformDeviceAction": {"POST", regexp.MustCompile(`^/devices/[^/]+/actions$`)},
-	}
-)
-
-// onAPIRequestDone is called by the wrapngo code on every API response from
-// Equinix, and records the given parameters into metrics.
-func (m *metricsSet) onAPIRequestDone(req *http.Request, res *http.Response, err error, latency time.Duration) {
-	if m == nil {
-		return
-	}
-
-	code := "unknown"
-	if err == nil {
-		code = fmt.Sprintf("%d", res.StatusCode)
-	} else {
-		switch {
-		case errors.Is(err, context.Canceled):
-			code = "ctx canceled"
-		case errors.Is(err, context.DeadlineExceeded):
-			code = "deadline exceeded"
-		}
-	}
-	if code == "unknown" {
-		klog.Warningf("Unexpected HTTP result: req %s %s, error: %v", req.Method, req.URL.Path, res)
-	}
-
-	endpoint := getEndpointForPath(req.Method, req.URL.Path)
-	m.requestLatencies.With(prometheus.Labels{"endpoint": endpoint, "status_code": code}).Observe(latency.Seconds())
-}
diff --git a/cloud/shepherd/equinix/wrapngo/wrapn.go b/cloud/shepherd/equinix/wrapngo/wrapn.go
deleted file mode 100644
index 7bd4522..0000000
--- a/cloud/shepherd/equinix/wrapngo/wrapn.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Package wrapngo wraps packngo methods providing the following usability
-// enhancements:
-// - API call rate limiting
-// - resource-aware call retries
-// - use of a configurable back-off algorithm implementation
-// - context awareness
-//
-// The implementation is provided with the following caveats:
-//
-// There can be only one call in flight. Concurrent calls to API-related
-// methods of the same client will block. Calls returning packngo structs will
-// return nil data when a non-nil error value is returned. An
-// os.ErrDeadlineExceeded will be returned after the underlying API calls time
-// out beyond the chosen back-off algorithm implementation's maximum allowed
-// retry interval. Other errors, excluding context.Canceled and
-// context.DeadlineExceeded, indicate either an error originating at Equinix'
-// API endpoint (which may still stem from invalid call inputs), or a network
-// error.
-//
-// Packngo wrappers included below may return timeout errors even after the
-// wrapped calls succeed in the event server reply could not have been
-// received.
-//
-// This implies that effects of mutating calls can't always be verified
-// atomically, requiring explicit synchronization between API users, regardless
-// of the retry/recovery logic used.
-//
-// Having that in mind, some call wrappers exposed by this package will attempt
-// to recover from this kind of situations by requesting information on any
-// resources created, and retrying the call if needed. This approach assumes
-// any concurrent mutating API users will be synchronized, as it should be in
-// any case.
-//
-// Another way of handling this problem would be to leave it up to the user to
-// retry calls if needed, though this would leak Equinix Metal API, and
-// complicate implementations depending on this package. Due to that, the prior
-// approach was chosen.
-package wrapngo
-
-import (
-	"context"
-	"errors"
-	"flag"
-	"fmt"
-	"net/http"
-	"sync/atomic"
-	"time"
-
-	"github.com/cenkalti/backoff/v4"
-	"github.com/google/uuid"
-	"github.com/packethost/packngo"
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-// Opts conveys configurable Client parameters.
-type Opts struct {
-	// User and APIKey are the credentials used to authenticate with
-	// Metal API.
-
-	User   string
-	APIKey string
-
-	// Optional parameters:
-
-	// BackOff controls the client's behavior in the event of API calls failing
-	// due to IO timeouts by adjusting the lower bound on time taken between
-	// subsequent calls.
-	BackOff func() backoff.BackOff
-
-	// APIRate is the minimum time taken between subsequent API calls.
-	APIRate time.Duration
-
-	// Parallelism defines how many calls to the Equinix API will be issued in
-	// parallel. When this limit is reached, subsequent attmepts to call the API will
-	// block. The order of serving of pending calls is currently undefined.
-	//
-	// If not defined (ie. 0), defaults to 1.
-	Parallelism int
-
-	MetricsRegistry *prometheus.Registry
-}
-
-func (o *Opts) RegisterFlags() {
-	flag.StringVar(&o.User, "equinix_api_username", "", "Username for Equinix API")
-	flag.StringVar(&o.APIKey, "equinix_api_key", "", "Key/token/password for Equinix API")
-	flag.IntVar(&o.Parallelism, "equinix_parallelism", 3, "How many parallel connections to the Equinix API will be allowed")
-}
-
-// Client is a limited interface of methods that the Shepherd uses on Equinix. It
-// is provided to allow for dependency injection of a fake equinix API for tests.
-type Client interface {
-	// GetDevice wraps packngo's cl.Devices.Get.
-	//
-	// TODO(q3k): remove unused pid parameter.
-	GetDevice(ctx context.Context, pid, did string, opts *packngo.ListOptions) (*packngo.Device, error)
-	// ListDevices wraps packngo's cl.Device.List.
-	ListDevices(ctx context.Context, pid string) ([]packngo.Device, error)
-	// CreateDevice attempts to create a new device according to the provided
-	// request. The request _must_ configure a HardwareReservationID. This call
-	// attempts to be as idempotent as possible, and will return ErrRaceLost if a
-	// retry was needed but in the meantime the requested hardware reservation from
-	// which this machine was requested got lost.
-	CreateDevice(ctx context.Context, request *packngo.DeviceCreateRequest) (*packngo.Device, error)
-
-	UpdateDevice(ctx context.Context, id string, request *packngo.DeviceUpdateRequest) (*packngo.Device, error)
-	RebootDevice(ctx context.Context, did string) error
-	DeleteDevice(ctx context.Context, id string) error
-
-	// ListReservations returns a complete list of hardware reservations associated
-	// with project pid. This is an expensive method that takes a while to execute,
-	// handle with care.
-	ListReservations(ctx context.Context, pid string) ([]packngo.HardwareReservation, error)
-	// MoveReservation moves a reserved device to the given project.
-	MoveReservation(ctx context.Context, hardwareReservationDID, projectID string) (*packngo.HardwareReservation, error)
-
-	// ListSSHKeys wraps packngo's cl.Keys.List.
-	ListSSHKeys(ctx context.Context) ([]packngo.SSHKey, error)
-	// CreateSSHKey is idempotent - the key label can be used only once. Further
-	// calls referring to the same label and key will not yield errors. See the
-	// package comment for more info on this method's behavior and returned error
-	// values.
-	CreateSSHKey(ctx context.Context, req *packngo.SSHKeyCreateRequest) (*packngo.SSHKey, error)
-	// UpdateSSHKey is idempotent - values included in r can be applied only once,
-	// while subsequent updates using the same data don't produce errors. See the
-	// package comment for information on this method's behavior and returned error
-	// values.
-	UpdateSSHKey(ctx context.Context, kid string, req *packngo.SSHKeyUpdateRequest) (*packngo.SSHKey, error)
-
-	Close()
-}
-
-// client implements the Client interface.
-type client struct {
-	username string
-	token    string
-	o        *Opts
-	rlt      *time.Ticker
-
-	serializer *serializer
-	metrics    *metricsSet
-}
-
-// serializer is an N-semaphore channel (configured by opts.Parallelism) which is
-// used to limit the number of concurrent calls to the Equinix API.
-//
-// In addition, it implements some simple waiting/usage statistics for
-// metrics/introspection.
-type serializer struct {
-	sem     chan struct{}
-	usage   int64
-	waiting int64
-}
-
-// up blocks until the serializer has at least one available concurrent call
-// slot. If the given context expires before such a slot is available, the
-// context error is returned.
-func (s *serializer) up(ctx context.Context) error {
-	atomic.AddInt64(&s.waiting, 1)
-	select {
-	case s.sem <- struct{}{}:
-		atomic.AddInt64(&s.waiting, -1)
-		atomic.AddInt64(&s.usage, 1)
-		return nil
-	case <-ctx.Done():
-		atomic.AddInt64(&s.waiting, -1)
-		return ctx.Err()
-	}
-}
-
-// down releases a previously acquire concurrent call slot.
-func (s *serializer) down() {
-	atomic.AddInt64(&s.usage, -1)
-	<-s.sem
-}
-
-// stats returns the number of in-flight and waiting-for-semaphore requests.
-func (s *serializer) stats() (usage, waiting int64) {
-	usage = atomic.LoadInt64(&s.usage)
-	waiting = atomic.LoadInt64(&s.waiting)
-	return
-}
-
-// New creates a Client instance based on Opts. PACKNGO_DEBUG environment
-// variable can be set prior to the below call to enable verbose packngo
-// debug logs.
-func New(opts *Opts) Client {
-	return new(opts)
-}
-
-func new(opts *Opts) *client {
-	// Apply the defaults.
-	if opts.APIRate == 0 {
-		opts.APIRate = 2 * time.Second
-	}
-	if opts.BackOff == nil {
-		opts.BackOff = func() backoff.BackOff {
-			return backoff.NewExponentialBackOff()
-		}
-	}
-	if opts.Parallelism == 0 {
-		opts.Parallelism = 1
-	}
-
-	cl := &client{
-		username: opts.User,
-		token:    opts.APIKey,
-		o:        opts,
-		rlt:      time.NewTicker(opts.APIRate),
-
-		serializer: &serializer{
-			sem: make(chan struct{}, opts.Parallelism),
-		},
-	}
-	if opts.MetricsRegistry != nil {
-		ms := newMetricsSet(cl.serializer)
-		opts.MetricsRegistry.MustRegister(ms.inFlight, ms.waiting, ms.requestLatencies)
-		cl.metrics = ms
-	}
-	return cl
-}
-
-func (c *client) Close() {
-	c.rlt.Stop()
-}
-
-var (
-	ErrRaceLost              = errors.New("race lost with another API user")
-	ErrNoReservationProvided = errors.New("hardware reservation must be set")
-)
-
-func (e *client) PowerOffDevice(ctx context.Context, pid string) error {
-	_, err := wrap(ctx, e, func(p *packngo.Client) (*packngo.Response, error) {
-		r, err := p.Devices.PowerOff(pid)
-		if err != nil {
-			return nil, fmt.Errorf("Devices.PowerOff: %w", err)
-		}
-		return r, nil
-	})
-	return err
-}
-
-func (e *client) PowerOnDevice(ctx context.Context, pid string) error {
-	_, err := wrap(ctx, e, func(p *packngo.Client) (*packngo.Response, error) {
-		r, err := p.Devices.PowerOn(pid)
-		if err != nil {
-			return nil, fmt.Errorf("Devices.PowerOn: %w", err)
-		}
-		return r, nil
-	})
-	return err
-}
-
-func (e *client) DeleteDevice(ctx context.Context, id string) error {
-	_, err := wrap(ctx, e, func(p *packngo.Client) (*packngo.Response, error) {
-		r, err := p.Devices.Delete(id, false)
-		if err != nil {
-			return nil, fmt.Errorf("Devices.Delete: %w", err)
-		}
-		return r, nil
-	})
-	return err
-}
-
-func (e *client) CreateDevice(ctx context.Context, r *packngo.DeviceCreateRequest) (*packngo.Device, error) {
-	if r.HardwareReservationID == "" {
-		return nil, ErrNoReservationProvided
-	}
-	// Add a tag to the request to detect if someone snatches a hardware reservation
-	// from under us.
-	witnessTag := fmt.Sprintf("wrapngo-idempotency-%s", uuid.New().String())
-	r.Tags = append(r.Tags, witnessTag)
-
-	return wrap(ctx, e, func(cl *packngo.Client) (*packngo.Device, error) {
-		//Does the device already exist?
-		res, _, err := cl.HardwareReservations.Get(r.HardwareReservationID, nil)
-		if err != nil {
-			return nil, fmt.Errorf("couldn't check if device already exists: %w", err)
-		}
-		if res == nil {
-			return nil, fmt.Errorf("unexpected nil response")
-		}
-		if res.Device != nil {
-			// Check if we lost the race for this hardware reservation.
-			tags := make(map[string]bool)
-			for _, tag := range res.Device.Tags {
-				tags[tag] = true
-			}
-			if !tags[witnessTag] {
-				return nil, ErrRaceLost
-			}
-			return res.Device, nil
-		}
-
-		// No device yet. Try to create it.
-		dev, _, err := cl.Devices.Create(r)
-		if err == nil {
-			return dev, nil
-		}
-		// In case of a transient failure (eg. network issue), we retry the whole
-		// operation, which means we first check again if the device already exists. If
-		// it's a permanent error from the API, the backoff logic will fail immediately.
-		return nil, fmt.Errorf("couldn't create device: %w", err)
-	})
-}
-
-func (e *client) UpdateDevice(ctx context.Context, id string, r *packngo.DeviceUpdateRequest) (*packngo.Device, error) {
-	return wrap(ctx, e, func(cl *packngo.Client) (*packngo.Device, error) {
-		dev, _, err := cl.Devices.Update(id, r)
-		return dev, err
-	})
-}
-
-func (e *client) ListDevices(ctx context.Context, pid string) ([]packngo.Device, error) {
-	return wrap(ctx, e, func(cl *packngo.Client) ([]packngo.Device, error) {
-		// to increase the chances of a stable pagination, we sort the devices by hostname
-		res, _, err := cl.Devices.List(pid, &packngo.GetOptions{SortBy: "hostname"})
-		return res, err
-	})
-}
-
-func (e *client) GetDevice(ctx context.Context, pid, did string, opts *packngo.ListOptions) (*packngo.Device, error) {
-	return wrap(ctx, e, func(cl *packngo.Client) (*packngo.Device, error) {
-		d, _, err := cl.Devices.Get(did, opts)
-		return d, err
-	})
-}
-
-// Currently unexported, only used in tests.
-func (e *client) deleteDevice(ctx context.Context, did string) error {
-	_, err := wrap(ctx, e, func(cl *packngo.Client) (*struct{}, error) {
-		_, err := cl.Devices.Delete(did, false)
-		if httpStatusCode(err) == http.StatusNotFound {
-			// 404s may pop up as an after effect of running the back-off
-			// algorithm, and as such should not be propagated.
-			return nil, nil
-		}
-		return nil, err
-	})
-	return err
-}
-
-func (e *client) ListReservations(ctx context.Context, pid string) ([]packngo.HardwareReservation, error) {
-	return wrap(ctx, e, func(cl *packngo.Client) ([]packngo.HardwareReservation, error) {
-		res, _, err := cl.HardwareReservations.List(pid, &packngo.ListOptions{Includes: []string{"facility", "device"}})
-		return res, err
-	})
-}
-
-func (e *client) MoveReservation(ctx context.Context, hardwareReservationDID, projectID string) (*packngo.HardwareReservation, error) {
-	return wrap(ctx, e, func(cl *packngo.Client) (*packngo.HardwareReservation, error) {
-		hr, _, err := cl.HardwareReservations.Move(hardwareReservationDID, projectID)
-		if err != nil {
-			return nil, fmt.Errorf("HardwareReservations.Move: %w", err)
-		}
-		return hr, err
-	})
-}
-
-func (e *client) CreateSSHKey(ctx context.Context, r *packngo.SSHKeyCreateRequest) (*packngo.SSHKey, error) {
-	return wrap(ctx, e, func(cl *packngo.Client) (*packngo.SSHKey, error) {
-		// Does the key already exist?
-		ks, _, err := cl.SSHKeys.List()
-		if err != nil {
-			return nil, fmt.Errorf("SSHKeys.List: %w", err)
-		}
-		for _, k := range ks {
-			if k.Label == r.Label {
-				if k.Key != r.Key {
-					return nil, fmt.Errorf("key label already in use for a different key")
-				}
-				return &k, nil
-			}
-		}
-
-		// No key yet. Try to create it.
-		k, _, err := cl.SSHKeys.Create(r)
-		if err != nil {
-			return nil, fmt.Errorf("SSHKeys.Create: %w", err)
-		}
-		return k, nil
-	})
-}
-
-func (e *client) UpdateSSHKey(ctx context.Context, id string, r *packngo.SSHKeyUpdateRequest) (*packngo.SSHKey, error) {
-	return wrap(ctx, e, func(cl *packngo.Client) (*packngo.SSHKey, error) {
-		k, _, err := cl.SSHKeys.Update(id, r)
-		if err != nil {
-			return nil, fmt.Errorf("SSHKeys.Update: %w", err)
-		}
-		return k, err
-	})
-}
-
-// Currently unexported, only used in tests.
-func (e *client) deleteSSHKey(ctx context.Context, id string) error {
-	_, err := wrap(ctx, e, func(cl *packngo.Client) (struct{}, error) {
-		_, err := cl.SSHKeys.Delete(id)
-		if err != nil {
-			return struct{}{}, fmt.Errorf("SSHKeys.Delete: %w", err)
-		}
-		return struct{}{}, err
-	})
-	return err
-}
-
-func (e *client) ListSSHKeys(ctx context.Context) ([]packngo.SSHKey, error) {
-	return wrap(ctx, e, func(cl *packngo.Client) ([]packngo.SSHKey, error) {
-		ks, _, err := cl.SSHKeys.List()
-		if err != nil {
-			return nil, fmt.Errorf("SSHKeys.List: %w", err)
-		}
-		return ks, nil
-	})
-}
-
-// Currently unexported, only used in tests.
-func (e *client) getSSHKey(ctx context.Context, id string) (*packngo.SSHKey, error) {
-	return wrap(ctx, e, func(cl *packngo.Client) (*packngo.SSHKey, error) {
-		k, _, err := cl.SSHKeys.Get(id, nil)
-		if err != nil {
-			return nil, fmt.Errorf("SSHKeys.Get: %w", err)
-		}
-		return k, nil
-	})
-}
-
-func (e *client) RebootDevice(ctx context.Context, did string) error {
-	_, err := wrap(ctx, e, func(cl *packngo.Client) (struct{}, error) {
-		_, err := cl.Devices.Reboot(did)
-		return struct{}{}, err
-	})
-	return err
-}
diff --git a/cloud/shepherd/equinix/wrapngo/wrapngo_live_test.go b/cloud/shepherd/equinix/wrapngo/wrapngo_live_test.go
deleted file mode 100644
index 549071a..0000000
--- a/cloud/shepherd/equinix/wrapngo/wrapngo_live_test.go
+++ /dev/null
@@ -1,344 +0,0 @@
-package wrapngo
-
-import (
-	"context"
-	"crypto/ed25519"
-	"crypto/rand"
-	"errors"
-	"fmt"
-	"log"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/packethost/packngo"
-	"golang.org/x/crypto/ssh"
-)
-
-type liveTestClient struct {
-	cl  *client
-	ctx context.Context
-
-	apipid string
-	apios  string
-
-	sshKeyLabel        string
-	testDeviceHostname string
-}
-
-func newLiveTestClient(t *testing.T) *liveTestClient {
-	t.Helper()
-
-	apiuser := os.Getenv("EQUINIX_USER")
-	apikey := os.Getenv("EQUINIX_APIKEY")
-	apipid := os.Getenv("EQUINIX_PROJECT_ID")
-	apios := os.Getenv("EQUINIX_DEVICE_OS")
-
-	if apiuser == "" {
-		t.Skip("EQUINIX_USER must be set.")
-	}
-	if apikey == "" {
-		t.Skip("EQUINIX_APIKEY must be set.")
-	}
-	if apipid == "" {
-		t.Skip("EQUINIX_PROJECT_ID must be set.")
-	}
-	if apios == "" {
-		t.Skip("EQUINIX_DEVICE_OS must be set.")
-	}
-	ctx, ctxC := context.WithCancel(context.Background())
-	t.Cleanup(ctxC)
-	return &liveTestClient{
-		cl: new(&Opts{
-			User:   apiuser,
-			APIKey: apikey,
-		}),
-		ctx: ctx,
-
-		apipid: apipid,
-		apios:  apios,
-
-		sshKeyLabel:        "shepherd-livetest-client",
-		testDeviceHostname: "shepherd-livetest-device",
-	}
-}
-
-// awaitDeviceState returns nil after device matching the id reaches one of the
-// provided states. It will return a non-nil value in case of an API error, and
-// particularly if there exists no device matching id.
-func (l *liveTestClient) awaitDeviceState(t *testing.T, id string, states ...string) error {
-	t.Helper()
-
-	for {
-		d, err := l.cl.GetDevice(l.ctx, l.apipid, id, nil)
-		if err != nil {
-			if errors.Is(err, os.ErrDeadlineExceeded) {
-				continue
-			}
-			return fmt.Errorf("while fetching device info: %w", err)
-		}
-		if d == nil {
-			return fmt.Errorf("expected the test device (ID: %s) to exist.", id)
-		}
-		for _, s := range states {
-			if d.State == s {
-				return nil
-			}
-		}
-		t.Logf("Waiting for device to be provisioned (ID: %s, current state: %q)", id, d.State)
-		time.Sleep(time.Second)
-	}
-}
-
-// cleanup ensures both the test device and the test key are deleted at
-// Equinix.
-func (l *liveTestClient) cleanup(t *testing.T) {
-	t.Helper()
-
-	t.Logf("Cleaning up.")
-
-	// Ensure the device matching testDeviceHostname is deleted.
-	ds, err := l.cl.ListDevices(l.ctx, l.apipid)
-	if err != nil {
-		log.Fatalf("while listing devices: %v", err)
-	}
-	var td *packngo.Device
-	for _, d := range ds {
-		if d.Hostname == l.testDeviceHostname {
-			td = &d
-			break
-		}
-	}
-	if td != nil {
-		t.Logf("Found a test device (ID: %s) that needs to be deleted before progressing further.", td.ID)
-
-		// Devices currently being provisioned can't be deleted. After it's
-		// provisioned, device's state will match either "active", or "failed".
-		if err := l.awaitDeviceState(t, "active", "failed"); err != nil {
-			t.Fatalf("while waiting for device to be provisioned: %v", err)
-		}
-		if err := l.cl.deleteDevice(l.ctx, td.ID); err != nil {
-			t.Fatalf("while deleting test device: %v", err)
-		}
-	}
-
-	// Ensure the key matching sshKeyLabel is deleted.
-	ks, err := l.cl.ListSSHKeys(l.ctx)
-	if err != nil {
-		t.Fatalf("while listing SSH keys: %v", err)
-	}
-	for _, k := range ks {
-		if k.Label == l.sshKeyLabel {
-			t.Logf("Found a SSH test key (ID: %s) - deleting...", k.ID)
-			if err := l.cl.deleteSSHKey(l.ctx, k.ID); err != nil {
-				t.Fatalf("while deleting an SSH key: %v", err)
-			}
-			t.Logf("Deleted a SSH test key (ID: %s).", k.ID)
-		}
-	}
-}
-
-// createSSHAuthKey returns an SSH public key in OpenSSH authorized_keys
-// format.
-func createSSHAuthKey(t *testing.T) string {
-	t.Helper()
-	pub, _, err := ed25519.GenerateKey(rand.Reader)
-	if err != nil {
-		t.Errorf("while generating SSH key: %v", err)
-	}
-
-	sshpub, err := ssh.NewPublicKey(pub)
-	if err != nil {
-		t.Errorf("while generating SSH public key: %v", err)
-	}
-	return string(ssh.MarshalAuthorizedKey(sshpub))
-}
-
-// TestLiveAPI performs smoke tests of wrapngo against the real Equinix API. See
-// newLiveTestClient to see which environment variables need to be provided in
-// order for this test to run.
-func TestLiveAPI(t *testing.T) {
-	ltc := newLiveTestClient(t)
-	ltc.cleanup(t)
-
-	cl := ltc.cl
-	ctx := ltc.ctx
-
-	t.Run("ListReservations", func(t *testing.T) {
-		_, err := cl.ListReservations(ctx, ltc.apipid)
-		if err != nil {
-			t.Errorf("while listing hardware reservations: %v", err)
-		}
-	})
-
-	var sshKeyID string
-	t.Run("CreateSSHKey", func(t *testing.T) {
-		nk, err := cl.CreateSSHKey(ctx, &packngo.SSHKeyCreateRequest{
-			Label:     ltc.sshKeyLabel,
-			Key:       createSSHAuthKey(t),
-			ProjectID: ltc.apipid,
-		})
-		if err != nil {
-			t.Fatalf("while creating an SSH key: %v", err)
-		}
-		if nk.Label != ltc.sshKeyLabel {
-			t.Errorf("key labels don't match.")
-		}
-		t.Logf("Created an SSH key (ID: %s)", nk.ID)
-		sshKeyID = nk.ID
-	})
-
-	var dummySSHPK2 string
-	t.Run("UpdateSSHKey", func(t *testing.T) {
-		if sshKeyID == "" {
-			t.Skip("SSH key couldn't have been created - skipping...")
-		}
-
-		dummySSHPK2 = createSSHAuthKey(t)
-		k, err := cl.UpdateSSHKey(ctx, sshKeyID, &packngo.SSHKeyUpdateRequest{
-			Key: &dummySSHPK2,
-		})
-		if err != nil {
-			t.Fatalf("while updating an SSH key: %v", err)
-		}
-		if k.Key != dummySSHPK2 {
-			t.Errorf("updated SSH key doesn't match the original.")
-		}
-	})
-	t.Run("GetSSHKey", func(t *testing.T) {
-		if sshKeyID == "" {
-			t.Skip("SSH key couldn't have been created - skipping...")
-		}
-
-		k, err := cl.getSSHKey(ctx, sshKeyID)
-		if err != nil {
-			t.Fatalf("while getting an SSH key: %v", err)
-		}
-		if k.Key != dummySSHPK2 {
-			t.Errorf("got key contents that don't match the original.")
-		}
-	})
-	t.Run("ListSSHKeys", func(t *testing.T) {
-		if sshKeyID == "" {
-			t.Skip("SSH key couldn't have been created - skipping...")
-		}
-
-		ks, err := cl.ListSSHKeys(ctx)
-		if err != nil {
-			t.Fatalf("while listing SSH keys: %v", err)
-		}
-
-		// Check that our key is part of the list.
-		found := false
-		for _, k := range ks {
-			if k.ID == sshKeyID {
-				found = true
-				break
-			}
-		}
-		if !found {
-			t.Errorf("SSH key not listed.")
-		}
-	})
-
-	var testDevice *packngo.Device
-	t.Run("CreateDevice", func(t *testing.T) {
-		// Find a provisionable hardware reservation the device will be created with.
-		rvs, err := cl.ListReservations(ctx, ltc.apipid)
-		if err != nil {
-			t.Errorf("while listing hardware reservations: %v", err)
-		}
-		var rv *packngo.HardwareReservation
-		for _, r := range rvs {
-			if r.Provisionable {
-				rv = &r
-				break
-			}
-		}
-		if rv == nil {
-			t.Skip("could not find a provisionable hardware reservation - skipping...")
-		}
-
-		d, err := cl.CreateDevice(ctx, &packngo.DeviceCreateRequest{
-			Hostname:              ltc.testDeviceHostname,
-			OS:                    ltc.apios,
-			Plan:                  rv.Plan.Slug,
-			HardwareReservationID: rv.ID,
-			ProjectID:             ltc.apipid,
-		})
-		if err != nil {
-			t.Fatalf("while creating a device: %v", err)
-		}
-		t.Logf("Created a new test device (ID: %s)", d.ID)
-		testDevice = d
-	})
-	t.Run("GetDevice", func(t *testing.T) {
-		if testDevice == nil {
-			t.Skip("the test device couldn't have been created - skipping...")
-		}
-
-		d, err := cl.GetDevice(ctx, ltc.apipid, testDevice.ID, nil)
-		if err != nil {
-			t.Fatalf("while fetching device info: %v", err)
-		}
-		if d == nil {
-			t.Fatalf("expected the test device (ID: %s) to exist.", testDevice.ID)
-		}
-		if d.ID != testDevice.ID {
-			t.Errorf("got device ID that doesn't match the original.")
-		}
-	})
-	t.Run("ListDevices", func(t *testing.T) {
-		if testDevice == nil {
-			t.Skip("the test device couldn't have been created - skipping...")
-		}
-
-		ds, err := cl.ListDevices(ctx, ltc.apipid)
-		if err != nil {
-			t.Errorf("while listing devices: %v", err)
-		}
-		if len(ds) == 0 {
-			t.Errorf("expected at least one device.")
-		}
-	})
-	t.Run("DeleteDevice", func(t *testing.T) {
-		if testDevice == nil {
-			t.Skip("the test device couldn't have been created - skipping...")
-		}
-
-		// Devices currently being provisioned can't be deleted. After it's
-		// provisioned, device's state will match either "active", or "failed".
-		if err := ltc.awaitDeviceState(t, testDevice.ID, "active", "failed"); err != nil {
-			t.Fatalf("while waiting for device to be provisioned: %v", err)
-		}
-		t.Logf("Deleting the test device (ID: %s)", testDevice.ID)
-		if err := cl.deleteDevice(ctx, testDevice.ID); err != nil {
-			t.Fatalf("while deleting a device: %v", err)
-		}
-		d, err := cl.GetDevice(ctx, ltc.apipid, testDevice.ID, nil)
-		if err != nil && !IsNotFound(err) {
-			t.Fatalf("while fetching device info: %v", err)
-		}
-		if d != nil {
-			t.Fatalf("device should not exist.")
-		}
-		t.Logf("Deleted the test device (ID: %s)", testDevice.ID)
-	})
-	t.Run("DeleteSSHKey", func(t *testing.T) {
-		if sshKeyID == "" {
-			t.Skip("SSH key couldn't have been created - skipping...")
-		}
-
-		t.Logf("Deleting the test SSH key (ID: %s)", sshKeyID)
-		if err := cl.deleteSSHKey(ctx, sshKeyID); err != nil {
-			t.Fatalf("couldn't delete an SSH key: %v", err)
-		}
-		_, err := cl.getSSHKey(ctx, sshKeyID)
-		if err == nil {
-			t.Fatalf("SSH key should not exist")
-		}
-		t.Logf("Deleted the test SSH key (ID: %s)", sshKeyID)
-	})
-
-	ltc.cleanup(t)
-}
diff --git a/cloud/shepherd/equinix/manager/BUILD.bazel b/cloud/shepherd/manager/BUILD.bazel
similarity index 64%
rename from cloud/shepherd/equinix/manager/BUILD.bazel
rename to cloud/shepherd/manager/BUILD.bazel
index 1e1cb54..4119ff7 100644
--- a/cloud/shepherd/equinix/manager/BUILD.bazel
+++ b/cloud/shepherd/manager/BUILD.bazel
@@ -4,25 +4,24 @@
     name = "manager",
     srcs = [
         "control_loop.go",
+        "fake_ssh_client.go",
         "initializer.go",
         "manager.go",
         "provisioner.go",
         "recoverer.go",
-        "shared_config.go",
-        "ssh.go",
-        "updater.go",
+        "ssh_client.go",
+        "ssh_key_signer.go",
     ],
-    importpath = "source.monogon.dev/cloud/shepherd/equinix/manager",
+    importpath = "source.monogon.dev/cloud/shepherd/manager",
     visibility = ["//visibility:public"],
     deps = [
         "//cloud/agent/api",
         "//cloud/bmaas/bmdb",
         "//cloud/bmaas/bmdb/metrics",
         "//cloud/bmaas/bmdb/model",
-        "//cloud/lib/sinbin",
-        "//cloud/shepherd/equinix/wrapngo",
+        "//cloud/shepherd",
+        "//go/mflags",
         "@com_github_google_uuid//:uuid",
-        "@com_github_packethost_packngo//:packngo",
         "@com_github_pkg_sftp//:sftp",
         "@io_k8s_klog_v2//:klog",
         "@org_golang_google_protobuf//proto",
@@ -34,28 +33,22 @@
 
 go_test(
     name = "manager_test",
-    timeout = "eternal",
     srcs = [
-        "fakequinix_test.go",
         "initializer_test.go",
+        "provider_test.go",
         "provisioner_test.go",
-        "recoverer_test.go",
-        "updater_test.go",
     ],
     data = [
-        "//cloud/shepherd/equinix/manager/test_agent",
         "@cockroach",
     ],
     embed = [":manager"],
     deps = [
-        "//cloud/agent/api",
         "//cloud/bmaas/bmdb",
         "//cloud/bmaas/bmdb/model",
         "//cloud/lib/component",
+        "//cloud/shepherd",
         "@com_github_google_uuid//:uuid",
-        "@com_github_packethost_packngo//:packngo",
-        "@org_golang_google_protobuf//proto",
-        "@org_golang_x_crypto//ssh",
+        "@io_k8s_klog_v2//:klog",
         "@org_golang_x_time//rate",
     ],
 )
diff --git a/cloud/shepherd/equinix/manager/README.md b/cloud/shepherd/manager/README.md
similarity index 80%
rename from cloud/shepherd/equinix/manager/README.md
rename to cloud/shepherd/manager/README.md
index e3c0f24..d5a17c3 100644
--- a/cloud/shepherd/equinix/manager/README.md
+++ b/cloud/shepherd/manager/README.md
@@ -28,14 +28,14 @@
 If you have Equinix credentials, you can run:
 
 ```
-$ bazel build //cloud/shepherd/equinix/manager/server
-$ bazel build //cloud/shepherd/equinix/manager/test_agent
-$ bazel-bin/cloud/shepherd/equinix/manager/server/server_/server \
+$ bazel build //cloud/shepherd/provider/equinix
+$ bazel build //cloud/shepherd/manager/test_agent
+$ bazel-bin/cloud/shepherd/provider/equinix/equinix_/equinix \
     -bmdb_eat_my_data \
     -equinix_project_id FIXME \
     -equinix_api_username FIXME \
     -equinix_api_key FIXME \
-    -agent_executable_path bazel-bin/cloud/shepherd/equinix/manager/test_agent/test_agent_/test_agent \
+    -agent_executable_path bazel-bin/cloud/shepherd/manager/test_agent/test_agent_/test_agent \
     -agent_endpoint example.com \
     -equinix_ssh_key_label $USER-FIXME \
     -equinix_device_prefix $USER-FIXME- \
diff --git a/cloud/shepherd/equinix/manager/control_loop.go b/cloud/shepherd/manager/control_loop.go
similarity index 88%
rename from cloud/shepherd/equinix/manager/control_loop.go
rename to cloud/shepherd/manager/control_loop.go
index b30edbc..e1fdd1d 100644
--- a/cloud/shepherd/equinix/manager/control_loop.go
+++ b/cloud/shepherd/manager/control_loop.go
@@ -5,8 +5,6 @@
 	"errors"
 	"flag"
 	"fmt"
-	"strconv"
-	"strings"
 	"time"
 
 	"github.com/google/uuid"
@@ -17,6 +15,7 @@
 	"source.monogon.dev/cloud/bmaas/bmdb"
 	"source.monogon.dev/cloud/bmaas/bmdb/metrics"
 	"source.monogon.dev/cloud/bmaas/bmdb/model"
+	"source.monogon.dev/go/mflags"
 )
 
 // task describes a single server currently being processed by a control loop.
@@ -80,37 +79,11 @@
 	return c
 }
 
-// flagLimiter configures a *rate.Limiter as a flag.
-func flagLimiter(l **rate.Limiter, name, defval, help string) {
-	syntax := "'duration,count' eg. '2m,10' for a 10-sized bucket refilled at one token every 2 minutes"
-	help = help + fmt.Sprintf(" (default: %q, syntax: %s)", defval, syntax)
-	flag.Func(name, help, func(val string) error {
-		if val == "" {
-			val = defval
-		}
-		parts := strings.Split(val, ",")
-		if len(parts) != 2 {
-			return fmt.Errorf("invalid syntax, want: %s", syntax)
-		}
-		duration, err := time.ParseDuration(parts[0])
-		if err != nil {
-			return fmt.Errorf("invalid duration: %w", err)
-		}
-		refill, err := strconv.ParseUint(parts[1], 10, 31)
-		if err != nil {
-			return fmt.Errorf("invalid refill rate: %w", err)
-		}
-		*l = rate.NewLimiter(rate.Every(duration), int(refill))
-		return nil
-	})
-	flag.Set(name, defval)
-}
-
 // RegisterFlags should be called on this configuration whenever the embeddeding
 // component/configuration is registering its own flags. The prefix should be the
 // name of the component.
 func (c *ControlLoopConfig) RegisterFlags(prefix string) {
-	flagLimiter(&c.DBQueryLimiter, prefix+"_db_query_rate", "250ms,8", "Rate limiting for BMDB queries")
+	mflags.Limiter(&c.DBQueryLimiter, prefix+"_db_query_rate", "250ms,8", "Rate limiting for BMDB queries")
 	flag.IntVar(&c.Parallelism, prefix+"_loop_parallelism", 1, "How many initializer instances to run in parallel, ie. how many agents to attempt to (re)start at once")
 }
 
diff --git a/cloud/shepherd/manager/fake_ssh_client.go b/cloud/shepherd/manager/fake_ssh_client.go
new file mode 100644
index 0000000..1d9d371
--- /dev/null
+++ b/cloud/shepherd/manager/fake_ssh_client.go
@@ -0,0 +1,58 @@
+package manager
+
+import (
+	"context"
+	"crypto/ed25519"
+	"crypto/rand"
+	"fmt"
+	"time"
+
+	"google.golang.org/protobuf/proto"
+
+	apb "source.monogon.dev/cloud/agent/api"
+)
+
+// FakeSSHClient is an SSHClient that pretends to start an agent, but in reality
+// just responds with what an agent would respond on every execution attempt.
+type FakeSSHClient struct{}
+
+type fakeSSHConnection struct{}
+
+func (f *FakeSSHClient) Dial(ctx context.Context, address string, timeout time.Duration) (SSHConnection, error) {
+	return &fakeSSHConnection{}, nil
+}
+
+func (f *fakeSSHConnection) Execute(ctx context.Context, command string, stdin []byte) (stdout []byte, stderr []byte, err error) {
+	var aim apb.TakeoverInit
+	if err := proto.Unmarshal(stdin, &aim); err != nil {
+		return nil, nil, fmt.Errorf("while unmarshaling TakeoverInit message: %v", err)
+	}
+
+	// Agent should send back apb.TakeoverResponse on its standard output.
+	pub, _, err := ed25519.GenerateKey(rand.Reader)
+	if err != nil {
+		return nil, nil, fmt.Errorf("while generating agent public key: %v", err)
+	}
+	arsp := apb.TakeoverResponse{
+		Result: &apb.TakeoverResponse_Success{Success: &apb.TakeoverSuccess{
+			InitMessage: &aim,
+			Key:         pub,
+		}},
+	}
+	arspb, err := proto.Marshal(&arsp)
+	if err != nil {
+		return nil, nil, fmt.Errorf("while marshaling TakeoverResponse message: %v", err)
+	}
+	return arspb, nil, nil
+}
+
+func (f *fakeSSHConnection) Upload(ctx context.Context, targetPath string, data []byte) error {
+	if targetPath != "/fake/path" {
+		return fmt.Errorf("unexpected target path in test")
+	}
+	return nil
+}
+
+func (f *fakeSSHConnection) Close() error {
+	return nil
+}
diff --git a/cloud/shepherd/equinix/manager/initializer.go b/cloud/shepherd/manager/initializer.go
similarity index 69%
rename from cloud/shepherd/equinix/manager/initializer.go
rename to cloud/shepherd/manager/initializer.go
index 272df20..5abbc68 100644
--- a/cloud/shepherd/equinix/manager/initializer.go
+++ b/cloud/shepherd/manager/initializer.go
@@ -14,8 +14,6 @@
 	"time"
 
 	"github.com/google/uuid"
-	"github.com/packethost/packngo"
-	"golang.org/x/crypto/ssh"
 	"google.golang.org/protobuf/proto"
 	"k8s.io/klog/v2"
 
@@ -24,7 +22,7 @@
 	"source.monogon.dev/cloud/bmaas/bmdb"
 	"source.monogon.dev/cloud/bmaas/bmdb/metrics"
 	"source.monogon.dev/cloud/bmaas/bmdb/model"
-	ecl "source.monogon.dev/cloud/shepherd/equinix/wrapngo"
+	"source.monogon.dev/cloud/shepherd"
 )
 
 // InitializerConfig configures how the Initializer will deploy Agents on
@@ -44,6 +42,7 @@
 	// Endpoint is the address Agent will use to contact the BMaaS
 	// infrastructure. Must be set.
 	Endpoint string
+
 	// EndpointCACertificate is an optional DER-encoded (but not PEM-armored) X509
 	// certificate used to populate the trusted CA store of the agent. It should be
 	// set to the CA certificate of the endpoint if not using a system-trusted CA
@@ -60,8 +59,8 @@
 	SSHExecTimeout time.Duration
 }
 
-func (a *InitializerConfig) RegisterFlags() {
-	a.ControlLoopConfig.RegisterFlags("initializer")
+func (ic *InitializerConfig) RegisterFlags() {
+	ic.ControlLoopConfig.RegisterFlags("initializer")
 
 	flag.Func("agent_executable_path", "Local filesystem path of agent binary to be uploaded", func(val string) error {
 		if val == "" {
@@ -71,11 +70,11 @@
 		if err != nil {
 			return fmt.Errorf("could not read: %w", err)
 		}
-		a.Executable = data
+		ic.Executable = data
 		return nil
 	})
-	flag.StringVar(&a.TargetPath, "agent_target_path", "/root/agent", "Filesystem path where the agent will be uploaded to and ran from")
-	flag.StringVar(&a.Endpoint, "agent_endpoint", "", "Address of BMDB Server to which the agent will attempt to connect")
+	flag.StringVar(&ic.TargetPath, "agent_target_path", "/root/agent", "Filesystem path where the agent will be uploaded to and ran from")
+	flag.StringVar(&ic.Endpoint, "agent_endpoint", "", "Address of BMDB Server to which the agent will attempt to connect")
 	flag.Func("agent_endpoint_ca_certificate_path", "Path to PEM X509 CA certificate that the agent endpoint is serving with. If not set, the agent will attempt to use system CA certificates to authenticate the endpoint.", func(val string) error {
 		if val == "" {
 			return nil
@@ -92,65 +91,61 @@
 		if err != nil {
 			return fmt.Errorf("invalid certificate: %w", err)
 		}
-		a.EndpointCACertificate = block.Bytes
+		ic.EndpointCACertificate = block.Bytes
 		return nil
 	})
-	flag.DurationVar(&a.SSHConnectTimeout, "agent_ssh_connect_timeout", 2*time.Second, "Timeout for connecting over SSH to a machine")
-	flag.DurationVar(&a.SSHExecTimeout, "agent_ssh_exec_timeout", 60*time.Second, "Timeout for connecting over SSH to a machine")
+	flag.DurationVar(&ic.SSHConnectTimeout, "agent_ssh_connect_timeout", 2*time.Second, "Timeout for connecting over SSH to a machine")
+	flag.DurationVar(&ic.SSHExecTimeout, "agent_ssh_exec_timeout", 60*time.Second, "Timeout for executing the agent over SSH on a machine")
+}
+
+func (ic *InitializerConfig) Check() error {
+	if err := ic.ControlLoopConfig.Check(); err != nil {
+		return err
+	}
+
+	if len(ic.Executable) == 0 {
+		return fmt.Errorf("agent executable not configured")
+	}
+	if ic.TargetPath == "" {
+		return fmt.Errorf("agent target path must be set")
+	}
+	if ic.Endpoint == "" {
+		return fmt.Errorf("agent endpoint must be set")
+	}
+	if ic.SSHConnectTimeout == 0 {
+		return fmt.Errorf("agent SSH connection timeout must be set")
+	}
+	if ic.SSHExecTimeout == 0 {
+		return fmt.Errorf("agent SSH execution timeout must be set")
+	}
+
+	return nil
 }
 
 // The Initializer starts the agent on machines that aren't yet running it.
 type Initializer struct {
 	InitializerConfig
 
-	sharedConfig *SharedConfig
-	cl           ecl.Client
-	signer       ssh.Signer
-	sshClient    SSHClient
+	sshClient SSHClient
+	p         shepherd.Provider
 }
 
 // NewInitializer creates an Initializer instance, checking the
 // InitializerConfig, SharedConfig and AgentConfig for errors.
-func NewInitializer(cl ecl.Client, ic InitializerConfig, sc *SharedConfig) (*Initializer, error) {
-	if err := sc.check(); err != nil {
+func NewInitializer(p shepherd.Provider, sshClient SSHClient, ic InitializerConfig) (*Initializer, error) {
+	if err := ic.Check(); err != nil {
 		return nil, err
 	}
-	if err := ic.ControlLoopConfig.Check(); err != nil {
-		return nil, err
-	}
-
-	if len(ic.Executable) == 0 {
-		return nil, fmt.Errorf("agent executable not configured")
-	}
-	if ic.TargetPath == "" {
-		return nil, fmt.Errorf("agent target path must be set")
-	}
-	if ic.Endpoint == "" {
-		return nil, fmt.Errorf("agent endpoint must be set")
-	}
-	if ic.SSHConnectTimeout == 0 {
-		return nil, fmt.Errorf("agent SSH connection timeout must be set")
-	}
-	if ic.SSHExecTimeout == 0 {
-		return nil, fmt.Errorf("agent SSH execution timeout must be set")
-	}
-
-	signer, err := sc.sshSigner()
-	if err != nil {
-		return nil, fmt.Errorf("could not initialize signer: %w", err)
-	}
 
 	return &Initializer{
 		InitializerConfig: ic,
 
-		sharedConfig: sc,
-		cl:           cl,
-		sshClient:    &PlainSSHClient{},
-		signer:       signer,
+		p:         p,
+		sshClient: sshClient,
 	}, nil
 }
 
-func (c *Initializer) getProcessInfo() processInfo {
+func (i *Initializer) getProcessInfo() processInfo {
 	return processInfo{
 		process: model.ProcessShepherdAgentStart,
 		defaultBackoff: bmdb.Backoff{
@@ -162,22 +157,22 @@
 	}
 }
 
-func (c *Initializer) getMachines(ctx context.Context, q *model.Queries, limit int32) ([]model.MachineProvided, error) {
+func (i *Initializer) getMachines(ctx context.Context, q *model.Queries, limit int32) ([]model.MachineProvided, error) {
 	return q.GetMachinesForAgentStart(ctx, model.GetMachinesForAgentStartParams{
 		Limit:    limit,
-		Provider: model.ProviderEquinix,
+		Provider: i.p.Type(),
 	})
 }
 
-func (c *Initializer) processMachine(ctx context.Context, t *task) error {
-	dev, err := c.cl.GetDevice(ctx, c.sharedConfig.ProjectId, t.machine.ProviderID, nil)
+func (i *Initializer) processMachine(ctx context.Context, t *task) error {
+	machine, err := i.p.GetMachine(ctx, shepherd.ProviderID(t.machine.ProviderID))
 	if err != nil {
-		return fmt.Errorf("while fetching device %q: %v", t.machine.ProviderID, err)
+		return fmt.Errorf("while fetching machine %q: %v", t.machine.ProviderID, err)
 	}
 
 	// Start the agent.
-	klog.Infof("Starting agent on device (ID: %s, PID %s)", t.machine.MachineID, t.machine.ProviderID)
-	apk, err := c.startAgent(ctx, c.signer, dev, t.machine.MachineID)
+	klog.Infof("Starting agent on machine (ID: %s, PID %s)", t.machine.MachineID, t.machine.ProviderID)
+	apk, err := i.startAgent(ctx, machine, t.machine.MachineID)
 	if err != nil {
 		return fmt.Errorf("while starting the agent: %w", err)
 	}
@@ -198,29 +193,26 @@
 	return nil
 }
 
-// startAgent runs the agent executable on the target device d, returning the
+// startAgent runs the agent executable on the target machine m, returning the
 // agent's public key on success.
-func (i *Initializer) startAgent(ctx context.Context, sgn ssh.Signer, d *packngo.Device, mid uuid.UUID) ([]byte, error) {
+func (i *Initializer) startAgent(ctx context.Context, m shepherd.Machine, mid uuid.UUID) ([]byte, error) {
 	// Provide a bound on execution time in case we get stuck after the SSH
 	// connection is established.
 	sctx, sctxC := context.WithTimeout(ctx, i.SSHExecTimeout)
 	defer sctxC()
 
-	// Use the device's IP address exposed by Equinix API.
-	ni := d.GetNetworkInfo()
-	var addr string
-	if ni.PublicIPv4 != "" {
-		addr = net.JoinHostPort(ni.PublicIPv4, "22")
-	} else if ni.PublicIPv6 != "" {
-		addr = net.JoinHostPort(ni.PublicIPv6, "22")
-	} else {
-		return nil, fmt.Errorf("device (machine ID: %s) has no available addresses", mid)
+	// Use the machine's IP address
+	ni := m.Addr()
+	if !ni.IsValid() {
+		return nil, fmt.Errorf("machine (machine ID: %s) has no available addresses", mid)
 	}
-	klog.V(1).Infof("Dialing device (machine ID: %s, addr: %s).", mid, addr)
 
-	conn, err := i.sshClient.Dial(sctx, addr, "root", sgn, i.SSHConnectTimeout)
+	addr := net.JoinHostPort(ni.String(), "22")
+	klog.V(1).Infof("Dialing machine (machine ID: %s, addr: %s).", mid, addr)
+
+	conn, err := i.sshClient.Dial(sctx, addr, i.SSHConnectTimeout)
 	if err != nil {
-		return nil, fmt.Errorf("while dialing the device: %w", err)
+		return nil, fmt.Errorf("while dialing the machine: %w", err)
 	}
 	defer conn.Close()
 
diff --git a/cloud/shepherd/manager/initializer_test.go b/cloud/shepherd/manager/initializer_test.go
new file mode 100644
index 0000000..5ba2253
--- /dev/null
+++ b/cloud/shepherd/manager/initializer_test.go
@@ -0,0 +1,91 @@
+package manager
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"golang.org/x/time/rate"
+
+	"source.monogon.dev/cloud/bmaas/bmdb"
+	"source.monogon.dev/cloud/bmaas/bmdb/model"
+	"source.monogon.dev/cloud/lib/component"
+)
+
+// TestInitializerSmokes makes sure the Initializer doesn't go up in flames on
+// the happy path.
+func TestInitializerSmokes(t *testing.T) {
+	provider := newDummyProvider(100)
+
+	ic := InitializerConfig{
+		ControlLoopConfig: ControlLoopConfig{
+			DBQueryLimiter: rate.NewLimiter(rate.Every(time.Second), 10),
+		},
+		Executable:        []byte("beep boop i'm a real program"),
+		TargetPath:        "/fake/path",
+		Endpoint:          "example.com:1234",
+		SSHConnectTimeout: time.Second,
+		SSHExecTimeout:    time.Second,
+	}
+
+	i, err := NewInitializer(provider, provider.sshClient(), ic)
+	if err != nil {
+		t.Fatalf("Could not create Initializer: %v", err)
+	}
+
+	b := bmdb.BMDB{
+		Config: bmdb.Config{
+			Database: component.CockroachConfig{
+				InMemory: true,
+			},
+			ComponentName: "test",
+			RuntimeInfo:   "test",
+		},
+	}
+	conn, err := b.Open(true)
+	if err != nil {
+		t.Fatalf("Could not create in-memory BMDB: %v", err)
+	}
+
+	ctx, ctxC := context.WithCancel(context.Background())
+	t.Cleanup(ctxC)
+
+	go RunControlLoop(ctx, conn, i)
+
+	sess, err := conn.StartSession(ctx)
+	if err != nil {
+		t.Fatalf("Failed to create BMDB session for verification: %v", err)
+	}
+
+	// Create 10 provided machines for testing.
+	if _, err := provider.createDummyMachines(ctx, sess, 10); err != nil {
+		t.Fatalf("Failed to create dummy machines: %v", err)
+	}
+
+	// Expect to find 0 machines needing start.
+	for {
+		time.Sleep(100 * time.Millisecond)
+
+		var machines []model.MachineProvided
+		err = sess.Transact(ctx, func(q *model.Queries) error {
+			var err error
+			machines, err = q.GetMachinesForAgentStart(ctx, model.GetMachinesForAgentStartParams{
+				Limit:    100,
+				Provider: provider.Type(),
+			})
+			return err
+		})
+		if err != nil {
+			t.Fatalf("Failed to run Transaction: %v", err)
+		}
+		if len(machines) == 0 {
+			break
+		}
+	}
+
+	for _, m := range provider.machines {
+		if !m.agentStarted {
+			t.Fatalf("Initializer didn't start agent on machine %q", m.id)
+		}
+	}
+}
diff --git a/cloud/shepherd/equinix/manager/manager.go b/cloud/shepherd/manager/manager.go
similarity index 100%
rename from cloud/shepherd/equinix/manager/manager.go
rename to cloud/shepherd/manager/manager.go
diff --git a/cloud/shepherd/manager/provider_test.go b/cloud/shepherd/manager/provider_test.go
new file mode 100644
index 0000000..d1c6361
--- /dev/null
+++ b/cloud/shepherd/manager/provider_test.go
@@ -0,0 +1,182 @@
+package manager
+
+import (
+	"context"
+	"fmt"
+	"net/netip"
+	"time"
+
+	"github.com/google/uuid"
+	"k8s.io/klog/v2"
+
+	"source.monogon.dev/cloud/bmaas/bmdb"
+	"source.monogon.dev/cloud/bmaas/bmdb/model"
+	"source.monogon.dev/cloud/shepherd"
+)
+
+type dummyMachine struct {
+	id           shepherd.ProviderID
+	addr         netip.Addr
+	state        shepherd.State
+	agentStarted bool
+}
+
+func (dm *dummyMachine) ID() shepherd.ProviderID {
+	return dm.id
+}
+
+func (dm *dummyMachine) Addr() netip.Addr {
+	return dm.addr
+}
+
+func (dm *dummyMachine) State() shepherd.State {
+	return dm.state
+}
+
+type dummySSHClient struct {
+	SSHClient
+	dp *dummyProvider
+}
+
+type dummySSHConnection struct {
+	SSHConnection
+	m *dummyMachine
+}
+
+func (dsc *dummySSHConnection) Execute(ctx context.Context, command string, stdin []byte) ([]byte, []byte, error) {
+	stdout, stderr, err := dsc.SSHConnection.Execute(ctx, command, stdin)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	dsc.m.agentStarted = true
+	return stdout, stderr, nil
+}
+
+func (dsc *dummySSHClient) Dial(ctx context.Context, address string, timeout time.Duration) (SSHConnection, error) {
+	conn, err := dsc.SSHClient.Dial(ctx, address, timeout)
+	if err != nil {
+		return nil, err
+	}
+
+	addrPort := netip.MustParseAddrPort(address)
+	uid, err := uuid.FromBytes(addrPort.Addr().AsSlice())
+	if err != nil {
+		return nil, err
+	}
+
+	m := dsc.dp.machines[shepherd.ProviderID(uid.String())]
+	if m == nil {
+		return nil, fmt.Errorf("failed finding machine in map")
+	}
+
+	return &dummySSHConnection{conn, m}, nil
+}
+
+func (dp *dummyProvider) sshClient() SSHClient {
+	return &dummySSHClient{
+		SSHClient: &FakeSSHClient{},
+		dp:        dp,
+	}
+}
+
+func newDummyProvider(cap int) *dummyProvider {
+	return &dummyProvider{
+		capacity: cap,
+		machines: make(map[shepherd.ProviderID]*dummyMachine),
+	}
+}
+
+type dummyProvider struct {
+	capacity int
+	machines map[shepherd.ProviderID]*dummyMachine
+}
+
+func (dp *dummyProvider) createDummyMachines(ctx context.Context, session *bmdb.Session, count int) ([]shepherd.Machine, error) {
+	if len(dp.machines)+count > dp.capacity {
+		return nil, fmt.Errorf("no capacity left")
+	}
+
+	var machines []shepherd.Machine
+	for i := 0; i < count; i++ {
+		uid := uuid.Must(uuid.NewRandom())
+		m, err := dp.CreateMachine(ctx, session, shepherd.CreateMachineRequest{
+			UnusedMachine: &dummyMachine{
+				id:    shepherd.ProviderID(uid.String()),
+				state: shepherd.StateKnownUsed,
+				addr:  netip.AddrFrom16(uid),
+			},
+		})
+		if err != nil {
+			return nil, err
+		}
+		machines = append(machines, m)
+	}
+
+	return machines, nil
+}
+
+func (dp *dummyProvider) ListMachines(ctx context.Context) ([]shepherd.Machine, error) {
+	var machines []shepherd.Machine
+	for _, m := range dp.machines {
+		machines = append(machines, m)
+	}
+
+	unusedMachineCount := dp.capacity - len(machines)
+	for i := 0; i < unusedMachineCount; i++ {
+		uid := uuid.Must(uuid.NewRandom())
+		machines = append(machines, &dummyMachine{
+			id:    shepherd.ProviderID(uid.String()),
+			state: shepherd.StateKnownUnused,
+			addr:  netip.AddrFrom16(uid),
+		})
+	}
+
+	return machines, nil
+}
+
+func (dp *dummyProvider) GetMachine(ctx context.Context, id shepherd.ProviderID) (shepherd.Machine, error) {
+	for _, m := range dp.machines {
+		if m.ID() == id {
+			return m, nil
+		}
+	}
+
+	return nil, shepherd.ErrMachineNotFound
+}
+
+func (dp *dummyProvider) CreateMachine(ctx context.Context, session *bmdb.Session, request shepherd.CreateMachineRequest) (shepherd.Machine, error) {
+	dm := request.UnusedMachine.(*dummyMachine)
+
+	err := session.Transact(ctx, func(q *model.Queries) error {
+		// Create a new machine record within BMDB.
+		m, err := q.NewMachine(ctx)
+		if err != nil {
+			return fmt.Errorf("while creating a new machine record in BMDB: %w", err)
+		}
+
+		p := model.MachineAddProvidedParams{
+			MachineID:  m.MachineID,
+			ProviderID: string(dm.id),
+			Provider:   dp.Type(),
+		}
+		klog.Infof("Setting \"provided\" tag (ID: %s, PID: %s, Provider: %s).", p.MachineID, p.ProviderID, p.Provider)
+		if err := q.MachineAddProvided(ctx, p); err != nil {
+			return fmt.Errorf("while tagging machine active: %w", err)
+		}
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	dm.state = shepherd.StateKnownUsed
+	dp.machines[dm.id] = dm
+
+	return dm, nil
+}
+
+func (dp *dummyProvider) Type() model.Provider {
+	return model.ProviderNone
+}
diff --git a/cloud/shepherd/manager/provisioner.go b/cloud/shepherd/manager/provisioner.go
new file mode 100644
index 0000000..a77f241
--- /dev/null
+++ b/cloud/shepherd/manager/provisioner.go
@@ -0,0 +1,392 @@
+package manager
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"net/netip"
+	"sort"
+	"time"
+
+	"github.com/google/uuid"
+	"golang.org/x/time/rate"
+	"k8s.io/klog/v2"
+
+	"source.monogon.dev/cloud/bmaas/bmdb"
+	"source.monogon.dev/cloud/bmaas/bmdb/metrics"
+	"source.monogon.dev/cloud/bmaas/bmdb/model"
+	"source.monogon.dev/cloud/shepherd"
+	"source.monogon.dev/go/mflags"
+)
+
+// Provisioner implements the server provisioning logic. Provisioning entails
+// bringing all available machines (subject to limits) into BMDB.
+type Provisioner struct {
+	ProvisionerConfig
+	// p is the provider backend used to list and create machines.
+	p shepherd.Provider
+}
+
+// ProvisionerConfig configures the provisioning process.
+type ProvisionerConfig struct {
+	// MaxCount is the maximum count of managed servers. No new devices will be
+	// created after reaching the limit. No attempt will be made to reduce the
+	// server count.
+	MaxCount uint
+
+	// ReconcileLoopLimiter limits the rate of the main reconciliation loop
+	// iterating.
+	ReconcileLoopLimiter *rate.Limiter
+
+	// DeviceCreationLimiter limits the rate at which devices are created.
+	DeviceCreationLimiter *rate.Limiter
+
+	// ChunkSize is how many machines will try to be spawned in a
+	// single reconciliation loop. Higher numbers allow for faster initial
+	// provisioning, but lower numbers decrease potential raciness with other systems
+	// and make sure that other parts of the reconciliation logic are ran regularly.
+	//
+	// 20 is decent starting point.
+	ChunkSize uint
+}
+
+// RegisterFlags registers the provisioner configuration on the global flag
+// set. Must be called before flag.Parse().
+func (pc *ProvisionerConfig) RegisterFlags() {
+	flag.UintVar(&pc.MaxCount, "provisioner_max_machines", 50, "Limit of machines that the provisioner will attempt to pull into the BMDB. Zero for no limit.")
+	mflags.Limiter(&pc.ReconcileLoopLimiter, "provisioner_reconciler_rate", "1m,1", "Rate limiting for main provisioner reconciliation loop")
+	mflags.Limiter(&pc.DeviceCreationLimiter, "provisioner_device_creation_rate", "5s,1", "Rate limiting for machine creation")
+	flag.UintVar(&pc.ChunkSize, "provisioner_reservation_chunk_size", 20, "How many machines will the provisioner attempt to create in a single reconciliation loop iteration")
+}
+
+// check validates the ProvisionerConfig, returning a descriptive error for
+// the first missing field. MaxCount is deliberately not checked: zero means
+// "no limit".
+func (pc *ProvisionerConfig) check() error {
+	// If these are unset, it's probably because someone is using us as a library.
+	// Provide error messages useful to code users instead of flag names.
+	if pc.ReconcileLoopLimiter == nil {
+		return fmt.Errorf("ReconcileLoopLimiter must be set")
+	}
+	if pc.DeviceCreationLimiter == nil {
+		return fmt.Errorf("DeviceCreationLimiter must be set")
+	}
+	if pc.ChunkSize == 0 {
+		return fmt.Errorf("ChunkSize must be set")
+	}
+	return nil
+}
+
+// NewProvisioner creates a Provisioner instance backed by the given provider,
+// checking the ProvisionerConfig for errors.
+func NewProvisioner(p shepherd.Provider, pc ProvisionerConfig) (*Provisioner, error) {
+	if err := pc.check(); err != nil {
+		return nil, err
+	}
+
+	return &Provisioner{
+		ProvisionerConfig: pc,
+		p:                 p,
+	}, nil
+}
+
+// Run the provisioner blocking the current goroutine until the given context
+// expires.
+func (p *Provisioner) Run(ctx context.Context, conn *bmdb.Connection) error {
+
+	var sess *bmdb.Session
+	var err error
+	for {
+		// (Re-)establish a BMDB session on the first iteration and after the
+		// previous session expired.
+		if sess == nil {
+			sess, err = conn.StartSession(ctx, bmdb.SessionOption{Processor: metrics.ProcessorShepherdProvisioner})
+			if err != nil {
+				return fmt.Errorf("could not start BMDB session: %w", err)
+			}
+		}
+		err = p.runInSession(ctx, sess)
+
+		switch {
+		case err == nil:
+			// Success; loop immediately. Pacing is handled inside
+			// runInSession via ReconcileLoopLimiter.
+		case errors.Is(err, ctx.Err()):
+			// Context canceled: the only way out of the loop.
+			return err
+		case errors.Is(err, bmdb.ErrSessionExpired):
+			klog.Errorf("Session expired, restarting...")
+			sess = nil
+			time.Sleep(time.Second)
+		case err != nil:
+			klog.Errorf("Processing failed: %v", err)
+			// TODO(q3k): close session
+			time.Sleep(time.Second)
+		}
+	}
+}
+
+// machineListing carries the result of one asynchronous machine listing
+// (either from the provider or from the BMDB) across a channel.
+type machineListing struct {
+	machines []shepherd.Machine
+	err      error
+}
+
+// runInSession executes one iteration of the provisioner's control loop within a
+// BMDB session. This control loop attempts to bring all capacity into machines in
+// the BMDB, subject to limits.
+func (p *Provisioner) runInSession(ctx context.Context, sess *bmdb.Session) error {
+	// Pace the loop; this is the provisioner's main rate limit.
+	if err := p.ReconcileLoopLimiter.Wait(ctx); err != nil {
+		return err
+	}
+
+	// Buffered (capacity 1) so neither goroutine blocks on send if this
+	// function returns early due to the other listing failing.
+	providerC := make(chan *machineListing, 1)
+	bmdbC := make(chan *machineListing, 1)
+
+	klog.Infof("Getting provider and bmdb machines...")
+
+	// Make sub-context for two parallel operations, and so that we can cancel one
+	// immediately if the other fails.
+	subCtx, subCtxC := context.WithCancel(ctx)
+	defer subCtxC()
+
+	go func() {
+		machines, err := p.listInProvider(subCtx)
+		providerC <- &machineListing{
+			machines: machines,
+			err:      err,
+		}
+	}()
+	go func() {
+		machines, err := p.listInBMDB(subCtx, sess)
+		bmdbC <- &machineListing{
+			machines: machines,
+			err:      err,
+		}
+	}()
+	// Wait for both listings, failing fast on the first error.
+	var inProvider, inBMDB *machineListing
+	for {
+		select {
+		case inProvider = <-providerC:
+			if err := inProvider.err; err != nil {
+				return fmt.Errorf("listing provider machines failed: %w", err)
+			}
+			klog.Infof("Got %d machines in provider.", len(inProvider.machines))
+		case inBMDB = <-bmdbC:
+			if err := inBMDB.err; err != nil {
+				return fmt.Errorf("listing BMDB machines failed: %w", err)
+			}
+			klog.Infof("Got %d machines in BMDB.", len(inBMDB.machines))
+		}
+		if inProvider != nil && inBMDB != nil {
+			break
+		}
+	}
+
+	subCtxC()
+	if err := p.reconcile(ctx, sess, inProvider.machines, inBMDB.machines); err != nil {
+		return fmt.Errorf("reconciliation failed: %w", err)
+	}
+	return nil
+}
+
+// listInProvider returns all machines that the provider thinks we should be
+// managing, sorted by provider ID for a deterministic comparison against the
+// BMDB listing.
+func (p *Provisioner) listInProvider(ctx context.Context) ([]shepherd.Machine, error) {
+	machines, err := p.p.ListMachines(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("while fetching managed machines: %w", err)
+	}
+	sort.Slice(machines, func(i, j int) bool {
+		return machines[i].ID() < machines[j].ID()
+	})
+	return machines, nil
+}
+
+// providedMachine adapts a BMDB model.MachineProvided row to the
+// shepherd.Machine interface.
+type providedMachine struct {
+	model.MachineProvided
+}
+
+// ID returns the machine's provider ID as recorded in the BMDB.
+func (p providedMachine) ID() shepherd.ProviderID {
+	return shepherd.ProviderID(p.ProviderID)
+}
+
+// Addr returns the machine's provider IP address, or the zero netip.Addr if
+// the BMDB has no valid address recorded.
+func (p providedMachine) Addr() netip.Addr {
+	if !p.ProviderIpAddress.Valid {
+		return netip.Addr{}
+	}
+
+	addr, err := netip.ParseAddr(p.ProviderIpAddress.String)
+	if err != nil {
+		return netip.Addr{}
+	}
+	return addr
+}
+
+// State is always StateKnownUsed: a machine recorded as provided in the BMDB
+// is by definition in use.
+func (p providedMachine) State() shepherd.State {
+	return shepherd.StateKnownUsed
+}
+
+// listInBMDB returns all the machines that the BMDB thinks we should be managing.
+// Machines with unparseable provider IDs are logged and skipped rather than
+// failing the whole listing. The result is sorted by provider ID.
+func (p *Provisioner) listInBMDB(ctx context.Context, sess *bmdb.Session) ([]shepherd.Machine, error) {
+	var res []shepherd.Machine
+	err := sess.Transact(ctx, func(q *model.Queries) error {
+		machines, err := q.GetProvidedMachines(ctx, p.p.Type())
+		if err != nil {
+			return err
+		}
+		res = make([]shepherd.Machine, 0, len(machines))
+		for _, machine := range machines {
+			// Provider IDs are expected to be UUIDs; skip anything else.
+			_, err := uuid.Parse(machine.ProviderID)
+			if err != nil {
+				klog.Errorf("BMDB machine %s has unparseable provider ID %q", machine.MachineID, machine.ProviderID)
+				continue
+			}
+
+			res = append(res, providedMachine{machine})
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	sort.Slice(res, func(i, j int) bool {
+		return res[i].ID() < res[j].ID()
+	})
+	return res, nil
+}
+
+// resolvePossiblyUsed checks if the state is set to possibly used and finds out
+// which state is the correct one by consulting the set of machines the BMDB
+// already knows as provided.
+func (p *Provisioner) resolvePossiblyUsed(machine shepherd.Machine, providedMachines map[shepherd.ProviderID]bool) shepherd.State {
+	state, id := machine.State(), machine.ID()
+
+	// Bail out if this isn't a possibly used state.
+	if state != shepherd.StatePossiblyUsed {
+		return state
+	}
+
+	// If a machine does not have a valid id, it's always seen as unused.
+	if !id.IsValid() {
+		return shepherd.StateKnownUnused
+	}
+
+	// If the machine is not inside the bmdb, it's seen as unused.
+	if _, ok := providedMachines[id]; !ok {
+		return shepherd.StateKnownUnused
+	}
+
+	return shepherd.StateKnownUsed
+}
+
+// reconcile takes a list of machines that the provider thinks we should be
+// managing and that the BMDB thinks we should be managing, and tries to make
+// sense of that. First, some checks are performed across the two lists to make
+// sure we haven't dropped anything. Then, additional machines are deployed from
+// hardware reservations as needed.
+func (p *Provisioner) reconcile(ctx context.Context, sess *bmdb.Session, inProvider, bmdbMachines []shepherd.Machine) error {
+	klog.Infof("Reconciling...")
+
+	// Set of provider IDs that the BMDB knows as provided. Named bmdbSet
+	// (rather than bmdb) to avoid shadowing the bmdb package import.
+	bmdbSet := make(map[shepherd.ProviderID]bool)
+	for _, machine := range bmdbMachines {
+		// Don't check the state here as it's hardcoded to be known used.
+		bmdbSet[machine.ID()] = true
+	}
+
+	// Split the provider's machines into unused capacity and the set of
+	// machines it considers in use.
+	var availableMachines []shepherd.Machine
+	providerSet := make(map[shepherd.ProviderID]bool)
+	for _, machine := range inProvider {
+		state := p.resolvePossiblyUsed(machine, bmdbSet)
+
+		switch state {
+		case shepherd.StateKnownUnused:
+			availableMachines = append(availableMachines, machine)
+
+		case shepherd.StateKnownUsed:
+			providerSet[machine.ID()] = true
+
+		default:
+			return fmt.Errorf("machine has invalid state (ID: %s, Addr: %s): %s", machine.ID(), machine.Addr(), state)
+		}
+	}
+
+	managed := make(map[shepherd.ProviderID]bool)
+
+	// Some desynchronization between the BMDB and Provider point of view might be so
+	// bad we shouldn't attempt to do any work, at least not any time soon.
+	badbadnotgood := false
+
+	// Find any machines supposedly managed by us in the provider, but not in the
+	// BMDB.
+	for machine := range providerSet {
+		if bmdbSet[machine] {
+			managed[machine] = true
+			continue
+		}
+		klog.Errorf("Provider machine %s has no corresponding machine in BMDB.", machine)
+		badbadnotgood = true
+	}
+
+	// Find any machines in the BMDB but not in the provider.
+	for machine := range bmdbSet {
+		if !providerSet[machine] {
+			klog.Errorf("Provider device ID %s referred to in BMDB (from TODO) but missing in provider.", machine)
+			badbadnotgood = true
+		}
+	}
+
+	// Bail if things are weird.
+	if badbadnotgood {
+		klog.Errorf("Something's very wrong. Bailing early and refusing to do any work.")
+		return fmt.Errorf("fatal discrepency between BMDB and provider")
+	}
+
+	// Summarize all managed machines, which is the intersection of BMDB and
+	// Provisioner machines, usually both of these sets being equal.
+	nmanaged := len(managed)
+	klog.Infof("Total managed machines: %d", nmanaged)
+
+	if p.MaxCount != 0 && p.MaxCount <= uint(nmanaged) {
+		klog.Infof("Not bringing up more machines (at limit of %d machines)", p.MaxCount)
+		return nil
+	}
+
+	limitName := "no limit"
+	if p.MaxCount != 0 {
+		limitName = fmt.Sprintf("%d", p.MaxCount)
+	}
+	klog.Infof("Below managed machine limit (%s), bringing up more...", limitName)
+
+	if len(availableMachines) == 0 {
+		klog.Infof("No more capacity available.")
+		return nil
+	}
+
+	toProvision := availableMachines
+	// Limit them to MaxCount, if applicable.
+	if p.MaxCount != 0 {
+		needed := int(p.MaxCount) - nmanaged
+		if len(toProvision) < needed {
+			needed = len(toProvision)
+		}
+		toProvision = toProvision[:needed]
+	}
+
+	// Limit them to an arbitrary 'chunk' size so that we don't do too many things in
+	// a single reconciliation operation.
+	if uint(len(toProvision)) > p.ChunkSize {
+		toProvision = toProvision[:p.ChunkSize]
+	}
+
+	if len(toProvision) == 0 {
+		klog.Infof("No more unused machines available, or all filtered out.")
+		return nil
+	}
+
+	klog.Infof("Bringing up %d machines...", len(toProvision))
+	for _, machine := range toProvision {
+		if err := p.DeviceCreationLimiter.Wait(ctx); err != nil {
+			return err
+		}
+
+		nd, err := p.p.CreateMachine(ctx, sess, shepherd.CreateMachineRequest{
+			UnusedMachine: machine,
+		})
+		if err != nil {
+			// %v, not %w: klog.Errorf does not support error wrapping — %w is
+			// only valid in fmt.Errorf and would render as "%!w(...)" here.
+			klog.Errorf("while creating new device (ID: %s, Addr: %s, State: %s): %v", machine.ID(), machine.Addr(), machine.State(), err)
+			continue
+		}
+		klog.Infof("Created new machine with ID: %s", nd.ID())
+	}
+
+	return nil
+}
diff --git a/cloud/shepherd/manager/provisioner_test.go b/cloud/shepherd/manager/provisioner_test.go
new file mode 100644
index 0000000..5adc408
--- /dev/null
+++ b/cloud/shepherd/manager/provisioner_test.go
@@ -0,0 +1,138 @@
+package manager
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"golang.org/x/time/rate"
+
+	"source.monogon.dev/cloud/bmaas/bmdb"
+	"source.monogon.dev/cloud/bmaas/bmdb/model"
+	"source.monogon.dev/cloud/lib/component"
+	"source.monogon.dev/cloud/shepherd"
+)
+
+// TestProvisionerSmokes makes sure the Provisioner doesn't go up in flames on
+// the happy path.
+func TestProvisionerSmokes(t *testing.T) {
+	pc := ProvisionerConfig{
+		MaxCount: 10,
+		// We need 3 iterations to provide 10 machines with a chunk size of 4.
+		ReconcileLoopLimiter:  rate.NewLimiter(rate.Every(10*time.Second), 3),
+		DeviceCreationLimiter: rate.NewLimiter(rate.Every(time.Second), 10),
+		ChunkSize:             4,
+	}
+
+	// Dummy provider with 100 machines of capacity, well above MaxCount.
+	provider := newDummyProvider(100)
+
+	p, err := NewProvisioner(provider, pc)
+	if err != nil {
+		t.Fatalf("Could not create Provisioner: %v", err)
+	}
+
+	ctx, ctxC := context.WithCancel(context.Background())
+	defer ctxC()
+
+	b := bmdb.BMDB{
+		Config: bmdb.Config{
+			Database: component.CockroachConfig{
+				InMemory: true,
+			},
+			ComponentName: "test",
+			RuntimeInfo:   "test",
+		},
+	}
+	conn, err := b.Open(true)
+	if err != nil {
+		t.Fatalf("Could not create in-memory BMDB: %v", err)
+	}
+
+	// Run the provisioner in the background; it only stops via ctx cancel, so
+	// its error return is intentionally not checked here.
+	go p.Run(ctx, conn)
+
+	// Poll the BMDB until exactly MaxCount machines have been provided.
+	sess, err := conn.StartSession(ctx)
+	if err != nil {
+		t.Fatalf("Failed to create BMDB session for verification: %v", err)
+	}
+	for {
+		time.Sleep(100 * time.Millisecond)
+
+		var provided []model.MachineProvided
+		err = sess.Transact(ctx, func(q *model.Queries) error {
+			var err error
+			provided, err = q.GetProvidedMachines(ctx, provider.Type())
+			return err
+		})
+		if err != nil {
+			t.Errorf("Transact failed: %v", err)
+		}
+		if len(provided) < 10 {
+			continue
+		}
+		if len(provided) > 10 {
+			t.Errorf("%d machines provided (limit: 10)", len(provided))
+		}
+
+		// Every BMDB record must correspond to a machine the provider tracks.
+		for _, mp := range provided {
+			if provider.machines[shepherd.ProviderID(mp.ProviderID)] == nil {
+				t.Errorf("BMDB machine %q has unknown provider ID %q", mp.MachineID, mp.ProviderID)
+			}
+		}
+
+		return
+	}
+}
+
+// TestProvisioner_resolvePossiblyUsed makes sure the PossiblyUsed state is
+// resolved correctly.
+func TestProvisioner_resolvePossiblyUsed(t *testing.T) {
+	const providedMachineID = "provided-machine"
+
+	// Simulated set of provider IDs already recorded as provided in the BMDB.
+	providedMachines := map[shepherd.ProviderID]bool{
+		providedMachineID: true,
+	}
+
+	tests := []struct {
+		name         string
+		machineID    shepherd.ProviderID
+		machineState shepherd.State
+		wantedState  shepherd.State
+	}{
+		{
+			name:         "skip KnownUsed",
+			machineState: shepherd.StateKnownUsed,
+			wantedState:  shepherd.StateKnownUsed,
+		},
+		{
+			name:         "skip KnownUnused",
+			machineState: shepherd.StateKnownUnused,
+			wantedState:  shepherd.StateKnownUnused,
+		},
+		{
+			name:         "invalid ID",
+			machineID:    shepherd.InvalidProviderID,
+			machineState: shepherd.StatePossiblyUsed,
+			wantedState:  shepherd.StateKnownUnused,
+		},
+		{
+			name:         "valid ID, not in providedMachines",
+			machineID:    "unused-machine",
+			machineState: shepherd.StatePossiblyUsed,
+			wantedState:  shepherd.StateKnownUnused,
+		},
+		{
+			name:         "valid ID, in providedMachines",
+			machineID:    providedMachineID,
+			machineState: shepherd.StatePossiblyUsed,
+			wantedState:  shepherd.StateKnownUsed,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// resolvePossiblyUsed only reads its arguments, so a zero-value
+			// Provisioner suffices.
+			p := &Provisioner{}
+			if got := p.resolvePossiblyUsed(&dummyMachine{id: tt.machineID, state: tt.machineState}, providedMachines); got != tt.wantedState {
+				t.Errorf("resolvePossiblyUsed() = %v, want %v", got, tt.wantedState)
+			}
+		})
+	}
+}
diff --git a/cloud/shepherd/equinix/manager/recoverer.go b/cloud/shepherd/manager/recoverer.go
similarity index 67%
rename from cloud/shepherd/equinix/manager/recoverer.go
rename to cloud/shepherd/manager/recoverer.go
index 3779b02..a94700a 100644
--- a/cloud/shepherd/equinix/manager/recoverer.go
+++ b/cloud/shepherd/manager/recoverer.go
@@ -2,7 +2,6 @@
 
 import (
 	"context"
-	"flag"
 	"fmt"
 	"time"
 
@@ -11,34 +10,31 @@
 	"source.monogon.dev/cloud/bmaas/bmdb"
 	"source.monogon.dev/cloud/bmaas/bmdb/metrics"
 	"source.monogon.dev/cloud/bmaas/bmdb/model"
-	ecl "source.monogon.dev/cloud/shepherd/equinix/wrapngo"
+	"source.monogon.dev/cloud/shepherd"
 )
 
 type RecovererConfig struct {
 	ControlLoopConfig
-	RebootWaitSeconds int
 }
 
 func (r *RecovererConfig) RegisterFlags() {
 	r.ControlLoopConfig.RegisterFlags("recoverer")
-	flag.IntVar(&r.RebootWaitSeconds, "recoverer_reboot_wait_seconds", 30, "How many seconds to sleep to ensure a reboot happend")
 }
 
 // The Recoverer reboots machines whose agent has stopped sending heartbeats or
 // has not sent any heartbeats at all.
 type Recoverer struct {
 	RecovererConfig
-
-	cl ecl.Client
+	r shepherd.Recoverer
 }
 
-func NewRecoverer(cl ecl.Client, rc RecovererConfig) (*Recoverer, error) {
+func NewRecoverer(r shepherd.Recoverer, rc RecovererConfig) (*Recoverer, error) {
 	if err := rc.ControlLoopConfig.Check(); err != nil {
 		return nil, err
 	}
 	return &Recoverer{
 		RecovererConfig: rc,
-		cl:              cl,
+		r:               r,
 	}, nil
 }
 
@@ -57,24 +53,15 @@
 func (r *Recoverer) getMachines(ctx context.Context, q *model.Queries, limit int32) ([]model.MachineProvided, error) {
 	return q.GetMachineForAgentRecovery(ctx, model.GetMachineForAgentRecoveryParams{
 		Limit:    limit,
-		Provider: model.ProviderEquinix,
+		Provider: r.r.Type(),
 	})
 }
 
 func (r *Recoverer) processMachine(ctx context.Context, t *task) error {
-	klog.Infof("Starting recovery of device (ID: %s, PID %s)", t.machine.MachineID, t.machine.ProviderID)
+	klog.Infof("Starting recovery of machine (ID: %s, PID %s)", t.machine.MachineID, t.machine.ProviderID)
 
-	if err := r.cl.RebootDevice(ctx, t.machine.ProviderID); err != nil {
-		return fmt.Errorf("failed to reboot device: %w", err)
-	}
-
-	// TODO(issue/215): replace this
-	// This is required as Equinix doesn't reboot the machines synchronously
-	// during the API call.
-	select {
-	case <-time.After(time.Duration(r.RebootWaitSeconds) * time.Second):
-	case <-ctx.Done():
-		return fmt.Errorf("while waiting for reboot: %w", ctx.Err())
+	if err := r.r.RebootMachine(ctx, shepherd.ProviderID(t.machine.ProviderID)); err != nil {
+		return fmt.Errorf("failed to reboot machine: %w", err)
 	}
 
 	klog.Infof("Removing AgentStarted/AgentHeartbeat (ID: %s, PID: %s)...", t.machine.MachineID, t.machine.ProviderID)
diff --git a/cloud/shepherd/equinix/manager/ssh.go b/cloud/shepherd/manager/ssh_client.go
similarity index 87%
rename from cloud/shepherd/equinix/manager/ssh.go
rename to cloud/shepherd/manager/ssh_client.go
index 3eff4c5..a1a305a 100644
--- a/cloud/shepherd/equinix/manager/ssh.go
+++ b/cloud/shepherd/manager/ssh_client.go
@@ -16,9 +16,9 @@
 // would be PlainSSHClient, but tests can use this interface to dependency-inject
 // fake SSH connections.
 type SSHClient interface {
-	// Dial returns an SSHConnection to a given address (host:port pair) using a
-	// given username/sshkey for authentication, and with a timeout for connection.
-	Dial(ctx context.Context, address string, username string, sshkey ssh.Signer, connectTimeout time.Duration) (SSHConnection, error)
+	// Dial returns an SSHConnection to a given address (host:port pair) with
+	// a timeout for connection.
+	Dial(ctx context.Context, address string, connectTimeout time.Duration) (SSHConnection, error)
 }
 
 type SSHConnection interface {
@@ -36,13 +36,15 @@
 // PlainSSHClient implements SSHClient (and SSHConnection) using
 // golang.org/x/crypto/ssh.
 type PlainSSHClient struct {
+	AuthMethod ssh.AuthMethod
+	Username   string
 }
 
 type plainSSHConn struct {
 	cl *ssh.Client
 }
 
-func (p *PlainSSHClient) Dial(ctx context.Context, address, username string, sshkey ssh.Signer, connectTimeout time.Duration) (SSHConnection, error) {
+func (p *PlainSSHClient) Dial(ctx context.Context, address string, connectTimeout time.Duration) (SSHConnection, error) {
 	d := net.Dialer{
 		Timeout: connectTimeout,
 	}
@@ -51,10 +53,9 @@
 		return nil, err
 	}
 	conf := &ssh.ClientConfig{
-		// Equinix OS installations always use root.
-		User: username,
+		User: p.Username,
 		Auth: []ssh.AuthMethod{
-			ssh.PublicKeys(sshkey),
+			p.AuthMethod,
 		},
 		// Ignore the host key, since it's likely the first time anything logs into
 		// this device, and also because there's no way of knowing its fingerprint.
diff --git a/cloud/shepherd/manager/ssh_key_signer.go b/cloud/shepherd/manager/ssh_key_signer.go
new file mode 100644
index 0000000..7a8d08a
--- /dev/null
+++ b/cloud/shepherd/manager/ssh_key_signer.go
@@ -0,0 +1,108 @@
+package manager
+
+import (
+	"crypto/ed25519"
+	"crypto/rand"
+	"flag"
+	"fmt"
+	"os"
+	"sync"
+
+	"golang.org/x/crypto/ssh"
+	"k8s.io/klog/v2"
+)
+
+// SSHKey manages a single ed25519 SSH private key: either explicitly set,
+// loaded from disk, or generated and persisted on first use.
+type SSHKey struct {
+	// muKey guards Key.
+	muKey sync.Mutex
+
+	// SSH key to use when creating machines and then connecting to them. If not
+	// provided, it will be automatically loaded from KeyPersistPath, and if that
+	// doesn't exist either, it will be first generated and persisted there.
+	Key ed25519.PrivateKey
+
+	// Path at which the SSH key will be loaded from and persisted to, if Key is not
+	// explicitly set. Either KeyPersistPath or Key must be set.
+	KeyPersistPath string
+}
+
+// RegisterFlags registers the key persistence path on the global flag set.
+// Must be called before flag.Parse().
+func (c *SSHKey) RegisterFlags() {
+	flag.StringVar(&c.KeyPersistPath, "ssh_key_path", "", "Local filesystem path to read SSH key from, and save generated key to")
+}
+
+// sshKey returns the SSH key as defined by the Key and KeyPersistPath options,
+// loading/generating/persisting it as necessary. Safe for concurrent use: the
+// whole load/generate/persist sequence runs under muKey.
+func (c *SSHKey) sshKey() (ed25519.PrivateKey, error) {
+	c.muKey.Lock()
+	defer c.muKey.Unlock()
+
+	// An explicitly provided key always wins.
+	if c.Key != nil {
+		return c.Key, nil
+	}
+	if c.KeyPersistPath == "" {
+		return nil, fmt.Errorf("-ssh_key_path must be set")
+	}
+
+	data, err := os.ReadFile(c.KeyPersistPath)
+	switch {
+	case err == nil:
+		// The file stores the raw 64-byte ed25519 private key, no encoding.
+		if len(data) != ed25519.PrivateKeySize {
+			return nil, fmt.Errorf("%s is not a valid ed25519 private key", c.KeyPersistPath)
+		}
+		c.Key = data
+		klog.Infof("Loaded SSH key from %s", c.KeyPersistPath)
+		return c.Key, nil
+	case os.IsNotExist(err):
+		// First run: generate a fresh key and persist it read-only.
+		if err := c.sshGenerateUnlocked(); err != nil {
+			return nil, err
+		}
+		if err := os.WriteFile(c.KeyPersistPath, c.Key, 0400); err != nil {
+			return nil, fmt.Errorf("could not persist key: %w", err)
+		}
+		return c.Key, nil
+	default:
+		return nil, fmt.Errorf("could not load peristed key: %w", err)
+	}
+}
+
+// PublicKey returns the SSH public key marshaled for use (OpenSSH
+// authorized_keys format, trailing newline included), based on sshKey.
+func (c *SSHKey) PublicKey() (string, error) {
+	private, err := c.sshKey()
+	if err != nil {
+		return "", err
+	}
+	// Marshal the public key part in OpenSSH authorized_keys.
+	sshpub, err := ssh.NewPublicKey(private.Public())
+	if err != nil {
+		return "", fmt.Errorf("while building SSH public key: %w", err)
+	}
+	return string(ssh.MarshalAuthorizedKey(sshpub)), nil
+}
+
+// Signer builds an ssh.Signer (for use in SSH connections) based on sshKey.
+func (c *SSHKey) Signer() (ssh.Signer, error) {
+	private, err := c.sshKey()
+	if err != nil {
+		return nil, err
+	}
+	// Set up the internal ssh.Signer to be later used to initiate SSH
+	// connections with newly provided hosts.
+	signer, err := ssh.NewSignerFromKey(private)
+	if err != nil {
+		return nil, fmt.Errorf("while building SSH signer: %w", err)
+	}
+	return signer, nil
+}
+
+// sshGenerateUnlocked generates a new private key into SSHKey.Key unless one
+// is already set. The caller must hold muKey.
+func (c *SSHKey) sshGenerateUnlocked() error {
+	if c.Key != nil {
+		return nil
+	}
+	_, priv, err := ed25519.GenerateKey(rand.Reader)
+	if err != nil {
+		return fmt.Errorf("while generating SSH key: %w", err)
+	}
+	c.Key = priv
+	return nil
+}
diff --git a/cloud/shepherd/equinix/manager/test_agent/BUILD.bazel b/cloud/shepherd/manager/test_agent/BUILD.bazel
similarity index 82%
rename from cloud/shepherd/equinix/manager/test_agent/BUILD.bazel
rename to cloud/shepherd/manager/test_agent/BUILD.bazel
index 699082d..7636cdd 100644
--- a/cloud/shepherd/equinix/manager/test_agent/BUILD.bazel
+++ b/cloud/shepherd/manager/test_agent/BUILD.bazel
@@ -5,14 +5,14 @@
     name = "test_agent",
     embed = [":test_agent_lib"],
     visibility = [
-        "//cloud/shepherd/equinix/manager:__pkg__",
+        "//cloud/shepherd/manager:__pkg__",
     ],
 )
 
 go_library(
     name = "test_agent_lib",
     srcs = ["main.go"],
-    importpath = "source.monogon.dev/cloud/shepherd/equinix/manager/test_agent",
+    importpath = "source.monogon.dev/cloud/shepherd/manager/test_agent",
     visibility = ["//visibility:private"],
     deps = [
         "//cloud/agent/api",
diff --git a/cloud/shepherd/equinix/manager/test_agent/main.go b/cloud/shepherd/manager/test_agent/main.go
similarity index 100%
rename from cloud/shepherd/equinix/manager/test_agent/main.go
rename to cloud/shepherd/manager/test_agent/main.go
diff --git a/cloud/shepherd/mini/BUILD.bazel b/cloud/shepherd/mini/BUILD.bazel
new file mode 100644
index 0000000..eb949ee
--- /dev/null
+++ b/cloud/shepherd/mini/BUILD.bazel
@@ -0,0 +1,48 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+load("@io_bazel_rules_docker//container:container.bzl", "container_image")
+load("//build/static_binary_tarball:def.bzl", "static_binary_tarball")
+
+go_library(
+    name = "mini_lib",
+    srcs = [
+        "main.go",
+        "provider.go",
+        "ssh.go",
+    ],
+    importpath = "source.monogon.dev/cloud/shepherd/mini",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//cloud/bmaas/bmdb",
+        "//cloud/bmaas/bmdb/model",
+        "//cloud/bmaas/bmdb/webug",
+        "//cloud/lib/component",
+        "//cloud/shepherd",
+        "//cloud/shepherd/manager",
+        "//metropolis/cli/pkg/context",
+        "@io_k8s_klog_v2//:klog",
+        "@org_golang_x_crypto//ssh",
+    ],
+)
+
+go_binary(
+    name = "mini",
+    embed = [":mini_lib"],
+    visibility = ["//visibility:public"],
+)
+
+static_binary_tarball(
+    name = "mini_layer",
+    executable = ":mini",
+)
+
+container_image(
+    name = "mini_container",
+    base = "@go_image_base//image",
+    entrypoint = ["/app/cloud/shepherd/mini/mini_/mini"],
+    tars = [
+        ":mini_layer",
+        "//cloud/takeover:takeover_layer",
+    ],
+    visibility = ["//visibility:public"],
+    workdir = "/app",
+)
diff --git a/cloud/shepherd/mini/main.go b/cloud/shepherd/mini/main.go
new file mode 100644
index 0000000..67231c0
--- /dev/null
+++ b/cloud/shepherd/mini/main.go
@@ -0,0 +1,191 @@
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+
+	"k8s.io/klog/v2"
+
+	"source.monogon.dev/cloud/bmaas/bmdb"
+	"source.monogon.dev/cloud/bmaas/bmdb/model"
+	"source.monogon.dev/cloud/bmaas/bmdb/webug"
+	"source.monogon.dev/cloud/lib/component"
+	"source.monogon.dev/cloud/shepherd"
+	"source.monogon.dev/cloud/shepherd/manager"
+	clicontext "source.monogon.dev/metropolis/cli/pkg/context"
+)
+
+// Config bundles all configuration of the mini shepherd: shared
+// component/BMDB/webug setup, the generic manager control loops, and the
+// static device list source.
+type Config struct {
+	Component   component.ComponentConfig
+	BMDB        bmdb.BMDB
+	WebugConfig webug.Config
+
+	InitializerConfig manager.InitializerConfig
+	ProvisionerConfig manager.ProvisionerConfig
+	RecovererConfig   manager.RecovererConfig
+
+	// SSHConfig configures how the initializer connects to machines.
+	SSHConfig        sshConfig
+	// DeviceListSource is the URL of the static device list (http(s):// or file://).
+	DeviceListSource string
+	// ProviderType is the provider this mini shepherd emulates in the BMDB.
+	ProviderType     model.Provider
+}
+
+// TODO(q3k): factor this out to BMDB library?
+
+// runtimeInfo returns a human-readable description of where this process is
+// running, used for BMDB session bookkeeping.
+func runtimeInfo() string {
+	hostname, _ := os.Hostname()
+	if hostname == "" {
+		hostname = "UNKNOWN"
+	}
+	return fmt.Sprintf("host %s", hostname)
+}
+
+// RegisterFlags registers all mini shepherd flags on the global flag set and
+// fills in static BMDB metadata. Must be called before flag.Parse().
+func (c *Config) RegisterFlags() {
+	c.Component.RegisterFlags("shepherd")
+	c.BMDB.ComponentName = "shepherd-mini"
+	c.BMDB.RuntimeInfo = runtimeInfo()
+	c.BMDB.Database.RegisterFlags("bmdb")
+	c.WebugConfig.RegisterFlags()
+
+	c.InitializerConfig.RegisterFlags()
+	c.ProvisionerConfig.RegisterFlags()
+	c.RecovererConfig.RegisterFlags()
+
+	c.SSHConfig.RegisterFlags()
+	flag.StringVar(&c.DeviceListSource, "mini_device_list_url", "", "The url from where to fetch the device list. For local paths use file:// as scheme")
+	// The provider name is matched case-insensitively against the known
+	// model.Provider values.
+	flag.Func("mini_provider", "The provider this mini shepherd should emulate. Supported values are: lumen,equinix", func(s string) error {
+		switch s {
+		case strings.ToLower(string(model.ProviderEquinix)):
+			c.ProviderType = model.ProviderEquinix
+		case strings.ToLower(string(model.ProviderLumen)):
+			c.ProviderType = model.ProviderLumen
+		default:
+			return fmt.Errorf("invalid provider name")
+		}
+		return nil
+	})
+}
+
+// deviceList is the JSON document format of the static device list: an array
+// of machine entries.
+type deviceList []machine
+
+// asMap indexes the device list by provider ID. Duplicate IDs silently
+// overwrite earlier entries.
+func (dl deviceList) asMap() map[shepherd.ProviderID]machine {
+	mm := make(map[shepherd.ProviderID]machine)
+	for _, m := range dl {
+		mm[m.ProviderID] = m
+	}
+	return mm
+}
+
+// fetchDeviceList retrieves and JSON-decodes the static device list from the
+// given URL. file:// URLs are read from the local filesystem; every other
+// scheme is fetched via HTTP GET.
+func fetchDeviceList(s string) (deviceList, error) {
+	var r io.Reader
+	u, err := url.Parse(s)
+	if err != nil {
+		// %w so callers can inspect the underlying *url.Error.
+		return nil, fmt.Errorf("failed parsing device list url: %w", err)
+	}
+
+	if u.Scheme != "file" {
+		resp, err := http.Get(u.String())
+		if err != nil {
+			return nil, err
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode != http.StatusOK {
+			// Report actual vs. expected with matching verbs; the original
+			// message printed the constant 200 first ("200 != <got>") which
+			// read as if 200 were the received status.
+			return nil, fmt.Errorf("invalid status code: got %d, want %d", resp.StatusCode, http.StatusOK)
+		}
+		r = resp.Body
+	} else {
+		f, err := os.Open(u.Path)
+		if err != nil {
+			return nil, err
+		}
+		defer f.Close()
+		r = f
+	}
+
+	var d deviceList
+	dec := json.NewDecoder(r)
+	// Make typos in the device list hard errors instead of silently ignoring
+	// unknown JSON fields.
+	dec.DisallowUnknownFields()
+	if err := dec.Decode(&d); err != nil {
+		return nil, err
+	}
+
+	klog.Infof("Fetched device list with %d entries", len(d))
+
+	return d, nil
+}
+
+// main wires together the mini shepherd: a static-device-list provider, the
+// generic provisioner and initializer control loops, and the webug debug UI.
+func main() {
+	var c Config
+	c.RegisterFlags()
+
+	flag.Parse()
+	if flag.NArg() > 0 {
+		klog.Exitf("unexpected positional arguments: %v", flag.Args())
+	}
+
+	registry := c.Component.PrometheusRegistry()
+	c.BMDB.EnableMetrics(registry)
+
+	ctx := clicontext.WithInterrupt(context.Background())
+	c.Component.StartPrometheus(ctx)
+
+	conn, err := c.BMDB.Open(true)
+	if err != nil {
+		klog.Exitf("Failed to open BMDB connection: %v", err)
+	}
+
+	sshClient, err := c.SSHConfig.NewClient()
+	if err != nil {
+		klog.Exitf("Failed to create SSH client: %v", err)
+	}
+
+	// The message must name the flag actually registered in RegisterFlags
+	// (-mini_device_list_url); it previously referenced a nonexistent
+	// -mini_device_list_source flag.
+	if c.DeviceListSource == "" {
+		klog.Exitf("-mini_device_list_url must be set")
+	}
+
+	list, err := fetchDeviceList(c.DeviceListSource)
+	if err != nil {
+		klog.Exitf("Failed to fetch device list: %v", err)
+	}
+
+	mini := &provider{
+		providerType: c.ProviderType,
+		machines:     list.asMap(),
+	}
+
+	provisioner, err := manager.NewProvisioner(mini, c.ProvisionerConfig)
+	if err != nil {
+		klog.Exitf("%v", err)
+	}
+
+	initializer, err := manager.NewInitializer(mini, sshClient, c.InitializerConfig)
+	if err != nil {
+		klog.Exitf("%v", err)
+	}
+
+	// Each goroutine uses its own error variable: the previous code assigned
+	// the shared outer err from multiple goroutines, which is a data race.
+	go func() {
+		if err := provisioner.Run(ctx, conn); err != nil {
+			klog.Exit(err)
+		}
+	}()
+	go func() {
+		if err := manager.RunControlLoop(ctx, conn, initializer); err != nil {
+			klog.Exit(err)
+		}
+	}()
+	go func() {
+		if err := c.WebugConfig.Start(ctx, conn); err != nil && err != ctx.Err() {
+			klog.Exitf("Failed to start webug: %v", err)
+		}
+	}()
+
+	// Block until interrupted.
+	<-ctx.Done()
+}
diff --git a/cloud/shepherd/mini/provider.go b/cloud/shepherd/mini/provider.go
new file mode 100644
index 0000000..05b628f
--- /dev/null
+++ b/cloud/shepherd/mini/provider.go
@@ -0,0 +1,126 @@
+package main
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"net/netip"
+
+	"k8s.io/klog/v2"
+
+	"source.monogon.dev/cloud/bmaas/bmdb"
+	"source.monogon.dev/cloud/bmaas/bmdb/model"
+	"source.monogon.dev/cloud/shepherd"
+)
+
+// provider represents a shepherd.Provider that works entirely on a
+// static device list. It requires a provider type and a device list.
+type provider struct {
+	providerType model.Provider
+	machines     map[shepherd.ProviderID]machine
+}
+
+type machine struct {
+	ProviderID shepherd.ProviderID `json:"ID"`
+	Address    netip.Addr          `json:"Addr"`
+	Location   string              `json:"Location"`
+}
+
+func (d machine) ID() shepherd.ProviderID {
+	return d.ProviderID
+}
+
+func (d machine) Addr() netip.Addr {
+	return d.Address
+}
+
+func (d machine) State() shepherd.State {
+	return shepherd.StatePossiblyUsed
+}
+
+func (p *provider) ListMachines(ctx context.Context) ([]shepherd.Machine, error) {
+	machines := make([]shepherd.Machine, 0, len(p.machines))
+	for _, m := range p.machines {
+		machines = append(machines, m)
+	}
+
+	return machines, nil
+}
+
+func (p *provider) GetMachine(ctx context.Context, id shepherd.ProviderID) (shepherd.Machine, error) {
+	// If the provided machine is not inside our known machines,
+	// bail-out early as this is unsupported.
+	if _, ok := p.machines[id]; !ok {
+		return nil, fmt.Errorf("unknown provided machine requested")
+	}
+
+	return p.machines[id], nil
+}
+
+func (p *provider) CreateMachine(ctx context.Context, session *bmdb.Session, request shepherd.CreateMachineRequest) (shepherd.Machine, error) {
+	if request.UnusedMachine == nil {
+		return nil, fmt.Errorf("parameter UnusedMachine is missing")
+	}
+
+	//TODO: Do we just trust the implementation to be correct?
+	m, ok := request.UnusedMachine.(machine)
+	if !ok {
+		return nil, fmt.Errorf("invalid type for parameter UnusedMachine")
+	}
+
+	if err := p.assimilate(ctx, session, m); err != nil {
+		klog.Errorf("Failed to provision machine %s: %v", m.ProviderID, err)
+		return nil, err
+	}
+
+	return m, nil
+}
+
+func (p *provider) assimilate(ctx context.Context, sess *bmdb.Session, machine machine) error {
+	return sess.Transact(ctx, func(q *model.Queries) error {
+		// Create a new machine record within BMDB.
+		m, err := q.NewMachine(ctx)
+		if err != nil {
+			return fmt.Errorf("while creating a new machine record in BMDB: %w", err)
+		}
+
+		// Link the new machine with the device, and tag it "provided".
+		addParams := model.MachineAddProvidedParams{
+			MachineID:  m.MachineID,
+			ProviderID: string(machine.ProviderID),
+			Provider:   p.providerType,
+		}
+		klog.Infof("Setting \"provided\" tag (ID: %s, PID: %s, Provider: %s).", addParams.MachineID, addParams.ProviderID, addParams.Provider)
+		if err := q.MachineAddProvided(ctx, addParams); err != nil {
+			return fmt.Errorf("while tagging machine active: %w", err)
+		}
+
+		upParams := model.MachineUpdateProviderStatusParams{
+			ProviderID: string(machine.ProviderID),
+			Provider:   p.providerType,
+			ProviderIpAddress: sql.NullString{
+				String: machine.Address.String(),
+				Valid:  true,
+			},
+			ProviderLocation: sql.NullString{
+				String: machine.Location,
+				Valid:  machine.Location != "",
+			},
+			ProviderStatus: model.NullProviderStatus{
+				ProviderStatus: model.ProviderStatusUnknown,
+				Valid:          true,
+			},
+		}
+
+		klog.Infof("Setting \"provided\" tag status parameter (ID: %s, PID: %s, Provider: %s).", addParams.MachineID, upParams.ProviderID, upParams.Provider)
+		if err := q.MachineUpdateProviderStatus(ctx, upParams); err != nil {
+			return fmt.Errorf("while setting machine params: %w", err)
+		}
+
+		return nil
+	})
+}
+
+func (p *provider) Type() model.Provider {
+	return p.providerType
+}
diff --git a/cloud/shepherd/mini/ssh.go b/cloud/shepherd/mini/ssh.go
new file mode 100644
index 0000000..99f3e90
--- /dev/null
+++ b/cloud/shepherd/mini/ssh.go
@@ -0,0 +1,67 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+
+	"golang.org/x/crypto/ssh"
+	"k8s.io/klog/v2"
+
+	"source.monogon.dev/cloud/shepherd/manager"
+)
+
+type sshConfig struct {
+	User   string
+	Pass   string
+	SSHKey manager.SSHKey
+}
+
+func (sc *sshConfig) check() error {
+	if sc.User == "" {
+		return fmt.Errorf("-ssh_user must be set")
+	}
+
+	if sc.Pass == "" && sc.SSHKey.KeyPersistPath == "" {
+		//TODO: The flag name -ssh_key_path could change, which would make this
+		// error very confusing.
+		return fmt.Errorf("-ssh_pass or -ssh_key_path must be set")
+	}
+
+	return nil
+}
+
+func (sc *sshConfig) RegisterFlags() {
+	flag.StringVar(&sc.User, "ssh_user", "", "SSH username to log into the machines")
+	flag.StringVar(&sc.Pass, "ssh_pass", "", "SSH password to log into the machines")
+	sc.SSHKey.RegisterFlags()
+}
+
+func (sc *sshConfig) NewClient() (*manager.PlainSSHClient, error) {
+	if err := sc.check(); err != nil {
+		return nil, err
+	}
+
+	c := manager.PlainSSHClient{
+		Username: sc.User,
+	}
+
+	switch {
+	case sc.Pass != "":
+		c.AuthMethod = ssh.Password(sc.Pass)
+	case sc.SSHKey.KeyPersistPath != "":
+		signer, err := sc.SSHKey.Signer()
+		if err != nil {
+			return nil, err
+		}
+
+		pubKey, err := sc.SSHKey.PublicKey()
+		if err != nil {
+			return nil, err
+		}
+
+		klog.Infof("Using ssh key auth with public key: %s", pubKey)
+
+		c.AuthMethod = ssh.PublicKeys(signer)
+	}
+	return &c, nil
+}
diff --git a/cloud/shepherd/provider/equinix/BUILD.bazel b/cloud/shepherd/provider/equinix/BUILD.bazel
new file mode 100644
index 0000000..3363d7f
--- /dev/null
+++ b/cloud/shepherd/provider/equinix/BUILD.bazel
@@ -0,0 +1,77 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
+load("@io_bazel_rules_docker//container:container.bzl", "container_image")
+load("//build/static_binary_tarball:def.bzl", "static_binary_tarball")
+
+go_library(
+    name = "equinix_lib",
+    srcs = [
+        "main.go",
+        "provider.go",
+        "provider_config.go",
+        "updater.go",
+    ],
+    importpath = "source.monogon.dev/cloud/shepherd/provider/equinix",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//cloud/bmaas/bmdb",
+        "//cloud/bmaas/bmdb/metrics",
+        "//cloud/bmaas/bmdb/model",
+        "//cloud/bmaas/bmdb/webug",
+        "//cloud/equinix/wrapngo",
+        "//cloud/lib/component",
+        "//cloud/lib/sinbin",
+        "//cloud/shepherd",
+        "//cloud/shepherd/manager",
+        "//metropolis/cli/pkg/context",
+        "@com_github_packethost_packngo//:packngo",
+        "@io_k8s_klog_v2//:klog",
+        "@org_golang_x_crypto//ssh",
+    ],
+)
+
+go_test(
+    name = "equinix_test",
+    srcs = [
+        "fakequinix_test.go",
+        "initializer_test.go",
+        "provisioner_test.go",
+        "recoverer_test.go",
+        "updater_test.go",
+    ],
+    data = [
+        "@cockroach",
+    ],
+    embed = [":equinix_lib"],
+    deps = [
+        "//cloud/bmaas/bmdb",
+        "//cloud/bmaas/bmdb/model",
+        "//cloud/lib/component",
+        "//cloud/shepherd/manager",
+        "@com_github_google_uuid//:uuid",
+        "@com_github_packethost_packngo//:packngo",
+        "@org_golang_x_time//rate",
+    ],
+)
+
+go_binary(
+    name = "equinix",
+    embed = [":equinix_lib"],
+    visibility = ["//visibility:public"],
+)
+
+static_binary_tarball(
+    name = "equinix_layer",
+    executable = ":equinix",
+)
+
+container_image(
+    name = "equinix_container",
+    base = "@go_image_base//image",
+    entrypoint = ["/app/cloud/shepherd/provider/equinix/equinix_/equinix"],
+    tars = [
+        ":equinix_layer",
+        "//cloud/takeover:takeover_layer",
+    ],
+    visibility = ["//visibility:public"],
+    workdir = "/app",
+)
diff --git a/cloud/shepherd/equinix/manager/fakequinix_test.go b/cloud/shepherd/provider/equinix/fakequinix_test.go
similarity index 99%
rename from cloud/shepherd/equinix/manager/fakequinix_test.go
rename to cloud/shepherd/provider/equinix/fakequinix_test.go
index 4c0ca60..bd0df4a 100644
--- a/cloud/shepherd/equinix/manager/fakequinix_test.go
+++ b/cloud/shepherd/provider/equinix/fakequinix_test.go
@@ -1,4 +1,4 @@
-package manager
+package main
 
 import (
 	"context"
diff --git a/cloud/shepherd/equinix/manager/initializer_test.go b/cloud/shepherd/provider/equinix/initializer_test.go
similarity index 60%
rename from cloud/shepherd/equinix/manager/initializer_test.go
rename to cloud/shepherd/provider/equinix/initializer_test.go
index 82e1f90..3100ad2 100644
--- a/cloud/shepherd/equinix/manager/initializer_test.go
+++ b/cloud/shepherd/provider/equinix/initializer_test.go
@@ -1,4 +1,4 @@
-package manager
+package main
 
 import (
 	"context"
@@ -9,81 +9,43 @@
 	"time"
 
 	"github.com/packethost/packngo"
-	"golang.org/x/crypto/ssh"
 	"golang.org/x/time/rate"
-	"google.golang.org/protobuf/proto"
-
-	apb "source.monogon.dev/cloud/agent/api"
 
 	"source.monogon.dev/cloud/bmaas/bmdb"
 	"source.monogon.dev/cloud/bmaas/bmdb/model"
 	"source.monogon.dev/cloud/lib/component"
+	"source.monogon.dev/cloud/shepherd/manager"
 )
 
-// fakeSSHClient is an SSHClient that pretends to start an agent, but in reality
-// just responds with what an agent would respond on every execution attempt.
-type fakeSSHClient struct{}
-
-type fakeSSHConnection struct{}
-
-func (f *fakeSSHClient) Dial(ctx context.Context, address, username string, sshkey ssh.Signer, timeout time.Duration) (SSHConnection, error) {
-	return &fakeSSHConnection{}, nil
-}
-
-func (f *fakeSSHConnection) Execute(ctx context.Context, command string, stdin []byte) (stdout []byte, stderr []byte, err error) {
-	var aim apb.TakeoverInit
-	if err := proto.Unmarshal(stdin, &aim); err != nil {
-		return nil, nil, fmt.Errorf("while unmarshaling TakeoverInit message: %v", err)
-	}
-
-	// Agent should send back apb.TakeoverResponse on its standard output.
-	pub, _, err := ed25519.GenerateKey(rand.Reader)
-	if err != nil {
-		return nil, nil, fmt.Errorf("while generating agent public key: %v", err)
-	}
-	arsp := apb.TakeoverResponse{
-		Result: &apb.TakeoverResponse_Success{Success: &apb.TakeoverSuccess{
-			InitMessage: &aim,
-			Key:         pub,
-		}},
-	}
-	arspb, err := proto.Marshal(&arsp)
-	if err != nil {
-		return nil, nil, fmt.Errorf("while marshaling TakeoverResponse message: %v", err)
-	}
-	return arspb, nil, nil
-}
-
-func (f *fakeSSHConnection) Upload(ctx context.Context, targetPath string, data []byte) error {
-	if targetPath != "/fake/path" {
-		return fmt.Errorf("unexpected target path in test")
-	}
-	return nil
-}
-
-func (f *fakeSSHConnection) Close() error {
-	return nil
-}
-
 type initializerDut struct {
-	f    *fakequinix
-	i    *Initializer
-	bmdb *bmdb.Connection
-	ctx  context.Context
+	f        *fakequinix
+	i        *manager.Initializer
+	bmdb     *bmdb.Connection
+	ctx      context.Context
+	provider *equinixProvider
 }
 
 func newInitializerDut(t *testing.T) *initializerDut {
 	t.Helper()
 
-	_, key, _ := ed25519.GenerateKey(rand.Reader)
-	sc := SharedConfig{
+	sc := providerConfig{
 		ProjectId:    "noproject",
 		KeyLabel:     "somekey",
-		Key:          key,
 		DevicePrefix: "test-",
 	}
-	ic := InitializerConfig{
-		ControlLoopConfig: ControlLoopConfig{
+	_, key, _ := ed25519.GenerateKey(rand.Reader)
+	k := manager.SSHKey{
+		Key: key,
+	}
+
+	f := newFakequinix(sc.ProjectId, 100)
+	provider, err := sc.New(&k, f)
+	if err != nil {
+		t.Fatalf("Could not create Provider: %v", err)
+	}
+
+	ic := manager.InitializerConfig{
+		ControlLoopConfig: manager.ControlLoopConfig{
 			DBQueryLimiter: rate.NewLimiter(rate.Every(time.Second), 10),
 		},
 		Executable:        []byte("beep boop i'm a real program"),
@@ -93,8 +55,7 @@
 		SSHExecTimeout:    time.Second,
 	}
 
-	f := newFakequinix(sc.ProjectId, 100)
-	i, err := NewInitializer(f, ic, &sc)
+	i, err := manager.NewInitializer(provider, &manager.FakeSSHClient{}, ic)
 	if err != nil {
 		t.Fatalf("Could not create Initializer: %v", err)
 	}
@@ -116,18 +77,17 @@
 	ctx, ctxC := context.WithCancel(context.Background())
 	t.Cleanup(ctxC)
 
-	if err := sc.SSHEquinixEnsure(ctx, f); err != nil {
+	if err := provider.SSHEquinixEnsure(ctx); err != nil {
 		t.Fatalf("Failed to ensure SSH key: %v", err)
 	}
-
-	i.sshClient = &fakeSSHClient{}
-	go RunControlLoop(ctx, conn, i)
+	go manager.RunControlLoop(ctx, conn, i)
 
 	return &initializerDut{
-		f:    f,
-		i:    i,
-		bmdb: conn,
-		ctx:  ctx,
+		f:        f,
+		i:        i,
+		bmdb:     conn,
+		ctx:      ctx,
+		provider: provider,
 	}
 }
 
@@ -138,10 +98,9 @@
 	f := dut.f
 	ctx := dut.ctx
 	conn := dut.bmdb
-	sc := dut.i.sharedConfig
 
-	reservations, _ := f.ListReservations(ctx, sc.ProjectId)
-	kid, err := sc.sshEquinixId(ctx, f)
+	reservations, _ := f.ListReservations(ctx, f.pid)
+	kid, err := dut.provider.sshEquinixId(ctx)
 	if err != nil {
 		t.Fatalf("Failed to retrieve equinix key ID: %v", err)
 	}
@@ -156,7 +115,7 @@
 		dev, _ := f.CreateDevice(ctx, &packngo.DeviceCreateRequest{
 			Hostname:              fmt.Sprintf("test-%d", i),
 			OS:                    "fake",
-			ProjectID:             sc.ProjectId,
+			ProjectID:             f.pid,
 			HardwareReservationID: res.ID,
 			ProjectSSHKeys:        []string{kid},
 		})
diff --git a/cloud/shepherd/equinix/manager/server/main.go b/cloud/shepherd/provider/equinix/main.go
similarity index 65%
rename from cloud/shepherd/equinix/manager/server/main.go
rename to cloud/shepherd/provider/equinix/main.go
index 38735db..3a402e8 100644
--- a/cloud/shepherd/equinix/manager/server/main.go
+++ b/cloud/shepherd/provider/equinix/main.go
@@ -5,29 +5,31 @@
 	"flag"
 	"fmt"
 	"os"
-	"strings"
 
+	"golang.org/x/crypto/ssh"
 	"k8s.io/klog/v2"
 
 	"source.monogon.dev/cloud/bmaas/bmdb"
 	"source.monogon.dev/cloud/bmaas/bmdb/webug"
+	"source.monogon.dev/cloud/equinix/wrapngo"
 	"source.monogon.dev/cloud/lib/component"
-	"source.monogon.dev/cloud/shepherd/equinix/manager"
-	"source.monogon.dev/cloud/shepherd/equinix/wrapngo"
+	"source.monogon.dev/cloud/shepherd/manager"
 	clicontext "source.monogon.dev/metropolis/cli/pkg/context"
 )
 
 type Config struct {
-	Component component.ComponentConfig
-	BMDB      bmdb.BMDB
+	Component   component.ComponentConfig
+	BMDB        bmdb.BMDB
+	WebugConfig webug.Config
 
-	SharedConfig      manager.SharedConfig
-	ProvisionerConfig manager.ProvisionerConfig
+	SSHKey            manager.SSHKey
 	InitializerConfig manager.InitializerConfig
+	ProvisionerConfig manager.ProvisionerConfig
 	RecovererConfig   manager.RecovererConfig
-	UpdaterConfig     manager.UpdaterConfig
-	WebugConfig       webug.Config
-	API               wrapngo.Opts
+
+	API           wrapngo.Opts
+	Provider      providerConfig
+	UpdaterConfig UpdaterConfig
 }
 
 // TODO(q3k): factor this out to BMDB library?
@@ -44,19 +46,22 @@
 	c.BMDB.ComponentName = "shepherd-equinix"
 	c.BMDB.RuntimeInfo = runtimeInfo()
 	c.BMDB.Database.RegisterFlags("bmdb")
-
-	c.SharedConfig.RegisterFlags("")
-	c.ProvisionerConfig.RegisterFlags()
-	c.InitializerConfig.RegisterFlags()
-	c.RecovererConfig.RegisterFlags()
-	c.UpdaterConfig.RegisterFlags()
 	c.WebugConfig.RegisterFlags()
+
+	c.SSHKey.RegisterFlags()
+	c.InitializerConfig.RegisterFlags()
+	c.ProvisionerConfig.RegisterFlags()
+	c.RecovererConfig.RegisterFlags()
+
 	c.API.RegisterFlags()
+	c.Provider.RegisterFlags()
+	c.UpdaterConfig.RegisterFlags()
 }
 
 func main() {
-	c := &Config{}
+	var c Config
 	c.RegisterFlags()
+
 	flag.Parse()
 	if flag.NArg() > 0 {
 		klog.Exitf("unexpected positional arguments: %v", flag.Args())
@@ -74,32 +79,33 @@
 	c.API.MetricsRegistry = registry
 	api := wrapngo.New(&c.API)
 
-	// These variables are _very_ important to configure correctly, otherwise someone
-	// running this locally with prod creds will actually destroy production
-	// data.
-	if strings.Contains(c.SharedConfig.KeyLabel, "FIXME") {
-		klog.Exitf("refusing to run with -equinix_ssh_key_label %q, please set it to something unique", c.SharedConfig.KeyLabel)
-	}
-	if strings.Contains(c.SharedConfig.DevicePrefix, "FIXME") {
-		klog.Exitf("refusing to run with -equinix_device_prefix %q, please set it to something unique", c.SharedConfig.DevicePrefix)
-	}
-
-	klog.Infof("Ensuring our SSH key is configured...")
-	if err := c.SharedConfig.SSHEquinixEnsure(ctx, api); err != nil {
-		klog.Exitf("Ensuring SSH key failed: %v", err)
-	}
-
-	provisioner, err := c.ProvisionerConfig.New(api, &c.SharedConfig)
+	provider, err := c.Provider.New(&c.SSHKey, api)
 	if err != nil {
 		klog.Exitf("%v", err)
 	}
 
-	initializer, err := manager.NewInitializer(api, c.InitializerConfig, &c.SharedConfig)
+	sshSigner, err := c.SSHKey.Signer()
 	if err != nil {
 		klog.Exitf("%v", err)
 	}
 
-	recoverer, err := manager.NewRecoverer(api, c.RecovererConfig)
+	sshClient := &manager.PlainSSHClient{
+		AuthMethod: ssh.PublicKeys(sshSigner),
+		// Equinix OS installations always use root.
+		Username: "root",
+	}
+
+	provisioner, err := manager.NewProvisioner(provider, c.ProvisionerConfig)
+	if err != nil {
+		klog.Exitf("%v", err)
+	}
+
+	initializer, err := manager.NewInitializer(provider, sshClient, c.InitializerConfig)
+	if err != nil {
+		klog.Exitf("%v", err)
+	}
+
+	recoverer, err := manager.NewRecoverer(provider, c.RecovererConfig)
 	if err != nil {
 		klog.Exitf("%v", err)
 	}
diff --git a/cloud/shepherd/provider/equinix/provider.go b/cloud/shepherd/provider/equinix/provider.go
new file mode 100644
index 0000000..edc8f3f
--- /dev/null
+++ b/cloud/shepherd/provider/equinix/provider.go
@@ -0,0 +1,369 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"net/netip"
+	"slices"
+	"strings"
+	"time"
+
+	"github.com/packethost/packngo"
+	"k8s.io/klog/v2"
+
+	"source.monogon.dev/cloud/bmaas/bmdb"
+	"source.monogon.dev/cloud/bmaas/bmdb/model"
+	"source.monogon.dev/cloud/equinix/wrapngo"
+	"source.monogon.dev/cloud/lib/sinbin"
+	"source.monogon.dev/cloud/shepherd"
+	"source.monogon.dev/cloud/shepherd/manager"
+)
+
+type equinixProvider struct {
+	config *providerConfig
+	api    wrapngo.Client
+	sshKey *manager.SSHKey
+
+	// badReservations is a holiday resort for Equinix hardware reservations which
+	// failed to be provisioned for some reason or another. We keep a list of them in
+	// memory just so that we don't repeatedly try to provision the same known bad
+	// machines.
+	badReservations sinbin.Sinbin[string]
+
+	reservationDeadline time.Time
+	reservationCache    []packngo.HardwareReservation
+}
+
+func (ep *equinixProvider) RebootMachine(ctx context.Context, id shepherd.ProviderID) error {
+	if err := ep.api.RebootDevice(ctx, string(id)); err != nil {
+		return fmt.Errorf("failed to reboot device: %w", err)
+	}
+
+	// TODO(issue/215): replace this
+	// This is required as Equinix doesn't reboot the machines synchronously
+	// during the API call.
+	select {
+	case <-time.After(time.Duration(ep.config.RebootWaitSeconds) * time.Second):
+	case <-ctx.Done():
+		return fmt.Errorf("while waiting for reboot: %w", ctx.Err())
+	}
+	return nil
+}
+
+func (ep *equinixProvider) ReinstallMachine(ctx context.Context, id shepherd.ProviderID) error {
+	return shepherd.ErrNotImplemented
+}
+
+func (ep *equinixProvider) GetMachine(ctx context.Context, id shepherd.ProviderID) (shepherd.Machine, error) {
+	machines, err := ep.ListMachines(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, machine := range machines {
+		if machine.ID() == id {
+			return machine, nil
+		}
+	}
+
+	return nil, shepherd.ErrMachineNotFound
+}
+
+func (ep *equinixProvider) ListMachines(ctx context.Context) ([]shepherd.Machine, error) {
+	if ep.reservationDeadline.Before(time.Now()) {
+		reservations, err := ep.listReservations(ctx)
+		if err != nil {
+			return nil, err
+		}
+		ep.reservationCache = reservations
+		ep.reservationDeadline = time.Now().Add(ep.config.ReservationCacheTimeout)
+	}
+
+	devices, err := ep.managedDevices(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	machines := make([]shepherd.Machine, 0, len(ep.reservationCache)+len(devices))
+	for _, device := range devices {
+		machines = append(machines, &machine{device})
+	}
+
+	for _, res := range ep.reservationCache {
+		machines = append(machines, reservation{res})
+	}
+
+	return machines, nil
+}
+
+func (ep *equinixProvider) CreateMachine(ctx context.Context, session *bmdb.Session, request shepherd.CreateMachineRequest) (shepherd.Machine, error) {
+	if request.UnusedMachine == nil {
+		return nil, fmt.Errorf("parameter UnusedMachine is missing")
+	}
+
+	//TODO: Do we just trust the implementation to be correct?
+	res, ok := request.UnusedMachine.(reservation)
+	if !ok {
+		return nil, fmt.Errorf("invalid type for parameter UnusedMachine")
+	}
+
+	d, err := ep.provision(ctx, session, res.HardwareReservation)
+	if err != nil {
+		klog.Errorf("Failed to provision reservation %s: %v", res.HardwareReservation.ID, err)
+		until := time.Now().Add(time.Hour)
+		klog.Errorf("Adding hardware reservation %s to sinbin until %s", res.HardwareReservation.ID, until)
+		ep.badReservations.Add(res.HardwareReservation.ID, until)
+		return nil, err
+	}
+
+	return &machine{*d}, nil
+}
+
+func (ep *equinixProvider) Type() model.Provider {
+	return model.ProviderEquinix
+}
+
+type reservation struct {
+	packngo.HardwareReservation
+}
+
+func (e reservation) ID() shepherd.ProviderID {
+	return shepherd.InvalidProviderID
+}
+
+func (e reservation) Addr() netip.Addr {
+	return netip.Addr{}
+}
+
+func (e reservation) State() shepherd.State {
+	return shepherd.StateKnownUnused
+}
+
+type machine struct {
+	packngo.Device
+}
+
+func (e *machine) ID() shepherd.ProviderID {
+	return shepherd.ProviderID(e.Device.ID)
+}
+
+func (e *machine) Addr() netip.Addr {
+	ni := e.GetNetworkInfo()
+
+	var addr string
+	if ni.PublicIPv4 != "" {
+		addr = ni.PublicIPv4
+	} else if ni.PublicIPv6 != "" {
+		addr = ni.PublicIPv6
+	} else {
+		klog.Errorf("missing address for machine: %v", e.ID())
+		return netip.Addr{}
+	}
+
+	a, err := netip.ParseAddr(addr)
+	if err != nil {
+		klog.Errorf("failed parsing address %q: %v", addr, err)
+		return netip.Addr{}
+	}
+
+	return a
+}
+
+func (e *machine) State() shepherd.State {
+	return shepherd.StateKnownUsed
+}
+
+// listReservations fetches hardware reservations from the Equinix API and filters out those that are in use, not provisionable, or penalized.
+func (ep *equinixProvider) listReservations(ctx context.Context) ([]packngo.HardwareReservation, error) {
+	klog.Infof("Retrieving hardware reservations, this will take a while...")
+	reservations, err := ep.api.ListReservations(ctx, ep.config.ProjectId)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list reservations: %w", err)
+	}
+
+	var available []packngo.HardwareReservation
+	var inUse, notProvisionable, penalized int
+	for _, reservation := range reservations {
+		if reservation.Device != nil {
+			inUse++
+			continue
+		}
+		if !reservation.Provisionable {
+			notProvisionable++
+			continue
+		}
+		if ep.badReservations.Penalized(reservation.ID) {
+			penalized++
+			continue
+		}
+		available = append(available, reservation)
+	}
+	klog.Infof("Retrieved hardware reservations: %d (total), %d (available), %d (in use), %d (not provisionable), %d (penalized)", len(reservations), len(available), inUse, notProvisionable, penalized)
+
+	return available, nil
+}
+
+// provision attempts to create a device within Equinix using given Hardware
+// Reservation rsv. The resulting device is registered with BMDB, and tagged as
+// "provided" in the process.
+func (ep *equinixProvider) provision(ctx context.Context, sess *bmdb.Session, rsv packngo.HardwareReservation) (*packngo.Device, error) {
+	klog.Infof("Creating a new device using reservation ID %s.", rsv.ID)
+	hostname := ep.config.DevicePrefix + rsv.ID[:18]
+	kid, err := ep.sshEquinixId(ctx)
+	if err != nil {
+		return nil, err
+	}
+	req := &packngo.DeviceCreateRequest{
+		Hostname:              hostname,
+		OS:                    ep.config.OS,
+		Plan:                  rsv.Plan.Slug,
+		ProjectID:             ep.config.ProjectId,
+		HardwareReservationID: rsv.ID,
+		ProjectSSHKeys:        []string{kid},
+	}
+	if ep.config.UseProjectKeys {
+		klog.Warningf("INSECURE: Machines will be created with ALL PROJECT SSH KEYS!")
+		req.ProjectSSHKeys = nil
+	}
+
+	nd, err := ep.api.CreateDevice(ctx, req)
+	if err != nil {
+		return nil, fmt.Errorf("while creating new device within Equinix: %w", err)
+	}
+	klog.Infof("Created a new device within Equinix (RID: %s, PID: %s, HOST: %s)", rsv.ID, nd.ID, hostname)
+
+	ep.reservationCache = slices.DeleteFunc(ep.reservationCache, func(v packngo.HardwareReservation) bool {
+		return rsv.ID == v.ID
+	})
+
+	err = ep.assimilate(ctx, sess, nd.ID)
+	if err != nil {
+		// TODO(serge@monogon.tech) at this point the device at Equinix isn't
+		// matched by a BMDB record. Schedule device deletion or make sure this
+		// case is being handled elsewhere.
+		return nil, err
+	}
+	return nd, nil
+}
+
+// assimilate brings in an already existing machine from Equinix into the BMDB.
+// This is only used in manual testing.
+func (ep *equinixProvider) assimilate(ctx context.Context, sess *bmdb.Session, deviceID string) error {
+	return sess.Transact(ctx, func(q *model.Queries) error {
+		// Create a new machine record within BMDB.
+		m, err := q.NewMachine(ctx)
+		if err != nil {
+			return fmt.Errorf("while creating a new machine record in BMDB: %w", err)
+		}
+
+		// Link the new machine with the Equinix device, and tag it "provided".
+		p := model.MachineAddProvidedParams{
+			MachineID:  m.MachineID,
+			ProviderID: deviceID,
+			Provider:   model.ProviderEquinix,
+		}
+		klog.Infof("Setting \"provided\" tag (ID: %s, PID: %s, Provider: %s).", p.MachineID, p.ProviderID, p.Provider)
+		if err := q.MachineAddProvided(ctx, p); err != nil {
+			return fmt.Errorf("while tagging machine active: %w", err)
+		}
+		return nil
+	})
+}
+
+// sshEquinixGet looks up the Equinix key matching providerConfig.KeyLabel,
+// returning its packngo.SSHKey instance.
+func (ep *equinixProvider) sshEquinix(ctx context.Context) (*packngo.SSHKey, error) {
+	ks, err := ep.api.ListSSHKeys(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("while listing SSH keys: %w", err)
+	}
+
+	for _, k := range ks {
+		if k.Label == ep.config.KeyLabel {
+			return &k, nil
+		}
+	}
+	return nil, NoSuchKey
+}
+
+// sshEquinixId looks up the Equinix key identified by providerConfig.KeyLabel,
+// returning its Equinix-assigned UUID.
+func (ep *equinixProvider) sshEquinixId(ctx context.Context) (string, error) {
+	k, err := ep.sshEquinix(ctx)
+	if err != nil {
+		return "", err
+	}
+	return k.ID, nil
+}
+
+// sshEquinixUpdate makes sure the existing SSH key registered with Equinix
+// matches the one from sshPub.
+func (ep *equinixProvider) sshEquinixUpdate(ctx context.Context, kid string) error {
+	pub, err := ep.sshKey.PublicKey()
+	if err != nil {
+		return err
+	}
+	_, err = ep.api.UpdateSSHKey(ctx, kid, &packngo.SSHKeyUpdateRequest{
+		Key: &pub,
+	})
+	if err != nil {
+		return fmt.Errorf("while updating the SSH key: %w", err)
+	}
+	return nil
+}
+
+// sshEquinixUpload registers a new SSH key from sshPub.
+func (ep *equinixProvider) sshEquinixUpload(ctx context.Context) error {
+	pub, err := ep.sshKey.PublicKey()
+	if err != nil {
+		return fmt.Errorf("while generating public key: %w", err)
+	}
+	_, err = ep.api.CreateSSHKey(ctx, &packngo.SSHKeyCreateRequest{
+		Label:     ep.config.KeyLabel,
+		Key:       pub,
+		ProjectID: ep.config.ProjectId,
+	})
+	if err != nil {
+		return fmt.Errorf("while creating an SSH key: %w", err)
+	}
+	return nil
+}
+
+// SSHEquinixEnsure initializes the locally managed SSH key (from a persistence
+// path or explicitly set key) and updates or uploads it to Equinix. The key is
+// generated as needed.
+func (ep *equinixProvider) SSHEquinixEnsure(ctx context.Context) error {
+	k, err := ep.sshEquinix(ctx)
+	switch err {
+	case NoSuchKey:
+		if err := ep.sshEquinixUpload(ctx); err != nil {
+			return fmt.Errorf("while uploading key: %w", err)
+		}
+		return nil
+	case nil:
+		if err := ep.sshEquinixUpdate(ctx, k.ID); err != nil {
+			return fmt.Errorf("while updating key: %w", err)
+		}
+		return nil
+	default:
+		return err
+	}
+}
+
+// managedDevices provides a map of device provider IDs to matching
+// packngo.Device instances. It calls Equinix API's ListDevices. The returned
+// devices are filtered according to DevicePrefix provided through Opts. The
+// returned error value, if not nil, will originate in wrapngo.
+func (ep *equinixProvider) managedDevices(ctx context.Context) (map[string]packngo.Device, error) {
+	ds, err := ep.api.ListDevices(ctx, ep.config.ProjectId)
+	if err != nil {
+		return nil, err
+	}
+	dm := map[string]packngo.Device{}
+	for _, d := range ds {
+		if strings.HasPrefix(d.Hostname, ep.config.DevicePrefix) {
+			dm[d.ID] = d
+		}
+	}
+	return dm, nil
+}
diff --git a/cloud/shepherd/provider/equinix/provider_config.go b/cloud/shepherd/provider/equinix/provider_config.go
new file mode 100644
index 0000000..be3bc27
--- /dev/null
+++ b/cloud/shepherd/provider/equinix/provider_config.go
@@ -0,0 +1,97 @@
+package main
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"strings"
+	"time"
+
+	"source.monogon.dev/cloud/equinix/wrapngo"
+	"source.monogon.dev/cloud/shepherd/manager"
+)
+
+var (
+	NoSuchKey = errors.New("no such key")
+)
+
+// providerConfig contains configuration options used by both the Initializer and
+// Provisioner components of the Shepherd. In CLI scenarios, RegisterFlags should
+// be called to configure this struct from CLI flags. Otherwise, this structure
+// should be explicitly configured, as the default values are not valid.
+type providerConfig struct {
+	// ProjectId is the Equinix project UUID used by the manager. See Equinix API
+	// documentation for details. Must be set.
+	ProjectId string
+
+	// KeyLabel specifies the ID to use when handling the Equinix-registered SSH
+	// key used to authenticate to newly created servers. Must be set.
+	KeyLabel string
+
+	// DevicePrefix applied to all devices (machines) created by the Provisioner,
+	// and used by the Provisioner to identify machines which it managed.
+	// Must be set.
+	DevicePrefix string
+
+	// OS defines the operating system new devices are created with. Its format
+	// is specified by Equinix API.
+	OS string
+
+	// UseProjectKeys defines if the provisioner adds all ssh keys defined inside
+	// the used project to every new machine. This is only used for debug purposes.
+	UseProjectKeys bool
+
+	// RebootWaitSeconds defines how many seconds to sleep after a reboot call
+	// to ensure a reboot actually happened.
+	RebootWaitSeconds int
+
+	// ReservationCacheTimeout defines the duration after which the cached
+	// hardware reservations are refreshed.
+	ReservationCacheTimeout time.Duration
+}
+
+func (pc *providerConfig) check() error {
+	if pc.ProjectId == "" {
+		return fmt.Errorf("-equinix_project_id must be set")
+	}
+	if pc.KeyLabel == "" {
+		return fmt.Errorf("-equinix_ssh_key_label must be set")
+	}
+	if pc.DevicePrefix == "" {
+		return fmt.Errorf("-equinix_device_prefix must be set")
+	}
+
+	// These variables are _very_ important to configure correctly, otherwise someone
+	// running this locally with prod creds will actually destroy production
+	// data.
+	if strings.Contains(pc.KeyLabel, "FIXME") {
+		return fmt.Errorf("refusing to run with -equinix_ssh_key_label %q, please set it to something unique", pc.KeyLabel)
+	}
+	if strings.Contains(pc.DevicePrefix, "FIXME") {
+		return fmt.Errorf("refusing to run with -equinix_device_prefix %q, please set it to something unique", pc.DevicePrefix)
+	}
+
+	return nil
+}
+
+func (pc *providerConfig) RegisterFlags() {
+	flag.StringVar(&pc.ProjectId, "equinix_project_id", "", "Equinix project ID where resources will be managed")
+	flag.StringVar(&pc.KeyLabel, "equinix_ssh_key_label", "shepherd-FIXME", "Label used to identify managed SSH key in Equinix project")
+	flag.StringVar(&pc.DevicePrefix, "equinix_device_prefix", "shepherd-FIXME-", "Prefix applied to all devices (machines) in Equinix project, used to identify managed machines")
+	flag.StringVar(&pc.OS, "equinix_os", "ubuntu_20_04", "OS that provisioner will deploy on Equinix machines. Not the target OS for cluster customers.")
+	flag.BoolVar(&pc.UseProjectKeys, "equinix_use_project_keys", false, "Add all Equinix project keys to newly provisioned machines, not just the provisioner's managed key. Debug/development only.")
+	flag.IntVar(&pc.RebootWaitSeconds, "equinix_reboot_wait_seconds", 30, "How many seconds to sleep to ensure a reboot happened")
+	flag.DurationVar(&pc.ReservationCacheTimeout, "equinix_reservation_cache_timeout", time.Minute*15, "Reservation cache validity timeout")
+}
+
+func (pc *providerConfig) New(sshKey *manager.SSHKey, api wrapngo.Client) (*equinixProvider, error) {
+	if err := pc.check(); err != nil {
+		return nil, err
+	}
+
+	return &equinixProvider{
+		config: pc,
+		sshKey: sshKey,
+		api:    api,
+	}, nil
+}
diff --git a/cloud/shepherd/equinix/manager/provisioner_test.go b/cloud/shepherd/provider/equinix/provisioner_test.go
similarity index 83%
rename from cloud/shepherd/equinix/manager/provisioner_test.go
rename to cloud/shepherd/provider/equinix/provisioner_test.go
index 80a90b8..b57546a 100644
--- a/cloud/shepherd/equinix/manager/provisioner_test.go
+++ b/cloud/shepherd/provider/equinix/provisioner_test.go
@@ -1,4 +1,4 @@
-package manager
+package main
 
 import (
 	"context"
@@ -12,28 +12,37 @@
 	"source.monogon.dev/cloud/bmaas/bmdb"
 	"source.monogon.dev/cloud/bmaas/bmdb/model"
 	"source.monogon.dev/cloud/lib/component"
+	"source.monogon.dev/cloud/shepherd/manager"
 )
 
 // TestProvisionerSmokes makes sure the Provisioner doesn't go up in flames on
 // the happy path.
 func TestProvisionerSmokes(t *testing.T) {
-	pc := ProvisionerConfig{
-		OS:       "fake",
+	pc := manager.ProvisionerConfig{
 		MaxCount: 10,
 		// We need 3 iterations to provide 10 machines with a chunk size of 4.
 		ReconcileLoopLimiter:  rate.NewLimiter(rate.Every(10*time.Second), 3),
 		DeviceCreationLimiter: rate.NewLimiter(rate.Every(time.Second), 10),
-		ReservationChunkSize:  4,
+		ChunkSize:             4,
 	}
-	_, key, _ := ed25519.GenerateKey(rand.Reader)
-	sc := SharedConfig{
+	sc := providerConfig{
 		ProjectId:    "noproject",
 		KeyLabel:     "somekey",
-		Key:          key,
 		DevicePrefix: "test-",
 	}
+
+	_, key, _ := ed25519.GenerateKey(rand.Reader)
+	k := manager.SSHKey{
+		Key: key,
+	}
+
 	f := newFakequinix(sc.ProjectId, 100)
-	p, err := pc.New(f, &sc)
+	provider, err := sc.New(&k, f)
+	if err != nil {
+		t.Fatalf("Could not create Provider: %v", err)
+	}
+
+	p, err := manager.NewProvisioner(provider, pc)
 	if err != nil {
 		t.Fatalf("Could not create Provisioner: %v", err)
 	}
@@ -55,7 +64,7 @@
 		t.Fatalf("Could not create in-memory BMDB: %v", err)
 	}
 
-	if err := sc.SSHEquinixEnsure(ctx, f); err != nil {
+	if err := provider.SSHEquinixEnsure(ctx); err != nil {
 		t.Fatalf("Failed to ensure SSH key: %v", err)
 	}
 	go p.Run(ctx, conn)
diff --git a/cloud/shepherd/equinix/manager/recoverer_test.go b/cloud/shepherd/provider/equinix/recoverer_test.go
similarity index 85%
rename from cloud/shepherd/equinix/manager/recoverer_test.go
rename to cloud/shepherd/provider/equinix/recoverer_test.go
index 63e244e..109c375 100644
--- a/cloud/shepherd/equinix/manager/recoverer_test.go
+++ b/cloud/shepherd/provider/equinix/recoverer_test.go
@@ -1,7 +1,9 @@
-package manager
+package main
 
 import (
 	"context"
+	"crypto/ed25519"
+	"crypto/rand"
 	"testing"
 	"time"
 
@@ -11,11 +13,12 @@
 	"source.monogon.dev/cloud/bmaas/bmdb"
 	"source.monogon.dev/cloud/bmaas/bmdb/model"
 	"source.monogon.dev/cloud/lib/component"
+	"source.monogon.dev/cloud/shepherd/manager"
 )
 
 type recovererDut struct {
 	f    *fakequinix
-	r    *Recoverer
+	r    *manager.Recoverer
 	bmdb *bmdb.Connection
 	ctx  context.Context
 }
@@ -23,14 +26,30 @@
 func newRecovererDut(t *testing.T) *recovererDut {
 	t.Helper()
 
-	rc := RecovererConfig{
-		ControlLoopConfig: ControlLoopConfig{
+	rc := manager.RecovererConfig{
+		ControlLoopConfig: manager.ControlLoopConfig{
 			DBQueryLimiter: rate.NewLimiter(rate.Every(time.Second), 10),
 		},
 	}
 
-	f := newFakequinix("fake", 100)
-	r, err := NewRecoverer(f, rc)
+	sc := providerConfig{
+		ProjectId:    "noproject",
+		KeyLabel:     "somekey",
+		DevicePrefix: "test-",
+	}
+
+	_, key, _ := ed25519.GenerateKey(rand.Reader)
+	k := manager.SSHKey{
+		Key: key,
+	}
+
+	f := newFakequinix(sc.ProjectId, 100)
+	provider, err := sc.New(&k, f)
+	if err != nil {
+		t.Fatalf("Could not create Provider: %v", err)
+	}
+
+	r, err := manager.NewRecoverer(provider, rc)
 	if err != nil {
 		t.Fatalf("Could not create Initializer: %v", err)
 	}
@@ -52,7 +71,7 @@
 	ctx, ctxC := context.WithCancel(context.Background())
 	t.Cleanup(ctxC)
 
-	go RunControlLoop(ctx, conn, r)
+	go manager.RunControlLoop(ctx, conn, r)
 
 	return &recovererDut{
 		f:    f,
diff --git a/cloud/shepherd/equinix/manager/updater.go b/cloud/shepherd/provider/equinix/updater.go
similarity index 93%
rename from cloud/shepherd/equinix/manager/updater.go
rename to cloud/shepherd/provider/equinix/updater.go
index dd8c6ff..b053f26 100644
--- a/cloud/shepherd/equinix/manager/updater.go
+++ b/cloud/shepherd/provider/equinix/updater.go
@@ -1,4 +1,4 @@
-package manager
+package main
 
 import (
 	"context"
@@ -14,8 +14,8 @@
 	"source.monogon.dev/cloud/bmaas/bmdb"
 	"source.monogon.dev/cloud/bmaas/bmdb/metrics"
 	"source.monogon.dev/cloud/bmaas/bmdb/model"
+	ecl "source.monogon.dev/cloud/equinix/wrapngo"
 	"source.monogon.dev/cloud/lib/sinbin"
-	ecl "source.monogon.dev/cloud/shepherd/equinix/wrapngo"
 )
 
 type UpdaterConfig struct {
@@ -129,16 +129,16 @@
 // updateLog logs information about the given update as calculated by applyUpdate.
 func updateLog(up *model.MachineUpdateProviderStatusParams) {
 	if up.ProviderReservationID.Valid {
-		klog.Infof("   Device %s: new reservation ID %s", up.ProviderID, up.ProviderReservationID.String)
+		klog.Infof("   Machine %s: new reservation ID %s", up.ProviderID, up.ProviderReservationID.String)
 	}
 	if up.ProviderIpAddress.Valid {
-		klog.Infof("   Device %s: new IP address %s", up.ProviderID, up.ProviderIpAddress.String)
+		klog.Infof("   Machine %s: new IP address %s", up.ProviderID, up.ProviderIpAddress.String)
 	}
 	if up.ProviderLocation.Valid {
-		klog.Infof("   Device %s: new location %s", up.ProviderID, up.ProviderLocation.String)
+		klog.Infof("   Machine %s: new location %s", up.ProviderID, up.ProviderLocation.String)
 	}
 	if up.ProviderStatus.Valid {
-		klog.Infof("   Device %s: new status %s", up.ProviderID, up.ProviderStatus.ProviderStatus)
+		klog.Infof("   Machine %s: new status %s", up.ProviderID, up.ProviderStatus.ProviderStatus)
 	}
 }
 
diff --git a/cloud/shepherd/equinix/manager/updater_test.go b/cloud/shepherd/provider/equinix/updater_test.go
similarity index 99%
rename from cloud/shepherd/equinix/manager/updater_test.go
rename to cloud/shepherd/provider/equinix/updater_test.go
index 145129a..9b23295 100644
--- a/cloud/shepherd/equinix/manager/updater_test.go
+++ b/cloud/shepherd/provider/equinix/updater_test.go
@@ -1,4 +1,4 @@
-package manager
+package main
 
 import (
 	"context"
diff --git a/cloud/shepherd/shepherd.go b/cloud/shepherd/shepherd.go
new file mode 100644
index 0000000..3504eb7
--- /dev/null
+++ b/cloud/shepherd/shepherd.go
@@ -0,0 +1,119 @@
+package shepherd
+
+import (
+	"context"
+	"fmt"
+	"net/netip"
+
+	"source.monogon.dev/cloud/bmaas/bmdb"
+	"source.monogon.dev/cloud/bmaas/bmdb/model"
+)
+
+var ErrMachineNotFound = fmt.Errorf("machine not found")
+var ErrNotImplemented = fmt.Errorf("not implemented")
+
+// ProviderID is an opaque unique identifier for a machine within a single
+// provider instance. It is generated by the Provider and usually the same
+// as the ID of the machine within the system that the Provider manages.
+// The Shepherd (and BMaaS in general) requires these IDs to be unique
+// within a provider and stable.
+type ProviderID string
+
+const InvalidProviderID ProviderID = "invalid"
+
+// IsValid reports whether the ProviderID is valid.
+func (p ProviderID) IsValid() bool {
+	return p != InvalidProviderID
+}
+
+// State defines in which state the machine is.
+// See the different states for more information.
+type State int
+
+const (
+	// StateUndefined is used as a placeholder to prevent the default
+	// value from causing any kind of unintended behavior.
+	StateUndefined State = iota
+	// StatePossiblyUsed defines the state where a machine is possibly used,
+	// this is a state for use in stateless providers where the shepherd has
+	// to check against the bmdb if Machine.ID is already provisioned or not.
+	// These machines must have a valid ID and Addr.
+	StatePossiblyUsed
+	// StateKnownUnused defines the state where a machine is known to be free,
+	// e.g. a hardware reservation at equinix. These machines may not have an
+	// ID or Addr.
+	StateKnownUnused
+	// StateKnownUsed defines the state where a machine is known to be used,
+	// e.g. a deployed machine that is in use. These machines must have a
+	// valid ID and Addr.
+	StateKnownUsed
+)
+
+func (s State) String() string {
+	switch s {
+	case StateUndefined:
+		return "Undefined"
+	case StateKnownUnused:
+		return "KnownUnused"
+	case StateKnownUsed:
+		return "KnownUsed"
+	case StatePossiblyUsed:
+		return "PossiblyUsed"
+	default:
+		return fmt.Sprintf("<invalid value %d>", s)
+	}
+}
+
+type Machine interface {
+	// ID returns the provider id, see ProviderID for more information.
+	ID() ProviderID
+	// Addr returns the machine's IP address that is reachable from the
+	// shepherd. It is used to connect to the machine via SSH to execute
+	// all takeover tasks, etc.
+	Addr() netip.Addr
+	// State returns the state in which the machine is
+	State() State
+}
+
+type CreateMachineRequest struct {
+	// UnusedMachine resembles a machine to use as deployment target.
+	UnusedMachine Machine
+}
+
+// Provider is the interface that is used to abstract the interaction between
+// the shepherd and machine providers like Equinix. All methods inside this
+// interface must not be called concurrently.
+type Provider interface {
+	// ListMachines returns all existing machines for a provider. Machines
+	// that are still in the state of being created by CreateMachine should
+	// not be returned.
+	ListMachines(context.Context) ([]Machine, error)
+
+	// GetMachine returns an existing machine for a provider. Machines
+	// that are still in the state of being created by CreateMachine should
+	// not be returned. If there are no machines found after these filters,
+	// an error should be returned.
+	GetMachine(context.Context, ProviderID) (Machine, error)
+
+	// CreateMachine creates a new machine with the given parameters and
+	// returns the created instance. The provider is required to create the
+	// entry into the machine table and MachineProvided tag. If there are no
+	// more machines available, an error should be returned.
+	CreateMachine(context.Context, *bmdb.Session, CreateMachineRequest) (Machine, error)
+
+	// Type returns the value that represents this provider inside the database.
+	Type() model.Provider
+}
+
+type Recoverer interface {
+	Provider
+
+	// RebootMachine tries to bring a machine back from the dead by e.g. rebooting
+	RebootMachine(context.Context, ProviderID) error
+
+	// ReinstallMachine should reinstall the given machine and if the provider
+	// does not support reinstallation, the function should return an error
+	// stating this. If reinstalled, the installed tag should be updated to
+	// allow the reconcile loop to restart the takeover process.
+	ReinstallMachine(context.Context, ProviderID) error
+}