*: reflow comments to 80 characters
This reformats the entire Metropolis codebase to have comments no longer
than 80 characters, implementing CR/66.
This has been done half manually, as we don't have a good integration
between commentwrap/Bazel, but that can be implemented if we decide to
go for this tool/limit.
Change-Id: If1fff0b093ef806f5dc00551c11506e8290379d0
diff --git a/metropolis/node/build/genosrelease/main.go b/metropolis/node/build/genosrelease/main.go
index e19876e..ad6e3e2 100644
--- a/metropolis/node/build/genosrelease/main.go
+++ b/metropolis/node/build/genosrelease/main.go
@@ -14,8 +14,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// genosrelease provides rudimentary support to generate os-release files following the freedesktop spec
-// (https://www.freedesktop.org/software/systemd/man/os-release.html) from arguments and stamping
+// genosrelease provides rudimentary support to generate os-release files
+// following the freedesktop spec from arguments and stamping
+//
+// https://www.freedesktop.org/software/systemd/man/os-release.html
package main
import (
diff --git a/metropolis/node/build/mkerofs/main.go b/metropolis/node/build/mkerofs/main.go
index a05e440..ea89d67 100644
--- a/metropolis/node/build/mkerofs/main.go
+++ b/metropolis/node/build/mkerofs/main.go
@@ -14,8 +14,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// mkerofs takes a specification in the form of a prototext file (see fsspec next to this) and assembles an
-// EROFS filesystem according to it. The output is fully reproducible.
+// mkerofs takes a specification in the form of a prototext file (see fsspec
+// next to this) and assembles an EROFS filesystem according to it. The output
+// is fully reproducible.
package main
import (
@@ -99,9 +100,11 @@
children map[string]*entrySpec
}
-// pathRef gets the entrySpec at the leaf of the given path, inferring directories if necessary
+// pathRef gets the entrySpec at the leaf of the given path, inferring
+// directories if necessary
func (s *entrySpec) pathRef(p string) *entrySpec {
- // This block gets a path array starting at the root of the filesystem. The root folder is the zero-length array.
+ // This block gets a path array starting at the root of the filesystem. The
+ // root folder is the zero-length array.
pathParts := strings.Split(path.Clean("./"+p), "/")
if pathParts[0] == "." {
pathParts = pathParts[1:]
diff --git a/metropolis/node/build/mkimage/main.go b/metropolis/node/build/mkimage/main.go
index 5546055..f41d643 100644
--- a/metropolis/node/build/mkimage/main.go
+++ b/metropolis/node/build/mkimage/main.go
@@ -84,7 +84,8 @@
}
table := &gpt.Table{
- // This is appropriate at least for virtio disks. Might need to be adjusted for real ones.
+ // This is appropriate at least for virtio disks. Might need to be
+ // adjusted for real ones.
LogicalSectorSize: 512,
PhysicalSectorSize: 512,
ProtectiveMBR: true,
@@ -165,7 +166,8 @@
log.Fatalf("os.Open(%q): %v", src, err)
}
defer source.Close()
- // If this is streamed (e.g. using io.Copy) it exposes a bug in diskfs, so do it in one go.
+ // If this is streamed (e.g. using io.Copy) it exposes a bug in diskfs, so
+ // do it in one go.
data, err := ioutil.ReadAll(source)
if err != nil {
log.Fatalf("Reading %q: %v", src, err)
diff --git a/metropolis/node/core/consensus/ca/ca.go b/metropolis/node/core/consensus/ca/ca.go
index 6bcc19b..e0c56b0 100644
--- a/metropolis/node/core/consensus/ca/ca.go
+++ b/metropolis/node/core/consensus/ca/ca.go
@@ -73,19 +73,21 @@
CACert *x509.Certificate
CACertRaw []byte
- // bootstrapIssued are certificates that have been issued by the CA before it has been successfully Saved to etcd.
+ // bootstrapIssued are certificates that have been issued by the CA before
+ // it has been successfully Saved to etcd.
bootstrapIssued [][]byte
- // canBootstrapIssue is set on CAs that have been created by New and not yet stored to etcd. If not set,
- // certificates cannot be issued in-memory.
+ // canBootstrapIssue is set on CAs that have been created by New and not
+ // yet stored to etcd. If not set, certificates cannot be issued in-memory.
canBootstrapIssue bool
}
-// Workaround for https://github.com/golang/go/issues/26676 in Go's crypto/x509. Specifically Go
-// violates Section 4.2.1.2 of RFC 5280 without this.
+// Workaround for https://github.com/golang/go/issues/26676 in Go's
+// crypto/x509. Specifically Go violates Section 4.2.1.2 of RFC 5280 without
+// this.
// Fixed for 1.15 in https://go-review.googlesource.com/c/go/+/227098/.
//
-// Taken from https://github.com/FiloSottile/mkcert/blob/master/cert.go#L295 written by one of Go's
-// crypto engineers (BSD 3-clause).
+// Taken from https://github.com/FiloSottile/mkcert/blob/master/cert.go#L295
+// written by one of Go's crypto engineers (BSD 3-clause).
func calculateSKID(pubKey crypto.PublicKey) ([]byte, error) {
spkiASN1, err := x509.MarshalPKIXPublicKey(pubKey)
if err != nil {
@@ -104,8 +106,9 @@
return skid[:], nil
}
-// New creates a new certificate authority with the given common name. The newly created CA will be stored in memory
-// until committed to etcd by calling .Save.
+// New creates a new certificate authority with the given common name. The
+// newly created CA will be stored in memory until committed to etcd by calling
+// .Save.
func New(name string) (*CA, error) {
pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
@@ -159,7 +162,8 @@
resp, err := kv.Txn(ctx).Then(
clientv3.OpGet(pathCACertificate),
clientv3.OpGet(pathCAKey),
- // We only read the CRL to ensure it exists on etcd (and early fail on inconsistency)
+ // We only read the CRL to ensure it exists on etcd (and early fail on
+ // inconsistency)
clientv3.OpGet(pathCACRL)).Commit()
if err != nil {
return nil, fmt.Errorf("failed to retrieve CA from etcd: %w", err)
@@ -198,7 +202,8 @@
}, nil
}
-// Save stores a newly created CA into etcd, committing both the CA data and any certificates issued until then.
+// Save stores a newly created CA into etcd, committing both the CA data and
+// any certificates issued until then.
func (c *CA) Save(ctx context.Context, kv clientv3.KV) error {
crl, err := c.makeCRL(nil)
if err != nil {
@@ -233,8 +238,9 @@
return nil
}
-// Issue issues a certificate. If kv is non-nil, the newly issued certificate will be immediately stored to etcd,
-// otherwise it will be kept in memory (until .Save is called). Certificates can only be issued to memory on
+// Issue issues a certificate. If kv is non-nil, the newly issued certificate
+// will be immediately stored to etcd, otherwise it will be kept in memory
+// (until .Save is called). Certificates can only be issued to memory on
// newly-created CAs that have not been saved to etcd yet.
func (c *CA) Issue(ctx context.Context, kv clientv3.KV, commonName string) (cert []byte, privkey []byte, err error) {
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 127)
@@ -297,9 +303,10 @@
return crl, nil
}
-// Revoke revokes a certificate by hostname. The selected hostname will be added to the CRL stored in etcd. This call
-// might fail (safely) if a simultaneous revoke happened that caused the CRL to be bumped. The call can be then retried
-// safely.
+// Revoke revokes a certificate by hostname. The selected hostname will be
+// added to the CRL stored in etcd. This call might fail (safely) if a
+// simultaneous revoke happened that caused the CRL to be bumped. The call can
+// be then retried safely.
func (c *CA) Revoke(ctx context.Context, kv clientv3.KV, hostname string) error {
res, err := kv.Txn(ctx).Then(
clientv3.OpGet(pathCACRL),
@@ -383,8 +390,9 @@
return nil
}
-// WaitCRLChange returns a channel that will receive a CRLUpdate any time the remote CRL changed. Immediately after
-// calling this method, the current CRL is retrieved from the cluster and put into the channel.
+// WaitCRLChange returns a channel that will receive a CRLUpdate any time the
+// remote CRL changed. Immediately after calling this method, the current CRL
+// is retrieved from the cluster and put into the channel.
func (c *CA) WaitCRLChange(ctx context.Context, kv clientv3.KV, w clientv3.Watcher) <-chan CRLUpdate {
C := make(chan CRLUpdate)
@@ -424,16 +432,20 @@
return C
}
-// CRLUpdate is emitted for every remote CRL change, and spuriously on ever new WaitCRLChange.
+// CRLUpdate is emitted for every remote CRL change, and spuriously on every
+// new WaitCRLChange.
type CRLUpdate struct {
- // The new (or existing, in the case of the first call) CRL. If nil, Err will be set.
+ // The new (or existing, in the case of the first call) CRL. If nil, Err
+ // will be set.
CRL []byte
- // If set, an error occurred and the WaitCRLChange call must be restarted. If set, CRL will be nil.
+ // If set, an error occurred and the WaitCRLChange call must be restarted.
+ // If set, CRL will be nil.
Err error
}
-// GetCurrentCRL returns the current CRL for the CA. This should only be used for one-shot operations like
-// bootstrapping a new node that doesn't yet have access to etcd - otherwise, WaitCRLChange shoulde be used.
+// GetCurrentCRL returns the current CRL for the CA. This should only be used
+// for one-shot operations like bootstrapping a new node that doesn't yet have
+// access to etcd - otherwise, WaitCRLChange should be used.
func (c *CA) GetCurrentCRL(ctx context.Context, kv clientv3.KV) ([]byte, error) {
initial, err := kv.Get(ctx, pathCACRL)
if err != nil {
diff --git a/metropolis/node/core/consensus/consensus.go b/metropolis/node/core/consensus/consensus.go
index 683db19..ed44140 100644
--- a/metropolis/node/core/consensus/consensus.go
+++ b/metropolis/node/core/consensus/consensus.go
@@ -14,19 +14,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package consensus implements a managed etcd cluster member service, with a self-hosted CA system for issuing peer
-// certificates. Currently each Metropolis node runs an etcd member, and connects to the etcd member locally over a
-// domain socket.
+// Package consensus implements a managed etcd cluster member service, with a
+// self-hosted CA system for issuing peer certificates. Currently each
+// Metropolis node runs an etcd member, and connects to the etcd member locally
+// over a domain socket.
//
// The service supports two modes of startup:
-// - initializing a new cluster, by bootstrapping the CA in memory, starting a cluster, committing the CA to etcd
-// afterwards, and saving the new node's certificate to local storage
-// - joining an existing cluster, using certificates from local storage and loading the CA from etcd. This flow is also
-// used when the node joins a cluster for the first time (then the certificates required must be provisioned
-// externally before starting the consensus service).
+// - initializing a new cluster, by bootstrapping the CA in memory, starting a
+// cluster, committing the CA to etcd afterwards, and saving the new node's
+// certificate to local storage
+// - joining an existing cluster, using certificates from local storage and
+// loading the CA from etcd. This flow is also used when the node joins a
+// cluster for the first time (then the certificates required must be
+// provisioned externally before starting the consensus service).
//
-// Regardless of how the etcd member service was started, the resulting running service is further managed and used
-// in the same way.
+// Regardless of how the etcd member service was started, the resulting running
+// service is further managed and used in the same way.
//
package consensus
@@ -59,8 +62,9 @@
// The configuration with which the service was started. This is immutable.
config *Config
- // stateMu guards state. This is locked internally on public methods of Service that require access to state. The
- // state might be recreated on service restart.
+ // stateMu guards state. This is locked internally on public methods of
+ // Service that require access to state. The state might be recreated on
+ // service restart.
stateMu sync.Mutex
state *state
}
@@ -71,8 +75,8 @@
ready atomic.Bool
ca *ca.CA
- // cl is an etcd client that loops back to the localy running etcd server. This runs over the Client unix domain
- // socket that etcd starts.
+// cl is an etcd client that loops back to the locally running etcd server.
+ // This runs over the Client unix domain socket that etcd starts.
cl *clientv3.Client
}
@@ -82,16 +86,19 @@
// Ephemeral directory for etcd.
Ephemeral *localstorage.EphemeralConsensusDirectory
- // Name is the cluster name. This must be the same amongst all etcd members within one cluster.
+ // Name is the cluster name. This must be the same amongst all etcd members
+ // within one cluster.
Name string
- // NewCluster selects whether the etcd member will start a new cluster and bootstrap a CA and the first member
- // certificate, or load existing PKI certificates from disk.
+ // NewCluster selects whether the etcd member will start a new cluster and
+ // bootstrap a CA and the first member certificate, or load existing PKI
+ // certificates from disk.
NewCluster bool
- // Port is the port at which this cluster member will listen for other members. If zero, defaults to the global
- // Metropolis setting.
+ // Port is the port at which this cluster member will listen for other
+ // members. If zero, defaults to the global Metropolis setting.
Port int
- // externalHost is used by tests to override the address at which etcd should listen for peer connections.
+ // externalHost is used by tests to override the address at which etcd
+ // should listen for peer connections.
externalHost string
}
@@ -101,8 +108,8 @@
}
}
-// configure transforms the service configuration into an embedded etcd configuration. This is pure and side effect
-// free.
+// configure transforms the service configuration into an embedded etcd
+// configuration. This is pure and side effect free.
func (s *Service) configure(ctx context.Context) (*embed.Config, error) {
if err := s.config.Ephemeral.MkdirAll(0700); err != nil {
return nil, fmt.Errorf("failed to create ephemeral directory: %w", err)
@@ -166,8 +173,8 @@
return cfg, nil
}
-// Run is a Supervisor runnable that starts the etcd member service. It will become healthy once the member joins the
-// cluster successfully.
+// Run is a Supervisor runnable that starts the etcd member service. It will
+// become healthy once the member joins the cluster successfully.
func (s *Service) Run(ctx context.Context) error {
st := &state{
ready: *atomic.NewBool(false),
@@ -298,8 +305,8 @@
return ctx.Err()
}
-// watchCRL is a sub-runnable of the etcd cluster member service that updates the on-local-storage CRL to match the
-// newest available version in etcd.
+// watchCRL is a sub-runnable of the etcd cluster member service that updates
+// the on-local-storage CRL to match the newest available version in etcd.
func (s *Service) watchCRL(ctx context.Context) error {
s.stateMu.Lock()
cl := s.state.cl
@@ -339,9 +346,10 @@
continue
}
- // We always call PromoteMember since the metadata necessary to decide if we should is private.
- // Luckily etcd already does sanity checks internally and will refuse to promote nodes that aren't
- // connected or are still behind on transactions.
+ // We always call PromoteMember since the metadata necessary to
+ // decide if we should is private. Luckily etcd already does
+ // sanity checks internally and will refuse to promote nodes that
+ // aren't connected or are still behind on transactions.
if _, err := st.etcd.Server.PromoteMember(ctx, uint64(member.ID)); err != nil {
supervisor.Logger(ctx).Infof("Failed to promote consensus node %s: %v", member.Name, err)
} else {
@@ -371,7 +379,8 @@
}
func (s *Service) WaitReady(ctx context.Context) error {
- // TODO(q3k): reimplement the atomic ready flag as an event synchronization mechanism
+ // TODO(q3k): reimplement the atomic ready flag as an event synchronization
+ // mechanism
if s.IsReady() {
return nil
}
@@ -411,8 +420,9 @@
return s.state.cl.Cluster
}
-// MemberInfo returns information about this etcd cluster member: its ID and name. This will block until this
-// information is available (ie. the cluster status is Ready).
+// MemberInfo returns information about this etcd cluster member: its ID and
+// name. This will block until this information is available (ie. the cluster
+// status is Ready).
func (s *Service) MemberInfo(ctx context.Context) (id uint64, name string, err error) {
if err = s.WaitReady(ctx); err != nil {
err = fmt.Errorf("when waiting for cluster readiness: %w", err)
diff --git a/metropolis/node/core/debug_service.go b/metropolis/node/core/debug_service.go
index 964642b..e0f7753 100644
--- a/metropolis/node/core/debug_service.go
+++ b/metropolis/node/core/debug_service.go
@@ -44,8 +44,10 @@
cluster *cluster.Manager
kubernetes *kubernetes.Service
logtree *logtree.LogTree
- // traceLock provides exclusive access to the Linux tracing infrastructure (ftrace)
- // This is a channel because Go's mutexes can't be cancelled or be acquired in a non-blocking way.
+ // traceLock provides exclusive access to the Linux tracing infrastructure
+// (ftrace).
+ // This is a channel because Go's mutexes can't be cancelled or be acquired
+ // in a non-blocking way.
traceLock chan struct{}
}
@@ -192,7 +194,8 @@
}
}
-// Validate property names as they are used in path construction and we really don't want a path traversal vulnerability
+// Validate property names as they are used in path construction and we really
+// don't want a path traversal vulnerability
var safeTracingPropertyNamesRe = regexp.MustCompile("^[a-z0-9_]+$")
func writeTracingProperty(name string, value string) error {
diff --git a/metropolis/node/core/delve_enabled.go b/metropolis/node/core/delve_enabled.go
index 24ea470..9df937d 100644
--- a/metropolis/node/core/delve_enabled.go
+++ b/metropolis/node/core/delve_enabled.go
@@ -25,13 +25,16 @@
"source.monogon.dev/metropolis/node/core/network"
)
-// initializeDebugger attaches Delve to ourselves and exposes it on common.DebuggerPort
-// This is coupled to compilation_mode=dbg because otherwise Delve doesn't have the necessary DWARF debug info
+// initializeDebugger attaches Delve to ourselves and exposes it on
+// common.DebuggerPort
+// This is coupled to compilation_mode=dbg because otherwise Delve doesn't have
+// the necessary DWARF debug info
func initializeDebugger(networkSvc *network.Service) {
go func() {
- // This is intentionally delayed until network becomes available since Delve for some reason connects to itself
- // and in early-boot no network interface is available to do that through. Also external access isn't possible
- // early on anyways.
+ // This is intentionally delayed until network becomes available since
+ // Delve for some reason connects to itself and in early-boot no
+ // network interface is available to do that through. Also external
+ // access isn't possible early on anyways.
watcher := networkSvc.Watch()
_, err := watcher.Get(context.Background())
if err != nil {
diff --git a/metropolis/node/core/localstorage/crypt/blockdev.go b/metropolis/node/core/localstorage/crypt/blockdev.go
index 7f874b7..6ea1b49 100644
--- a/metropolis/node/core/localstorage/crypt/blockdev.go
+++ b/metropolis/node/core/localstorage/crypt/blockdev.go
@@ -31,10 +31,14 @@
)
var (
- // EFIPartitionType is the standardized partition type value for the EFI ESP partition. The human readable GUID is C12A7328-F81F-11D2-BA4B-00A0C93EC93B.
+ // EFIPartitionType is the standardized partition type value for the EFI
+ // ESP partition. The human readable GUID is
+ // C12A7328-F81F-11D2-BA4B-00A0C93EC93B.
EFIPartitionType = gpt.PartType{0x28, 0x73, 0x2a, 0xc1, 0x1f, 0xf8, 0xd2, 0x11, 0xba, 0x4b, 0x00, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b}
- // NodeDataPartitionType is the partition type value for a Metropolis Node data partition. The human-readable GUID is 9eeec464-6885-414a-b278-4305c51f7966.
+ // NodeDataPartitionType is the partition type value for a Metropolis Node
+ // data partition. The human-readable GUID is
+ // 9eeec464-6885-414a-b278-4305c51f7966.
NodeDataPartitionType = gpt.PartType{0x64, 0xc4, 0xee, 0x9e, 0x85, 0x68, 0x4a, 0x41, 0xb2, 0x78, 0x43, 0x05, 0xc5, 0x1f, 0x79, 0x66}
)
@@ -43,9 +47,9 @@
NodeDataCryptPath = "/dev/data-crypt"
)
-// MakeBlockDevices looks for the ESP and the node data partition and maps them to ESPDevicePath and
-// NodeDataCryptPath respectively. This doesn't fail if it doesn't find the partitions, only if
-// something goes catastrophically wrong.
+// MakeBlockDevices looks for the ESP and the node data partition and maps them
+// to ESPDevicePath and NodeDataCryptPath respectively. This doesn't fail if it
+// doesn't find the partitions, only if something goes catastrophically wrong.
func MakeBlockDevices(ctx context.Context) error {
blockdevNames, err := ioutil.ReadDir("/sys/class/block")
if err != nil {
diff --git a/metropolis/node/core/localstorage/crypt/crypt.go b/metropolis/node/core/localstorage/crypt/crypt.go
index a6f5006..7b994d2 100644
--- a/metropolis/node/core/localstorage/crypt/crypt.go
+++ b/metropolis/node/core/localstorage/crypt/crypt.go
@@ -35,7 +35,7 @@
}
defer integrityPartition.Close()
// Based on structure defined in
- // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/md/dm-integrity.c#n59
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/md/dm-integrity.c#n59
if _, err := integrityPartition.Seek(16, 0); err != nil {
return 0, err
}
diff --git a/metropolis/node/core/localstorage/crypt/crypt_debug.go b/metropolis/node/core/localstorage/crypt/crypt_debug.go
index db59809..2fbdc50 100644
--- a/metropolis/node/core/localstorage/crypt/crypt_debug.go
+++ b/metropolis/node/core/localstorage/crypt/crypt_debug.go
@@ -22,8 +22,8 @@
"golang.org/x/sys/unix"
)
-// CryptMap implements a debug version of CryptMap from crypt.go. It aliases the given baseName device into name
-// without any encryption.
+// CryptMap implements a debug version of CryptMap from crypt.go. It aliases
+// the given baseName device into name without any encryption.
func CryptMap(name string, baseName string, _ []byte) error {
var stat unix.Stat_t
if err := unix.Stat(baseName, &stat); err != nil {
@@ -36,8 +36,9 @@
return nil
}
-// CryptInit implements a debug version of CryptInit from crypt.go. It aliases the given baseName device into name
-// without any encryption. As an identity mapping doesn't need any initialization it doesn't do anything else.
+// CryptInit implements a debug version of CryptInit from crypt.go. It aliases
+// the given baseName device into name without any encryption. As an identity
+// mapping doesn't need any initialization it doesn't do anything else.
func CryptInit(name, baseName string, encryptionKey []byte) error {
return CryptMap(name, baseName, encryptionKey)
}
diff --git a/metropolis/node/core/localstorage/declarative/declarative.go b/metropolis/node/core/localstorage/declarative/declarative.go
index ce82c42..4f9b087 100644
--- a/metropolis/node/core/localstorage/declarative/declarative.go
+++ b/metropolis/node/core/localstorage/declarative/declarative.go
@@ -22,31 +22,38 @@
"strings"
)
-// Directory represents the intent of existence of a directory in a hierarchical filesystem (simplified to a tree).
-// This structure can be embedded and still be interpreted as a Directory for purposes of use within this library. Any
-// inner fields of such an embedding structure that are in turn (embedded) Directories or files will be treated as
-// children in the intent expressed by this Directory. All contained directory fields must have a `dir:"name"` struct
-// tag that names them, and all contained file fields must have a `file:"name"` struct tag.
+// Directory represents the intent of existence of a directory in a
+// hierarchical filesystem (simplified to a tree). This structure can be
+// embedded and still be interpreted as a Directory for purposes of use within
+// this library. Any inner fields of such an embedding structure that are in
+// turn (embedded) Directories or files will be treated as children in the
+// intent expressed by this Directory. All contained directory fields must have
+// a `dir:"name"` struct tag that names them, and all contained file fields
+// must have a `file:"name"` struct tag.
//
-// Creation and management of the directory at runtime is left to the implementing code. However, the DirectoryPlacement
-// implementation (set as the directory is placed onto a backing store) facilitates this management (by exposing methods
-// that mutate the backing store).
+// Creation and management of the directory at runtime is left to the
+// implementing code. However, the DirectoryPlacement implementation (set as
+// the directory is placed onto a backing store) facilitates this management
+// (by exposing methods that mutate the backing store).
type Directory struct {
DirectoryPlacement
}
-// File represents the intent of existence of a file. files are usually child structures in types that embed Directory.
-// File can also be embedded in another structure, and this embedding type will still be interpreted as a File for
-// purposes of use within this library.
+// File represents the intent of existence of a file. Files are usually child
+// structures in types that embed Directory. File can also be embedded in
+// another structure, and this embedding type will still be interpreted as a
+// File for purposes of use within this library.
//
-// As with Directory, the runtime management of a File in a backing store is left to the implementing code, and the
-// embedded FilePlacement interface facilitates access to the backing store.
+// As with Directory, the runtime management of a File in a backing store is
+// left to the implementing code, and the embedded FilePlacement interface
+// facilitates access to the backing store.
type File struct {
FilePlacement
}
-// unpackDirectory takes a pointer to Directory or a pointer to a structure embedding Directory, and returns a
-// reflection Value that refers to the passed structure itself (not its pointer) and a plain Go pointer to the
+// unpackDirectory takes a pointer to Directory or a pointer to a structure
+// embedding Directory, and returns a reflection Value that refers to the
+// passed structure itself (not its pointer) and a plain Go pointer to the
// (embedded) Directory.
func unpackDirectory(d interface{}) (*reflect.Value, *Directory, error) {
td := reflect.TypeOf(d)
@@ -68,8 +75,9 @@
return &id, dir, nil
}
-// unpackFile takes a pointer to a File or a pointer to a structure embedding File, and returns a reflection Value that
-// refers to the passed structure itself (not its pointer) and a plain Go pointer to the (embedded) File.
+// unpackFile takes a pointer to a File or a pointer to a structure embedding
+// File, and returns a reflection Value that refers to the passed structure
+// itself (not its pointer) and a plain Go pointer to the (embedded) File.
func unpackFile(f interface{}) (*reflect.Value, *File, error) {
tf := reflect.TypeOf(f)
if tf.Kind() != reflect.Ptr {
@@ -91,8 +99,10 @@
}
-// subdirs takes a pointer to a Directory or pointer to a structure embedding Directory, and returns a pair of pointers
-// to Directory-like structures contained within that directory with corresponding names (based on struct tags).
+// subdirs takes a pointer to a Directory or pointer to a structure embedding
+// Directory, and returns a pair of pointers to Directory-like structures
+// contained within that directory with corresponding names (based on struct
+// tags).
func subdirs(d interface{}) ([]namedDirectory, error) {
s, _, err := unpackDirectory(d)
if err != nil {
@@ -117,8 +127,9 @@
directory interface{}
}
-// files takes a pointer to a File or pointer to a structure embedding File, and returns a pair of pointers
-// to Directory-like structures contained within that directory with corresponding names (based on struct tags).
+// files takes a pointer to a File or pointer to a structure embedding File,
+// and returns a pair of pointers to Directory-like structures contained within
+// that directory with corresponding names (based on struct tags).
func files(d interface{}) ([]namedFile, error) {
s, _, err := unpackDirectory(d)
if err != nil {
@@ -146,12 +157,13 @@
file *File
}
-// Validate checks that a given pointer to a Directory or pointer to a structure containing Directory does not contain
-// any programmer errors in its definition:
+// Validate checks that a given pointer to a Directory or pointer to a
+// structure containing Directory does not contain any programmer errors in its
+// definition:
// - all subdirectories/files must be named
// - all subdirectory/file names within a directory must be unique
-// - all subdirectory/file names within a directory must not contain the '/' character (as it is a common path
-// delimiter)
+// - all subdirectory/file names within a directory must not contain the '/'
+// character (as it is a common path delimiter)
func Validate(d interface{}) error {
names := make(map[string]bool)
diff --git a/metropolis/node/core/localstorage/declarative/placement.go b/metropolis/node/core/localstorage/declarative/placement.go
index c2ff53d..a59912c 100644
--- a/metropolis/node/core/localstorage/declarative/placement.go
+++ b/metropolis/node/core/localstorage/declarative/placement.go
@@ -21,21 +21,25 @@
"os"
)
-// A declarative Directory/File tree is an abstract definition until it's 'placed' on a backing file system.
-// By convention, all abstract definitions of hierarchies are stored as copiable structs, and only turned to pointers
-// when placed (ie., implementations like PlaceFS takes a *Directory, but Root as a declarative definition is defined as
-// non-pointer).
+// A declarative Directory/File tree is an abstract definition until it's
+// 'placed' on a backing file system. By convention, all abstract definitions
+// of hierarchies are stored as copiable structs, and only turned to pointers
+// when placed (ie., implementations like PlaceFS takes a *Directory, but Root
+// as a declarative definition is defined as non-pointer).
-// Placement is an interface available on Placed files and Directories. All *Placement interfaces on files/Directories
-// are only available on placed trees - eg., after a PlaceFS call. This is unfortunately not typesafe, callers need to
-// either be sure about placement, or check the interface for null.
+// Placement is an interface available on Placed files and Directories. All
+// *Placement interfaces on files/Directories are only available on placed
+// trees - eg., after a PlaceFS call. This is unfortunately not typesafe,
+// callers need to either be sure about placement, or check the interface for
+// null.
type Placement interface {
FullPath() string
RootRef() interface{}
}
-// FilePlacement is an interface available on Placed files. It is implemented by different placement backends, and
-// set on all files during placement by a given backend.
+// FilePlacement is an interface available on Placed files. It is implemented
+// by different placement backends, and set on all files during placement by a
+// given backend.
type FilePlacement interface {
Placement
Exists() (bool, error)
@@ -43,26 +47,30 @@
Write([]byte, os.FileMode) error
}
-// DirectoryPlacement is an interface available on Placed Directories. It is implemented by different placement
-// backends, and set on all directories during placement by a given backend.
+// DirectoryPlacement is an interface available on Placed Directories. It is
+// implemented by different placement backends, and set on all directories
+// during placement by a given backend.
type DirectoryPlacement interface {
Placement
- // MkdirAll creates this directory and all its parents on backing stores that have a physical directory
- // structure.
+ // MkdirAll creates this directory and all its parents on backing stores
+ // that have a physical directory structure.
MkdirAll(file os.FileMode) error
}
-// DirectoryPlacer is a placement backend-defined function that, given the path returned by the parent of a directory,
-// and the path to a directory, returns a DirectoryPlacement implementation for this directory. The new placement's
-// path (via .FullPath()) will be used for placement of directories/files within the new directory.
+// DirectoryPlacer is a placement backend-defined function that, given the path
+// returned by the parent of a directory, and the path to a directory, returns
+// a DirectoryPlacement implementation for this directory. The new placement's
+// path (via .FullPath()) will be used for placement of directories/files
+// within the new directory.
type DirectoryPlacer func(parent, this string) DirectoryPlacement
// FilePlacer is analogous to DirectoryPlacer, but for files.
type FilePlacer func(parent, this string) FilePlacement
-// place recursively places a pointer to a Directory or pointer to a structure embedding Directory into a given backend,
-// by calling DirectoryPlacer and FilePlacer where appropriate. This is done recursively across a declarative tree until
-// all children are placed.
+// place recursively places a pointer to a Directory or pointer to a structure
+// embedding Directory into a given backend, by calling DirectoryPlacer and
+// FilePlacer where appropriate. This is done recursively across a declarative
+// tree until all children are placed.
func place(d interface{}, parent, this string, dpl DirectoryPlacer, fpl FilePlacer) error {
_, dir, err := unpackDirectory(d)
if err != nil {
diff --git a/metropolis/node/core/localstorage/declarative/placement_local.go b/metropolis/node/core/localstorage/declarative/placement_local.go
index 3f7b1dd..43921cd 100644
--- a/metropolis/node/core/localstorage/declarative/placement_local.go
+++ b/metropolis/node/core/localstorage/declarative/placement_local.go
@@ -27,7 +27,8 @@
// FSRoot is a root of a storage backend that resides on the local filesystem.
type FSRoot struct {
- // The local path at which the declarative directory structure is located (eg. "/").
+ // The local path at which the declarative directory structure is located
+ // (eg. "/").
root string
}
@@ -83,8 +84,9 @@
return os.MkdirAll(f.FullPath(), perm)
}
-// PlaceFS takes a pointer to a Directory or a pointer to a structure embedding Directory and places it at a given
-// filesystem root. From this point on the given structure pointer has valid Placement interfaces.
+// PlaceFS takes a pointer to a Directory or a pointer to a structure embedding
+// Directory and places it at a given filesystem root. From this point on the
+// given structure pointer has valid Placement interfaces.
func PlaceFS(dd interface{}, root string) error {
r := &FSRoot{root}
pathFor := func(parent, this string) string {
diff --git a/metropolis/node/core/localstorage/directory_pki.go b/metropolis/node/core/localstorage/directory_pki.go
index 41b6729..b9ab3a0 100644
--- a/metropolis/node/core/localstorage/directory_pki.go
+++ b/metropolis/node/core/localstorage/directory_pki.go
@@ -44,7 +44,8 @@
// This has no SANs because it authenticates by public key, not by name
return x509.Certificate{
Subject: pkix.Name{
- // We identify nodes by their ID public keys (not hashed since a strong hash is longer and serves no benefit)
+ // We identify nodes by their ID public keys (not hashed since a
+ // strong hash is longer and serves no benefit)
CommonName: name,
},
IsCA: false,
@@ -137,8 +138,8 @@
}, nil
}
-// AllExist returns true if all PKI files (cert, key, CA cert) are present on the backing
-// store.
+// AllExist returns true if all PKI files (cert, key, CA cert) are present on
+// the backing store.
func (p *PKIDirectory) AllExist() (bool, error) {
for _, d := range []*declarative.File{&p.CACertificate, &p.Certificate, &p.Key} {
exists, err := d.Exists()
@@ -152,8 +153,8 @@
return true, nil
}
-// AllAbsent returns true if all PKI files (cert, key, CA cert) are missing from the backing
-// store.
+// AllAbsent returns true if all PKI files (cert, key, CA cert) are missing
+// from the backing store.
func (p *PKIDirectory) AllAbsent() (bool, error) {
for _, d := range []*declarative.File{&p.CACertificate, &p.Certificate, &p.Key} {
exists, err := d.Exists()
diff --git a/metropolis/node/core/localstorage/storage.go b/metropolis/node/core/localstorage/storage.go
index 0c4f641..e56b4eb 100644
--- a/metropolis/node/core/localstorage/storage.go
+++ b/metropolis/node/core/localstorage/storage.go
@@ -16,20 +16,24 @@
package localstorage
-// Localstorage is a replacement for the old 'storage' internal library. It is currently unused, but will become
-// so as the node code gets rewritten.
+// Localstorage is a replacement for the old 'storage' internal library. It is
+// currently unused, but will become used as the node code gets rewritten.
-// The library is centered around the idea of a declarative filesystem tree defined as mutually recursive Go structs.
-// This structure is then Placed onto an abstract real filesystem (eg. a local POSIX filesystem at /), and a handle
-// to that placed filesystem is then used by the consumers of this library to refer to subsets of the tree (that now
-// correspond to locations on a filesystem).
+// The library is centered around the idea of a declarative filesystem tree
+// defined as mutually recursive Go structs. This structure is then Placed
+// onto an abstract real filesystem (eg. a local POSIX filesystem at /), and a
+// handle to that placed filesystem is then used by the consumers of this
+// library to refer to subsets of the tree (that now correspond to locations on
+// a filesystem).
//
-// Every member of the storage hierarchy must either be, or inherit from Directory or File. In order to be placed
-// correctly, Directory embedding structures must use `dir:` or `file:` tags for child Directories and files
-// respectively. The content of the tag specifies the path part that this element will be placed at.
+// Every member of the storage hierarchy must either be, or inherit from
+// Directory or File. In order to be placed correctly, Directory embedding
+// structures must use `dir:` or `file:` tags for child Directories and files
+// respectively. The content of the tag specifies the path part that this
+// element will be placed at.
//
-// Full placement path(available via FullPath()) format is placement implementation-specific. However, they're always
-// strings.
+// Full placement path (available via FullPath()) format is placement
+// implementation-specific. However, they're always strings.
import (
"sync"
@@ -43,9 +47,11 @@
ESP ESPDirectory `dir:"esp"`
// Persistent Data partition, mounted from encrypted and authenticated storage.
Data DataDirectory `dir:"data"`
- // FHS-standard /etc directory, containes /etc/hosts, /etc/machine-id, and other compatibility files.
+ // FHS-standard /etc directory, contains /etc/hosts, /etc/machine-id, and
+ // other compatibility files.
Etc EtcDirectory `dir:"etc"`
- // Ephemeral data, used by runtime, stored in tmpfs. Things like sockets, temporary config files, etc.
+ // Ephemeral data, used by runtime, stored in tmpfs. Things like sockets,
+ // temporary config files, etc.
Ephemeral EphemeralDirectory `dir:"ephemeral"`
// FHS-standard /tmp directory, used by ioutil.TempFile.
Tmp TmpDirectory `dir:"tmp"`
@@ -60,15 +66,18 @@
Key declarative.File `file:"cert-key.pem"`
}
-// DataDirectory is an xfs partition mounted via cryptsetup/LUKS, with a key derived from {global,local}Unlock keys.
+// DataDirectory is an xfs partition mounted via cryptsetup/LUKS, with a key
+// derived from {global,local}Unlock keys.
type DataDirectory struct {
declarative.Directory
// flagLock locks canMount and mounted.
flagLock sync.Mutex
- // canMount is set by Root when it is initialized. It is required to be set for mounting the data directory.
+ // canMount is set by Root when it is initialized. It is required to be set
+ // for mounting the data directory.
canMount bool
- // mounted is set by DataDirectory when it is mounted. It ensures it's only mounted once.
+ // mounted is set by DataDirectory when it is mounted. It ensures it's only
+ // mounted once.
mounted bool
Containerd declarative.Directory `dir:"containerd"`
@@ -108,11 +117,13 @@
DevicePlugins struct {
declarative.Directory
- // Used by Kubelet, hardcoded relative to DataKubernetesKubeletDirectory
+ // Used by Kubelet, hardcoded relative to
+ // DataKubernetesKubeletDirectory
Kubelet declarative.File `file:"kubelet.sock"`
} `dir:"device-plugins"`
- // Pod logs, hardcoded to /data/kubelet/logs in @com_github_kubernetes//pkg/kubelet/kuberuntime:kuberuntime_manager.go
+ // Pod logs, hardcoded to /data/kubelet/logs in
+ // @com_github_kubernetes//pkg/kubelet/kuberuntime:kuberuntime_manager.go
Logs declarative.Directory `dir:"logs"`
Plugins struct {
@@ -134,8 +145,10 @@
type EtcDirectory struct {
declarative.Directory
- Hosts declarative.File `file:"hosts"` // Symlinked to /ephemeral/hosts, baked into the erofs system image
- MachineID declarative.File `file:"machine-id"` // Symlinked to /ephemeral/machine-id, baked into the erofs system image
+ // Symlinked to /ephemeral/hosts, baked into the erofs system image
+ Hosts declarative.File `file:"hosts"`
+ // Symlinked to /ephemeral/machine-id, baked into the erofs system image
+ MachineID declarative.File `file:"machine-id"`
}
type EphemeralDirectory struct {
diff --git a/metropolis/node/core/main.go b/metropolis/node/core/main.go
index 4051663..eb4c6c7 100644
--- a/metropolis/node/core/main.go
+++ b/metropolis/node/core/main.go
@@ -47,8 +47,10 @@
)
var (
- // kubernetesConfig is the static/global part of the Kubernetes service configuration. In the future, this might
- // be configurable by loading it from the EnrolmentConfig. Fow now, it's static and same across all clusters.
+ // kubernetesConfig is the static/global part of the Kubernetes service
+ // configuration. In the future, this might be configurable by loading it
+ // from the EnrolmentConfig. For now, it's static and same across all
+ // clusters.
kubernetesConfig = kubernetes.Config{
ServiceIPRange: net.IPNet{ // TODO(q3k): Decide if configurable / final value
IP: net.IP{10, 0, 255, 1},
@@ -69,7 +71,8 @@
}
unix.Sync()
// TODO(lorenz): Switch this to Reboot when init panics are less likely
- // Best effort, nothing we can do if this fails except printing the error to the console.
+ // Best effort, nothing we can do if this fails except printing the
+ // error to the console.
if err := unix.Reboot(unix.LINUX_REBOOT_CMD_POWER_OFF); err != nil {
panic(fmt.Sprintf("failed to halt node: %v\n", err))
}
@@ -97,7 +100,8 @@
panic(fmt.Errorf("could not set up basic mounts: %w", err))
}
- // Linux kernel default is 4096 which is far too low. Raise it to 1M which is what gVisor suggests.
+ // Linux kernel default is 4096 which is far too low. Raise it to 1M which
+ // is what gVisor suggests.
if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &unix.Rlimit{Cur: 1048576, Max: 1048576}); err != nil {
logger.Fatalf("Failed to raise rlimits: %v", err)
}
@@ -113,7 +117,8 @@
networkSvc := network.New()
- // This function initializes a headless Delve if this is a debug build or does nothing if it's not
+ // This function initializes a headless Delve if this is a debug build or
+ // does nothing if it's not
initializeDebugger(networkSvc)
// Prepare local storage.
@@ -122,15 +127,17 @@
panic(fmt.Errorf("when placing root FS: %w", err))
}
- // trapdoor is a channel used to signal to the init service that a very low-level, unrecoverable failure
- // occured. This causes a GURU MEDITATION ERROR visible to the end user.
+ // trapdoor is a channel used to signal to the init service that a very
+ // low-level, unrecoverable failure occurred. This causes a GURU MEDITATION
+ // ERROR visible to the end user.
trapdoor := make(chan struct{})
// Make context for supervisor. We cancel it when we reach the trapdoor.
ctxS, ctxC := context.WithCancel(context.Background())
- // Start root initialization code as a supervisor one-shot runnable. This means waiting for the network, starting
- // the cluster manager, and then starting all services related to the node's roles.
+ // Start root initialization code as a supervisor one-shot runnable. This
+ // means waiting for the network, starting the cluster manager, and then
+ // starting all services related to the node's roles.
// TODO(q3k): move this to a separate 'init' service.
supervisor.New(ctxS, func(ctx context.Context) error {
logger := supervisor.Logger(ctx)
@@ -143,8 +150,8 @@
return fmt.Errorf("when starting network: %w", err)
}
- // Start cluster manager. This kicks off cluster membership machinery, which will either start
- // a new cluster, enroll into one or join one.
+ // Start cluster manager. This kicks off cluster membership machinery,
+ // which will either start a new cluster, enroll into one or join one.
m := cluster.NewManager(root, networkSvc)
if err := supervisor.Run(ctx, "enrolment", m.Run); err != nil {
return fmt.Errorf("when starting enrolment: %w", err)
@@ -158,25 +165,27 @@
return fmt.Errorf("new couldn't find home in new cluster, aborting: %w", err)
}
- // We are now in a cluster. We can thus access our 'node' object and start all services that
- // we should be running.
+ // We are now in a cluster. We can thus access our 'node' object and
+ // start all services that we should be running.
logger.Info("Enrolment success, continuing startup.")
logger.Info(fmt.Sprintf("This node (%s) has roles:", status.Node.String()))
if cm := status.Node.ConsensusMember(); cm != nil {
- // There's no need to start anything for when we are a consensus member - the cluster
- // manager does this for us if necessary (as creating/enrolling/joining a cluster is
- // pretty tied into cluster lifecycle management).
+ // There's no need to start anything for when we are a consensus
+ // member - the cluster manager does this for us if necessary (as
+ // creating/enrolling/joining a cluster is pretty tied into cluster
+ // lifecycle management).
logger.Info(fmt.Sprintf(" - etcd consensus member"))
}
if kw := status.Node.KubernetesWorker(); kw != nil {
logger.Info(fmt.Sprintf(" - kubernetes worker"))
}
- // If we're supposed to be a kubernetes worker, start kubernetes services and containerd.
- // In the future, this might be split further into kubernetes control plane and data plane
- // roles.
- // TODO(q3k): watch on cluster status updates to start/stop kubernetes service.
+ // If we're supposed to be a kubernetes worker, start kubernetes
+ // services and containerd. In the future, this might be split further
+ // into kubernetes control plane and data plane roles.
+ // TODO(q3k): watch on cluster status updates to start/stop kubernetes
+ // service.
var containerdSvc *containerd.Service
var kubeSvc *kubernetes.Service
if kw := status.Node.KubernetesWorker(); kw != nil {
@@ -236,8 +245,9 @@
for {
select {
case <-trapdoor:
- // If the trapdoor got closed, we got stuck early enough in the boot process that we can't do anything about
- // it. Display a generic error message until we handle error conditions better.
+ // If the trapdoor got closed, we got stuck early enough in the
+ // boot process that we can't do anything about it. Display a
+ // generic error message until we handle error conditions better.
ctxC()
log.Printf(" ########################")
log.Printf(" # GURU MEDIATION ERROR #")
@@ -266,9 +276,11 @@
}
}
case unix.SIGURG:
- // Go 1.14 introduced asynchronous preemption, which uses SIGURG.
- // In order not to break backwards compatibility in the unlikely case
- // of an application actually using SIGURG on its own, they're not filtering them.
+ // Go 1.14 introduced asynchronous preemption, which uses
+ // SIGURG.
+ // In order not to break backwards compatibility in the
+ // unlikely case of an application actually using SIGURG on its
+ // own, they're not filtering them.
// (https://github.com/golang/go/issues/37942)
logger.V(5).Info("Ignoring SIGURG")
// TODO(lorenz): We can probably get more than just SIGCHLD as init, but I can't think
@@ -280,8 +292,9 @@
}
}
-// nodeCertificate creates a node key/certificate for a foreign node. This is duplicated code with localstorage's
-// PKIDirectory EnsureSelfSigned, but is temporary (and specific to 'golden tickets').
+// nodeCertificate creates a node key/certificate for a foreign node. This is
+// duplicated code with localstorage's PKIDirectory EnsureSelfSigned, but is
+// temporary (and specific to 'golden tickets').
func (s *debugService) nodeCertificate() (cert, key []byte, err error) {
pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
diff --git a/metropolis/node/core/mounts.go b/metropolis/node/core/mounts.go
index 7a15c30..f8f0dae 100644
--- a/metropolis/node/core/mounts.go
+++ b/metropolis/node/core/mounts.go
@@ -27,8 +27,9 @@
"source.monogon.dev/metropolis/pkg/logtree"
)
-// setupMounts sets up basic mounts like sysfs, procfs, devtmpfs and cgroups. This should be called early during init
-// as a lot of processes depend on this being available.
+// setupMounts sets up basic mounts like sysfs, procfs, devtmpfs and cgroups.
+// This should be called early during init as a lot of processes depend on this
+// being available.
func setupMounts(log logtree.LeveledLogger) error {
// Set up target filesystems.
for _, el := range []struct {
@@ -50,7 +51,8 @@
}
}
- // Mount all available CGroups for v1 (v2 uses a single unified hierarchy and is not supported by our runtimes yet)
+ // Mount all available CGroups for v1 (v2 uses a single unified hierarchy
+ // and is not supported by our runtimes yet)
if err := unix.Mount("tmpfs", "/sys/fs/cgroup", "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, ""); err != nil {
panic(err)
}
diff --git a/metropolis/node/core/network/dhcp4c/callback/callback.go b/metropolis/node/core/network/dhcp4c/callback/callback.go
index de00383..c01cf45 100644
--- a/metropolis/node/core/network/dhcp4c/callback/callback.go
+++ b/metropolis/node/core/network/dhcp4c/callback/callback.go
@@ -14,13 +14,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package callback contains minimal callbacks for configuring the kernel with options received over DHCP.
+// Package callback contains minimal callbacks for configuring the kernel with
+// options received over DHCP.
//
-// These directly configure the relevant kernel subsytems and need to own certain parts of them as documented on a per-
-// callback basis to make sure that they can recover from restarts and crashes of the DHCP client.
-// The callbacks in here are not suitable for use in advanced network scenarios like running multiple DHCP clients
-// per interface via ClientIdentifier or when running an external FIB manager. In these cases it's advised to extract
-// the necessary information from the lease in your own callback and communicate it directly to the responsible entity.
+// These directly configure the relevant kernel subsystems and need to own
+// certain parts of them as documented on a per-callback basis to make sure
+// that they can recover from restarts and crashes of the DHCP client.
+// The callbacks in here are not suitable for use in advanced network scenarios
+// like running multiple DHCP clients per interface via ClientIdentifier or
+// when running an external FIB manager. In these cases it's advised to extract
+// the necessary information from the lease in your own callback and
+// communicate it directly to the responsible entity.
package callback
import (
@@ -61,8 +65,9 @@
return a.IP.Equal(b.IP) && aOnes == bOnes && aBits == bBits
}
-// ManageIP sets up and tears down the assigned IP address. It takes exclusive ownership of all IPv4 addresses
-// on the given interface which do not have IFA_F_PERMANENT set, so it's not possible to run multiple dynamic addressing
+// ManageIP sets up and tears down the assigned IP address. It takes exclusive
+// ownership of all IPv4 addresses on the given interface which do not have
+// IFA_F_PERMANENT set, so it's not possible to run multiple dynamic addressing
// clients on a single interface.
func ManageIP(iface netlink.Link) dhcp4c.LeaseCallback {
return func(old, new *dhcp4c.Lease) error {
@@ -75,9 +80,11 @@
for _, addr := range addrs {
if addr.Flags&unix.IFA_F_PERMANENT == 0 {
- // Linux identifies addreses by IP, mask and peer (see net/ipv4/devinet.find_matching_ifa in Linux 5.10)
- // So don't touch addresses which match on these properties as AddrReplace will atomically reconfigure
- // them anyways without interrupting things.
+ // Linux identifies addresses by IP, mask and peer (see
+ // net/ipv4/devinet.find_matching_ifa in Linux 5.10).
+ // So don't touch addresses which match on these properties as
+ // AddrReplace will atomically reconfigure them anyways without
+ // interrupting things.
if isIPNetEqual(addr.IPNet, newNet) && addr.Peer == nil && new != nil {
continue
}
@@ -104,8 +111,9 @@
}
}
-// ManageDefaultRoute manages a default route through the first router offered by DHCP. It does nothing if DHCP
-// doesn't provide any routers. It takes ownership of all RTPROTO_DHCP routes on the given interface, so it's not
+// ManageDefaultRoute manages a default route through the first router offered
+// by DHCP. It does nothing if DHCP doesn't provide any routers. It takes
+// ownership of all RTPROTO_DHCP routes on the given interface, so it's not
// possible to run multiple DHCP clients on the given interface.
func ManageDefaultRoute(iface netlink.Link) dhcp4c.LeaseCallback {
return func(old, new *dhcp4c.Lease) error {
@@ -120,8 +128,8 @@
}
ipv4DefaultRoute := net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}
for _, route := range dhcpRoutes {
- // Don't remove routes which can be atomically replaced by RouteReplace to prevent potential traffic
- // disruptions.
+ // Don't remove routes which can be atomically replaced by
+ // RouteReplace to prevent potential traffic disruptions.
if !isIPNetEqual(&ipv4DefaultRoute, route.Dst) && newRouter != nil {
continue
}
diff --git a/metropolis/node/core/network/dhcp4c/callback/callback_test.go b/metropolis/node/core/network/dhcp4c/callback/callback_test.go
index 01611c3..7fb8875 100644
--- a/metropolis/node/core/network/dhcp4c/callback/callback_test.go
+++ b/metropolis/node/core/network/dhcp4c/callback/callback_test.go
@@ -63,7 +63,9 @@
oldLease, newLease *dhcp4c.Lease
expectedAddrs []netlink.Addr
}{
- { // Lifetimes are necessary, otherwise the Kernel sets the IFA_F_PERMANENT flag behind our back
+ // Lifetimes are necessary, otherwise the Kernel sets the
+ // IFA_F_PERMANENT flag behind our back.
+ {
name: "RemoveOldIPs",
initialAddrs: []netlink.Addr{{IPNet: &testNet1, ValidLft: 60}, {IPNet: &testNet2, ValidLft: 60}},
oldLease: nil,
@@ -149,12 +151,15 @@
if os.Getenv("IN_KTEST") != "true" {
t.Skip("Not in ktest")
}
- // testRoute is only used as a route destination and not configured on any interface.
+ // testRoute is only used as a route destination and not configured on any
+ // interface.
testRoute := net.IPNet{IP: net.IP{10, 0, 3, 0}, Mask: net.CIDRMask(24, 32)}
- // A test interface is set up for each test and assigned testNet1 and testNet2 so that testNet1Router and
- // testNet2Router are valid gateways for routes in this environment. A LinkIndex of -1 is replaced by the correct
- // link index for this test interface at runtime for both initialRoutes and expectedRoutes.
+ // A test interface is set up for each test and assigned testNet1 and
+ // testNet2 so that testNet1Router and testNet2Router are valid gateways
+ // for routes in this environment. A LinkIndex of -1 is replaced by the
+ // correct link index for this test interface at runtime for both
+ // initialRoutes and expectedRoutes.
var tests = []struct {
name string
initialRoutes []netlink.Route
@@ -167,8 +172,10 @@
oldLease: nil,
newLease: leaseAddRouter(trivialLeaseFromNet(testNet1), testNet1Router),
expectedRoutes: []netlink.Route{{
- Protocol: unix.RTPROT_DHCP,
- Dst: nil, // Linux weirdly retuns no RTA_DST for default routes, but one for everything else
+ Protocol: unix.RTPROT_DHCP,
+ // Linux weirdly returns no RTA_DST for default routes, but one
+ // for everything else.
+ Dst: nil,
Gw: testNet1Router,
Src: testNet1.IP,
Table: mainRoutingTable,
diff --git a/metropolis/node/core/network/dhcp4c/dhcpc.go b/metropolis/node/core/network/dhcp4c/dhcpc.go
index 5a32b5a..9bb70dd 100644
--- a/metropolis/node/core/network/dhcp4c/dhcpc.go
+++ b/metropolis/node/core/network/dhcp4c/dhcpc.go
@@ -14,10 +14,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package dhcp4c implements a DHCPv4 Client as specified in RFC2131 (with some notable deviations).
-// It implements only the DHCP state machine itself, any configuration other than the interface IP
-// address (which is always assigned in DHCP and necessary for the protocol to work) is exposed
-// as [informers/observables/watchable variables/???] to consumers who then deal with it.
+// Package dhcp4c implements a DHCPv4 Client as specified in RFC2131 (with some
+// notable deviations). It implements only the DHCP state machine itself, any
+// configuration other than the interface IP address (which is always assigned
+// in DHCP and necessary for the protocol to work) is exposed as
+// [informers/observables/watchable variables/???] to consumers who then deal
+// with it.
package dhcp4c
import (
@@ -41,19 +43,23 @@
type state int
const (
- // stateDiscovering sends broadcast DHCPDISCOVER messages to the network and waits for either a DHCPOFFER or
- // (in case of Rapid Commit) DHCPACK.
+ // stateDiscovering sends broadcast DHCPDISCOVER messages to the network
+ // and waits for either a DHCPOFFER or (in case of Rapid Commit) DHCPACK.
stateDiscovering state = iota
- // stateRequesting sends broadcast DHCPREQUEST messages containing the server identifier for the selected lease and
- // waits for a DHCPACK or a DHCPNAK. If it doesn't get either it transitions back into discovering.
+ // stateRequesting sends broadcast DHCPREQUEST messages containing the
+ // server identifier for the selected lease and waits for a DHCPACK or a
+ // DHCPNAK. If it doesn't get either it transitions back into discovering.
stateRequesting
- // stateBound just waits until RenewDeadline (derived from RenewTimeValue, half the lifetime by default) expires.
+ // stateBound just waits until RenewDeadline (derived from RenewTimeValue,
+ // half the lifetime by default) expires.
stateBound
- // stateRenewing sends unicast DHCPREQUEST messages to the currently-selected server and waits for either a DHCPACK
- // or DHCPNAK message. On DHCPACK it transitions to bound, otherwise to discovering.
+ // stateRenewing sends unicast DHCPREQUEST messages to the
+ // currently-selected server and waits for either a DHCPACK or DHCPNAK
+ // message. On DHCPACK it transitions to bound, otherwise to discovering.
stateRenewing
- // stateRebinding sends broadcast DHCPREQUEST messages to the network and waits for either a DHCPACK or DHCPNAK from
- // any server. Response processing is identical to stateRenewing.
+ // stateRebinding sends broadcast DHCPREQUEST messages to the network and
+ // waits for either a DHCPACK or DHCPNAK from any server. Response
+ // processing is identical to stateRenewing.
stateRebinding
)
@@ -74,42 +80,52 @@
}
}
-// This only requests SubnetMask and IPAddressLeaseTime as renewal and rebinding times are fine if
-// they are just defaulted. They are respected (if valid, otherwise they are clamped to the nearest
-// valid value) if sent by the server.
+// This only requests SubnetMask and IPAddressLeaseTime as renewal and
+// rebinding times are fine if they are just defaulted. They are respected (if
+// valid, otherwise they are clamped to the nearest valid value) if sent by the
+// server.
var internalOptions = dhcpv4.OptionCodeList{dhcpv4.OptionSubnetMask, dhcpv4.OptionIPAddressLeaseTime}
-// Transport represents a mechanism over which DHCP messages can be exchanged with a server.
+// Transport represents a mechanism over which DHCP messages can be exchanged
+// with a server.
type Transport interface {
- // Send attempts to send the given DHCP payload message to the transport target once. An empty return value
- // does not indicate that the message was successfully received.
+ // Send attempts to send the given DHCP payload message to the transport
+ // target once. An empty return value does not indicate that the message
+ // was successfully received.
Send(payload *dhcpv4.DHCPv4) error
- // SetReceiveDeadline sets a deadline for Receive() calls after which they return with DeadlineExceededErr
+ // SetReceiveDeadline sets a deadline for Receive() calls after which they
+ // return with DeadlineExceededErr
SetReceiveDeadline(time.Time) error
- // Receive waits for a DHCP message to arrive and returns it. If the deadline expires without a message arriving
- // it will return DeadlineExceededErr. If the message is completely malformed it will an instance of
- // InvalidMessageError.
+ // Receive waits for a DHCP message to arrive and returns it. If the
+ // deadline expires without a message arriving it will return
+ // DeadlineExceededErr. If the message is completely malformed it will
+ // return an instance of InvalidMessageError.
Receive() (*dhcpv4.DHCPv4, error)
- // Close closes the given transport. Calls to any of the above methods will fail if the transport is closed.
- // Specific transports can be reopened after being closed.
+ // Close closes the given transport. Calls to any of the above methods will
+ // fail if the transport is closed. Specific transports can be reopened
+ // after being closed.
Close() error
}
-// UnicastTransport represents a mechanism over which DHCP messages can be exchanged with a single server over an
-// arbitrary IPv4-based network. Implementers need to support servers running outside the local network via a router.
+// UnicastTransport represents a mechanism over which DHCP messages can be
+// exchanged with a single server over an arbitrary IPv4-based network.
+// Implementers need to support servers running outside the local network via a
+// router.
type UnicastTransport interface {
Transport
- // Open connects the transport to a new unicast target. Can only be called after calling Close() or after creating
- // a new transport.
+ // Open connects the transport to a new unicast target. Can only be called
+ // after calling Close() or after creating a new transport.
Open(serverIP, bindIP net.IP) error
}
-// BroadcastTransport represents a mechanism over which DHCP messages can be exchanged with all servers on a Layer 2
-// broadcast domain. Implementers need to support sending and receiving messages without any IP being configured on
+// BroadcastTransport represents a mechanism over which DHCP messages can be
+// exchanged with all servers on a Layer 2 broadcast domain. Implementers need
+// to support sending and receiving messages without any IP being configured on
// the interface.
type BroadcastTransport interface {
Transport
- // Open connects the transport. Can only be called after calling Close() or after creating a new transport.
+ // Open connects the transport. Can only be called after calling Close() or
+ // after creating a new transport.
Open() error
}
@@ -117,26 +133,29 @@
// Client implements a DHCPv4 client.
//
-// Note that the size of all data sent to the server (RequestedOptions, ClientIdentifier,
-// VendorClassIdentifier and ExtraRequestOptions) should be kept reasonably small (<500 bytes) in
-// order to maximize the chance that requests can be properly transmitted.
+// Note that the size of all data sent to the server (RequestedOptions,
+// ClientIdentifier, VendorClassIdentifier and ExtraRequestOptions) should be
+// kept reasonably small (<500 bytes) in order to maximize the chance that
+// requests can be properly transmitted.
type Client struct {
- // RequestedOptions contains a list of extra options this client is interested in
+ // RequestedOptions contains a list of extra options this client is
+ // interested in
RequestedOptions dhcpv4.OptionCodeList
// ClientIdentifier is used by the DHCP server to identify this client.
// If empty, on Ethernet the MAC address is used instead.
ClientIdentifier []byte
- // VendorClassIdentifier is used by the DHCP server to identify options specific to this type of
- // clients and to populate the vendor-specific option (43).
+ // VendorClassIdentifier is used by the DHCP server to identify options
+ // specific to this type of client and to populate the vendor-specific
+ // option (43).
VendorClassIdentifier string
// ExtraRequestOptions are extra options sent to the server.
ExtraRequestOptions dhcpv4.Options
- // Backoff strategies for each state. These all have sane defaults, override them only if
- // necessary.
+ // Backoff strategies for each state. These all have sane defaults,
+ // override them only if necessary.
DiscoverBackoff backoff.BackOff
AcceptOfferBackoff backoff.BackOff
RenewBackoff backoff.BackOff
@@ -170,13 +189,14 @@
leaseRenewDeadline time.Time
}
-// newDefaultBackoff returns an infinitely-retrying randomized exponential backoff with a
-// DHCP-appropriate InitialInterval
+// newDefaultBackoff returns an infinitely-retrying randomized exponential
+// backoff with a DHCP-appropriate InitialInterval
func newDefaultBackoff() *backoff.ExponentialBackOff {
b := backoff.NewExponentialBackOff()
b.MaxElapsedTime = 0 // No Timeout
- // Lots of servers wait 1s for existing users of an IP. Wait at least for that and keep some
- // slack for randomization, communication and processing overhead.
+ // Lots of servers wait 1s for existing users of an IP. Wait at least for
+ // that and keep some slack for randomization, communication and processing
+ // overhead.
b.InitialInterval = 1400 * time.Millisecond
b.MaxInterval = 30 * time.Second
b.RandomizationFactor = 0.2
@@ -184,12 +204,14 @@
}
// NewClient instantiates (but doesn't start) a new DHCPv4 client.
-// To have a working client it's required to set LeaseCallback to something that is capable of configuring the IP
-// address on the given interface. Unless managed through external means like a routing protocol, setting the default
-// route is also required. A simple example with the callback package thus looks like this:
-// c := dhcp4c.NewClient(yourInterface)
-// c.LeaseCallback = callback.Compose(callback.ManageIP(yourInterface), callback.ManageDefaultRoute(yourInterface))
-// c.Run(ctx)
+// To have a working client it's required to set LeaseCallback to something
+// that is capable of configuring the IP address on the given interface. Unless
+// managed through external means like a routing protocol, setting the default
+// route is also required. A simple example with the callback package thus
+// looks like this:
+// c := dhcp4c.NewClient(yourInterface)
+// c.LeaseCallback = callback.Compose(callback.ManageIP(yourInterface), callback.ManageDefaultRoute(yourInterface))
+// c.Run(ctx)
func NewClient(iface *net.Interface) (*Client, error) {
broadcastConn := transport.NewBroadcastTransport(iface)
@@ -227,9 +249,10 @@
}, nil
}
-// acceptableLease checks if the given lease is valid enough to even be processed. This is
-// intentionally not exposed to users because under certain cirumstances it can end up acquiring all
-// available IP addresses from a server.
+// acceptableLease checks if the given lease is valid enough to even be
+// processed. This is intentionally not exposed to users because under certain
+// circumstances it can end up acquiring all available IP addresses from a
+// server.
func (c *Client) acceptableLease(offer *dhcpv4.DHCPv4) bool {
// RFC2131 Section 4.3.1 Table 3
if offer.ServerIdentifier() == nil || offer.ServerIdentifier().To4() == nil {
@@ -241,29 +264,34 @@
return false
}
- // Ignore IPs that are in no way valid for an interface (multicast, loopback, ...)
+ // Ignore IPs that are in no way valid for an interface (multicast,
+ // loopback, ...)
if offer.YourIPAddr.To4() == nil || (!offer.YourIPAddr.IsGlobalUnicast() && !offer.YourIPAddr.IsLinkLocalUnicast()) {
return false
}
- // Technically the options Requested IP address, Parameter request list, Client identifier
- // and Maximum message size should be refused (MUST NOT), but in the interest of interopatibilty
- // let's simply remove them if they are present.
+ // Technically the options Requested IP address, Parameter request list,
+ // Client identifier and Maximum message size should be refused (MUST NOT),
+ // but in the interest of interoperability let's simply remove them if they
+ // are present.
delete(offer.Options, dhcpv4.OptionRequestedIPAddress.Code())
delete(offer.Options, dhcpv4.OptionParameterRequestList.Code())
delete(offer.Options, dhcpv4.OptionClientIdentifier.Code())
delete(offer.Options, dhcpv4.OptionMaximumDHCPMessageSize.Code())
- // Clamp rebindinding times longer than the lease time. Otherwise the state machine might misbehave.
+ // Clamp rebinding times longer than the lease time. Otherwise the state
+ // machine might misbehave.
if offer.IPAddressRebindingTime(0) > offer.IPAddressLeaseTime(0) {
offer.UpdateOption(dhcpv4.OptGeneric(dhcpv4.OptionRebindingTimeValue, dhcpv4.Duration(offer.IPAddressLeaseTime(0)).ToBytes()))
}
- // Clamp renewal times longer than the rebinding time. Otherwise the state machine might misbehave.
+ // Clamp renewal times longer than the rebinding time. Otherwise the state
+ // machine might misbehave.
if offer.IPAddressRenewalTime(0) > offer.IPAddressRebindingTime(0) {
offer.UpdateOption(dhcpv4.OptGeneric(dhcpv4.OptionRenewTimeValue, dhcpv4.Duration(offer.IPAddressRebindingTime(0)).ToBytes()))
}
- // Normalize two options that can be represented either inline or as options.
+ // Normalize two options that can be represented either inline or as
+ // options.
if len(offer.ServerHostName) > 0 {
offer.Options[uint8(dhcpv4.OptionTFTPServerName)] = []byte(offer.ServerHostName)
}
@@ -298,9 +326,10 @@
return xid, nil
}
-// As most servers out there cannot do reassembly, let's just hope for the best and
-// provide the local interface MTU. If the packet is too big it won't work anyways.
-// Also clamp to the biggest representable MTU in DHCPv4 (2 bytes unsigned int).
+// As most servers out there cannot do reassembly, let's just hope for the best
+// and provide the local interface MTU. If the packet is too big it won't work
+// anyways. Also clamp to the biggest representable MTU in DHCPv4 (2 bytes
+// unsigned int).
func (c *Client) maxMsgSize() uint16 {
if c.iface.MTU < math.MaxUint16 {
return uint16(c.iface.MTU)
@@ -346,8 +375,9 @@
}, nil
}
-// transactionStateSpec describes a state which is driven by a DHCP message transaction (sending a
-// specific message and then transitioning into a different state depending on the received messages)
+// transactionStateSpec describes a state which is driven by a DHCP message
+// transaction (sending a specific message and then transitioning into a
+// different state depending on the received messages)
type transactionStateSpec struct {
// ctx is a context for canceling the process
ctx context.Context
@@ -355,33 +385,38 @@
// transport is used to send and receive messages in this state
transport Transport
- // stateDeadline is a fixed external deadline for how long the FSM can remain in this state.
- // If it's exceeded the stateDeadlineExceeded callback is called and responsible for
- // transitioning out of this state. It can be left empty to signal that there's no external
- // deadline for the state.
+ // stateDeadline is a fixed external deadline for how long the FSM can
+ // remain in this state.
+ // If it's exceeded the stateDeadlineExceeded callback is called and
+ // responsible for transitioning out of this state. It can be left empty to
+ // signal that there's no external deadline for the state.
stateDeadline time.Time
- // backoff controls how long to wait for answers until handing control back to the FSM.
- // Since the FSM hasn't advanced until then this means we just get called again and retransmit.
+ // backoff controls how long to wait for answers until handing control back
+ // to the FSM.
+ // Since the FSM hasn't advanced until then this means we just get called
+ // again and retransmit.
backoff backoff.BackOff
- // requestType is the type of DHCP request sent out in this state. This is used to populate
- // the default options for the message.
+ // requestType is the type of DHCP request sent out in this state. This is
+ // used to populate the default options for the message.
requestType dhcpv4.MessageType
- // setExtraOptions can modify the request and set extra options before transmitting. Returning
- // an error here aborts the FSM an can be used to terminate when no valid request can be
- // constructed.
+ // setExtraOptions can modify the request and set extra options before
+ // transmitting. Returning an error here aborts the FSM and can be used to
+ // terminate when no valid request can be constructed.
setExtraOptions func(msg *dhcpv4.DHCPv4) error
- // handleMessage gets called for every parseable (not necessarily valid) DHCP message received
- // by the transport. It should return an error for every message that doesn't advance the
- // state machine and no error for every one that does. It is responsible for advancing the FSM
- // if the required information is present.
+ // handleMessage gets called for every parseable (not necessarily valid)
+ // DHCP message received by the transport. It should return an error for
+ // every message that doesn't advance the state machine and no error for
+ // every one that does. It is responsible for advancing the FSM if the
+ // required information is present.
handleMessage func(msg *dhcpv4.DHCPv4, sentTime time.Time) error
- // stateDeadlineExceeded gets called if either the backoff returns backoff.Stop or the
- // stateDeadline runs out. It is responsible for advancing the FSM into the next state.
+ // stateDeadlineExceeded gets called if either the backoff returns
+ // backoff.Stop or the stateDeadline runs out. It is responsible for
+ // advancing the FSM into the next state.
stateDeadlineExceeded func() error
}
@@ -405,9 +440,10 @@
receiveDeadline = earliestDeadline(s.stateDeadline, receiveDeadline)
}
- // Jump out if deadline expires in less than 10ms. Minimum lease time is 1s and if we have less
- // than 10ms to wait for an answer before switching state it makes no sense to send out another
- // request. This nearly eliminates the problem of sending two different requests back-to-back.
+ // Jump out if deadline expires in less than 10ms. Minimum lease time is 1s
+ // and if we have less than 10ms to wait for an answer before switching
+ // state it makes no sense to send out another request. This nearly
+ // eliminates the problem of sending two different requests back-to-back.
if receiveDeadline.Add(-10 * time.Millisecond).Before(sentTime) {
return s.stateDeadlineExceeded()
}
diff --git a/metropolis/node/core/network/dhcp4c/dhcpc_test.go b/metropolis/node/core/network/dhcp4c/dhcpc_test.go
index b99558c..97c1b2b 100644
--- a/metropolis/node/core/network/dhcp4c/dhcpc_test.go
+++ b/metropolis/node/core/network/dhcp4c/dhcpc_test.go
@@ -172,7 +172,8 @@
assert.Equal(t, dhcpv4.MessageTypeDiscover, mt.sentPacket.MessageType())
}
-// TestAcceptableLease tests if a minimal valid lease is accepted by acceptableLease
+// TestAcceptableLease tests if a minimal valid lease is accepted by
+// acceptableLease
func TestAcceptableLease(t *testing.T) {
c := Client{}
offer := &dhcpv4.DHCPv4{
@@ -223,8 +224,8 @@
return o
}
-// TestDiscoverOffer tests if the DHCP state machine in discovering state properly selects the first valid lease
-// and transitions to requesting state
+// TestDiscoverRequesting tests if the DHCP state machine in discovering state
+// properly selects the first valid lease and transitions to requesting state
func TestDiscoverRequesting(t *testing.T) {
p := newPuppetClient(stateDiscovering)
@@ -250,8 +251,8 @@
assert.Equal(t, testIP, p.c.offer.YourIPAddr, "DHCP client requested invalid offer")
}
-// TestOfferBound tests if the DHCP state machine in requesting state processes a valid DHCPACK and transitions to
-// bound state.
+// TestRequestingBound tests if the DHCP state machine in requesting state
+// processes a valid DHCPACK and transitions to bound state.
func TestRequestingBound(t *testing.T) {
p := newPuppetClient(stateRequesting)
@@ -276,8 +277,8 @@
assert.Equal(t, testIP, p.c.lease.YourIPAddr, "DHCP client requested invalid offer")
}
-// TestRequestingDiscover tests if the DHCP state machine in requesting state transitions back to discovering if it
-// takes too long to get a valid DHCPACK.
+// TestRequestingDiscover tests if the DHCP state machine in requesting state
+// transitions back to discovering if it takes too long to get a valid DHCPACK.
func TestRequestingDiscover(t *testing.T) {
p := newPuppetClient(stateRequesting)
@@ -306,8 +307,9 @@
assert.Equal(t, stateDiscovering, p.c.state, "DHCP client didn't switch back to offer after requesting expired")
}
-// TestDiscoverRapidCommit tests if the DHCP state machine in discovering state transitions directly to bound if a
-// rapid commit response (DHCPACK) is received.
+// TestDiscoverRapidCommit tests if the DHCP state machine in discovering state
+// transitions directly to bound if a rapid commit response (DHCPACK) is
+// received.
func TestDiscoverRapidCommit(t *testing.T) {
testIP := net.IP{192, 0, 2, 2}
offer := newResponse(dhcpv4.MessageTypeAck)
@@ -341,8 +343,9 @@
return fmt.Sprintf("Test Option %d", uint8(o))
}
-// TestBoundRenewingBound tests if the DHCP state machine in bound correctly transitions to renewing after
-// leaseBoundDeadline expires, sends a DHCPREQUEST and after it gets a DHCPACK response calls LeaseCallback and
+// TestBoundRenewingBound tests if the DHCP state machine in bound correctly
+// transitions to renewing after leaseBoundDeadline expires, sends a
+// DHCPREQUEST and after it gets a DHCPACK response calls LeaseCallback and
// transitions back to bound with correct new deadlines.
func TestBoundRenewingBound(t *testing.T) {
offer := newResponse(dhcpv4.MessageTypeAck)
@@ -364,7 +367,8 @@
if err := p.c.runState(context.Background()); err != nil {
t.Error(err)
}
- p.ft.Advance(5 * time.Millisecond) // We cannot intercept time.After so we just advance the clock by the time slept
+ // We cannot intercept time.After so we just advance the clock by the time slept
+ p.ft.Advance(5 * time.Millisecond)
assert.Equal(t, stateRenewing, p.c.state, "DHCP client not renewing")
offer.UpdateOption(dhcpv4.OptGeneric(TestOption(1), []byte{0x12}))
p.umt.sendPackets(offer)
@@ -384,8 +388,9 @@
assert.Equal(t, dhcpv4.MessageTypeRequest, p.umt.sentPacket.MessageType(), "Invalid message type for renewal")
}
-// TestRenewingRebinding tests if the DHCP state machine in renewing state correctly sends DHCPREQUESTs and transitions
-// to the rebinding state when it hasn't received a valid response until the deadline expires.
+// TestRenewingRebinding tests if the DHCP state machine in renewing state
+// correctly sends DHCPREQUESTs and transitions to the rebinding state when it
+// hasn't received a valid response until the deadline expires.
func TestRenewingRebinding(t *testing.T) {
offer := newResponse(dhcpv4.MessageTypeAck)
testIP := net.IP{192, 0, 2, 2}
@@ -431,8 +436,9 @@
assert.True(t, p.umt.closed)
}
-// TestRebindingBound tests if the DHCP state machine in rebinding state sends DHCPREQUESTs to the network and if
-// it receives a valid DHCPACK correctly transitions back to bound state.
+// TestRebindingBound tests if the DHCP state machine in rebinding state sends
+// DHCPREQUESTs to the network and if it receives a valid DHCPACK correctly
+// transitions back to bound state.
func TestRebindingBound(t *testing.T) {
offer := newResponse(dhcpv4.MessageTypeAck)
testIP := net.IP{192, 0, 2, 2}
@@ -471,8 +477,9 @@
assert.Equal(t, stateBound, p.c.state, "DHCP client didn't go back to bound")
}
-// TestRebindingBound tests if the DHCP state machine in rebinding state transitions to discovering state if
-// leaseDeadline expires and calls LeaseCallback with an empty new lease.
+// TestRebindingDiscovering tests if the DHCP state machine in rebinding state
+// transitions to discovering state if leaseDeadline expires and calls
+// LeaseCallback with an empty new lease.
func TestRebindingDiscovering(t *testing.T) {
offer := newResponse(dhcpv4.MessageTypeAck)
testIP := net.IP{192, 0, 2, 2}
diff --git a/metropolis/node/core/network/dhcp4c/doc.go b/metropolis/node/core/network/dhcp4c/doc.go
index b270c7b..49aece5 100644
--- a/metropolis/node/core/network/dhcp4c/doc.go
+++ b/metropolis/node/core/network/dhcp4c/doc.go
@@ -14,40 +14,55 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package dhcp4c provides a client implementation of the DHCPv4 protocol (RFC2131) and a few extensions for Linux-based
-// systems.
+// Package dhcp4c provides a client implementation of the DHCPv4 protocol
+// (RFC2131) and a few extensions for Linux-based systems.
// The code is split into three main parts:
// - The core DHCP state machine, which lives in dhcpc.go
// - Mechanisms to send and receive DHCP messages, which live in transport/
-// - Standard callbacks which implement necessary kernel configuration steps in a simple and standalone way living in
-// callback/
+// - Standard callbacks which implement necessary kernel configuration steps in
+// a simple and standalone way living in callback/
//
-// Since the DHCP protocol is ugly and underspecified (see https://tools.ietf.org/html/draft-ietf-dhc-implementation-02
-// for a subset of known issues), this client slightly bends the specification in the following cases:
-// - IP fragmentation for DHCP messages is not supported for both sending and receiving messages
-// This is because the major servers (ISC, dnsmasq, ...) do not implement it and just drop fragmented packets, so it
-// would be counterproductive to try to send them. The client just attempts to send the full message and hopes it
-// passes through to the server.
-// - The suggested timeouts and wait periods have been tightened significantly. When the standard was written 10Mbps
-// Ethernet with hubs was a common interconnect. Using these would make the client extremely slow on today's
+// Since the DHCP protocol is ugly and underspecified (see
+// https://tools.ietf.org/html/draft-ietf-dhc-implementation-02 for a subset of
+// known issues), this client slightly bends the specification in the following
+// cases:
+// - IP fragmentation for DHCP messages is not supported for both sending and
+// receiving messages. This is because the major servers (ISC, dnsmasq, ...)
+// do not implement it and just drop fragmented packets, so it would be
+// counterproductive to try to send them. The client just attempts to send
+// the full message and hopes it passes through to the server.
+// - The suggested timeouts and wait periods have been tightened significantly.
+// When the standard was written 10Mbps Ethernet with hubs was a common
+// interconnect. Using these would make the client extremely slow on today's
// 1Gbps+ networks.
-// - Wrong data in DHCP responses is fixed up if possible. This fixing includes dropping prohibited options, clamping
-// semantically invalid data and defaulting not set options as far as it's possible. Non-recoverable responses
-// (for example because a non-Unicast IP is handed out or lease time is not set or zero) are still ignored.
-// All data which can be stored in both DHCP fields and options is also normalized to the corresponding option.
-// - Duplicate Address Detection is not implemented by default. It's slow, hard to implement correctly and generally
-// not necessary on modern networks as the servers already waste time checking for duplicate addresses. It's possible
-// to hook it in via a LeaseCallback if necessary in a given application.
+// - Wrong data in DHCP responses is fixed up if possible. This fixing includes
+// dropping prohibited options, clamping semantically invalid data and
+// defaulting not set options as far as it's possible. Non-recoverable
+// responses (for example because a non-Unicast IP is handed out or lease
+// time is not set or zero) are still ignored. All data which can be stored
+// in both DHCP fields and options is also normalized to the corresponding
+// option.
+// - Duplicate Address Detection is not implemented by default. It's slow, hard
+// to implement correctly and generally not necessary on modern networks as
+// the servers already waste time checking for duplicate addresses. It's
+// possible to hook it in via a LeaseCallback if necessary in a given
+// application.
//
-// Operationally, there's one known caveat to using this client: If the lease offered during the select phase (in a
-// DHCPOFFER) is not the same as the one sent in the following DHCPACK the first one might be acceptable, but the second
-// one might not be. This can cause pathological behavior where the client constantly switches between discovering and
-// requesting states. Depending on the reuse policies on the DHCP server this can cause the client to consume all
-// available IP addresses. Sadly there's no good way of fixing this within the boundaries of the protocol. A DHCPRELEASE
-// for the adresse would need to be unicasted so the unaccepable address would need to be configured which can be either
-// impossible if it's not valid or not acceptable from a security standpoint (for example because it overlaps with a
-// prefix used internally) and a DHCPDECLINE would cause the server to blacklist the IP thus also depleting the IP pool.
-// This could be potentially avoided by originating DHCPRELEASE packages from a userspace transport, but said transport
-// would need to be routing- and PMTU-aware which would make it even more complicated than the existing
+// Operationally, there's one known caveat to using this client: If the lease
+// offered during the select phase (in a DHCPOFFER) is not the same as the one
+// sent in the following DHCPACK the first one might be acceptable, but the
+// second one might not be. This can cause pathological behavior where the
+// client constantly switches between discovering and requesting states.
+// Depending on the reuse policies on the DHCP server this can cause the client
+// to consume all available IP addresses. Sadly there's no good way of fixing
+// this within the boundaries of the protocol. A DHCPRELEASE for the address
+// would need to be unicasted so the unacceptable address would need to be
+// configured which can be either impossible if it's not valid or not
+// acceptable from a security standpoint (for example because it overlaps with
+// a prefix used internally) and a DHCPDECLINE would cause the server to
+// blacklist the IP thus also depleting the IP pool.
+// This could be potentially avoided by originating DHCPRELEASE packages from a
+// userspace transport, but said transport would need to be routing- and
+// PMTU-aware which would make it even more complicated than the existing
// BroadcastTransport.
package dhcp4c
diff --git a/metropolis/node/core/network/dhcp4c/lease.go b/metropolis/node/core/network/dhcp4c/lease.go
index c56270c..63c51be 100644
--- a/metropolis/node/core/network/dhcp4c/lease.go
+++ b/metropolis/node/core/network/dhcp4c/lease.go
@@ -24,23 +24,27 @@
"github.com/insomniacslk/dhcp/dhcpv4"
)
-// Lease represents a DHCPv4 lease. It only consists of an IP, an expiration timestamp and options as all other
-// relevant parts of the message have been normalized into their respective options. It also contains some smart
-// getters for commonly-used options which extract only valid information from options.
+// Lease represents a DHCPv4 lease. It only consists of an IP, an expiration
+// timestamp and options as all other relevant parts of the message have been
+// normalized into their respective options. It also contains some smart
+// getters for commonly-used options which extract only valid information from
+// options.
type Lease struct {
AssignedIP net.IP
ExpiresAt time.Time
Options dhcpv4.Options
}
-// SubnetMask returns the SubnetMask option or the default mask if not set or invalid.
+// SubnetMask returns the SubnetMask option or the default mask if not set or
+// invalid.
// It returns nil if the lease is nil.
func (l *Lease) SubnetMask() net.IPMask {
if l == nil {
return nil
}
mask := net.IPMask(dhcpv4.GetIP(dhcpv4.OptionSubnetMask, l.Options))
- if _, bits := mask.Size(); bits != 32 { // If given mask is not valid, use the default mask
+ // If given mask is not valid, use the default mask.
+ if _, bits := mask.Size(); bits != 32 {
mask = l.AssignedIP.DefaultMask()
}
return mask
@@ -58,7 +62,8 @@
}
}
-// Router returns the first valid router from the DHCP router option or nil if none such exists.
+// Router returns the first valid router from the DHCP router option or nil if
+// none such exists.
// It returns nil if the lease is nil.
func (l *Lease) Router() net.IP {
if l == nil {
@@ -101,7 +106,8 @@
return binary.BigEndian.Uint32(ip4)
}
-// DNSServers returns all unique valid DNS servers from the DHCP DomainNameServers options.
+// DNSServers returns all unique valid DNS servers from the DHCP
+// DomainNameServers options.
// It returns nil if the lease is nil.
func (l *Lease) DNSServers() DNSServers {
if l == nil {
diff --git a/metropolis/node/core/network/dhcp4c/transport/transport.go b/metropolis/node/core/network/dhcp4c/transport/transport.go
index 8f5f791..32b2bd7 100644
--- a/metropolis/node/core/network/dhcp4c/transport/transport.go
+++ b/metropolis/node/core/network/dhcp4c/transport/transport.go
@@ -14,8 +14,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package transport contains Linux-based transports for the DHCP broadcast and unicast
-// specifications.
+// Package transport contains Linux-based transports for the DHCP broadcast and
+// unicast specifications.
package transport
import (
diff --git a/metropolis/node/core/network/dhcp4c/transport/transport_broadcast.go b/metropolis/node/core/network/dhcp4c/transport/transport_broadcast.go
index 79fad7d..6d051ee 100644
--- a/metropolis/node/core/network/dhcp4c/transport/transport_broadcast.go
+++ b/metropolis/node/core/network/dhcp4c/transport/transport_broadcast.go
@@ -31,7 +31,8 @@
)
const (
- // RFC2474 Section 4.2.2.1 with reference to RFC791 Section 3.1 (Network Control Precedence)
+ // RFC2474 Section 4.2.2.1 with reference to RFC791 Section 3.1 (Network
+ // Control Precedence)
dscpCS7 = 0x7 << 3
// IPv4 MTU
@@ -49,13 +50,13 @@
// BPF filter for UDP in IPv4 with destination port 68 (DHCP Client)
//
-// This is used to make the kernel drop non-DHCP traffic for us so that we don't have to handle
-// excessive unrelated traffic flowing on a given link which might overwhelm the single-threaded
-// receiver.
+// This is used to make the kernel drop non-DHCP traffic for us so that we
+// don't have to handle excessive unrelated traffic flowing on a given link
+// which might overwhelm the single-threaded receiver.
var bpfFilterInstructions = []bpf.Instruction{
// Check IP protocol version equals 4 (first 4 bits of the first byte)
- // With Ethernet II framing, this is more of a sanity check. We already request the kernel
- // to only return EtherType 0x0800 (IPv4) frames.
+ // With Ethernet II framing, this is more of a sanity check. We already
+ // request the kernel to only return EtherType 0x0800 (IPv4) frames.
bpf.LoadAbsolute{Off: 0, Size: 1},
bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 0xf0}, // SubnetMask second 4 bits
bpf.JumpIf{Cond: bpf.JumpEqual, Val: 4 << 4, SkipTrue: 1},
@@ -85,8 +86,9 @@
var bpfFilter = mustAssemble(bpfFilterInstructions)
-// BroadcastTransport implements a DHCP transport based on a custom IP/UDP stack fulfilling the
-// specific requirements for broadcasting DHCP packets (like all-zero source address, no ARP, ...)
+// BroadcastTransport implements a DHCP transport based on a custom IP/UDP
+// stack fulfilling the specific requirements for broadcasting DHCP packets
+// (like all-zero source address, no ARP, ...)
type BroadcastTransport struct {
rawConn *raw.Conn
iface *net.Interface
@@ -122,13 +124,17 @@
}
ipLayer := &layers.IPv4{
- Version: 4,
- TOS: dscpCS7 << 2, // Shift left of ECN field
- TTL: 1, // These packets should never be routed (their IP headers contain garbage)
+ Version: 4,
+ // Shift left of ECN field
+ TOS: dscpCS7 << 2,
+ // These packets should never be routed (their IP headers contain
+ // garbage)
+ TTL: 1,
Protocol: layers.IPProtocolUDP,
- Flags: layers.IPv4DontFragment, // Most DHCP servers don't support fragmented packets
- DstIP: net.IPv4bcast,
- SrcIP: net.IPv4zero,
+ // Most DHCP servers don't support fragmented packets.
+ Flags: layers.IPv4DontFragment,
+ DstIP: net.IPv4bcast,
+ SrcIP: net.IPv4zero,
}
udpLayer := &layers.UDP{
DstPort: 67,
diff --git a/metropolis/node/core/network/dhcp4c/transport/transport_unicast.go b/metropolis/node/core/network/dhcp4c/transport/transport_unicast.go
index bf2b3a4..8093e70 100644
--- a/metropolis/node/core/network/dhcp4c/transport/transport_unicast.go
+++ b/metropolis/node/core/network/dhcp4c/transport/transport_unicast.go
@@ -29,8 +29,8 @@
"golang.org/x/sys/unix"
)
-// UnicastTransport implements a DHCP transport based on a normal Linux UDP socket with some custom
-// socket options to influence DSCP and routing.
+// UnicastTransport implements a DHCP transport based on a normal Linux UDP
+// socket with some custom socket options to influence DSCP and routing.
type UnicastTransport struct {
udpConn *net.UDPConn
targetIP net.IP
diff --git a/metropolis/node/core/network/dns/coredns.go b/metropolis/node/core/network/dns/coredns.go
index 4ec0869..b57c080 100644
--- a/metropolis/node/core/network/dns/coredns.go
+++ b/metropolis/node/core/network/dns/coredns.go
@@ -51,9 +51,10 @@
}
// New creates a new CoreDNS service.
-// The given channel can then be used to dynamically register and unregister directives in the configuaration.
-// To register a new directive, send an ExtraDirective on the channel. To remove it again, use CancelDirective()
-// to create a removal message.
+// The given channel can then be used to dynamically register and unregister
+// directives in the configuration.
+// To register a new directive, send an ExtraDirective on the channel. To
+// remove it again, use CancelDirective() to create a removal message.
func New(directiveRegistration chan *ExtraDirective) *Service {
return &Service{
directives: map[string]ExtraDirective{},
@@ -83,7 +84,8 @@
}
}
-// Run runs the DNS service consisting of the CoreDNS process and the directive registration process
+// Run runs the DNS service consisting of the CoreDNS process and the directive
+// registration process
func (s *Service) Run(ctx context.Context) error {
supervisor.Run(ctx, "coredns", s.runCoreDNS)
supervisor.Run(ctx, "registration", s.runRegistration)
@@ -116,8 +118,9 @@
return supervisor.RunCommand(ctx, s.cmd)
}
-// runRegistration runs the background registration runnable which has a different lifecycle from the CoreDNS
-// runnable. It is responsible for managing dynamic directives.
+// runRegistration runs the background registration runnable which has a
+// different lifecycle from the CoreDNS runnable. It is responsible for
+// managing dynamic directives.
func (s *Service) runRegistration(ctx context.Context) error {
supervisor.Signal(ctx, supervisor.SignalHealthy)
for {
@@ -138,7 +141,8 @@
} else {
s.directives[d.ID] = *d
}
- // If the process is not currenty running we're relying on corefile regeneration on startup
+ // If the process is not currently running we're relying on corefile
+ // regeneration on startup
if s.cmd != nil && s.cmd.Process != nil && s.cmd.ProcessState == nil {
s.args.ArgPath("Corefile", s.makeCorefile(s.args))
if err := s.cmd.Process.Signal(syscall.SIGUSR1); err != nil {
diff --git a/metropolis/node/core/network/dns/directives.go b/metropolis/node/core/network/dns/directives.go
index 72c4f29..ff25ae8 100644
--- a/metropolis/node/core/network/dns/directives.go
+++ b/metropolis/node/core/network/dns/directives.go
@@ -24,18 +24,21 @@
// Type ExtraDirective contains additional config directives for CoreDNS.
type ExtraDirective struct {
- // ID is the identifier of this directive. There can only be one directive with a given ID active at once.
- // The ID is also used to identify which directive to purge.
+ // ID is the identifier of this directive. There can only be one directive
+ // with a given ID active at once. The ID is also used to identify which
+ // directive to purge.
ID string
- // directive contains a full CoreDNS directive as a string. It can also use the $FILE(<filename>) macro,
- // which will be expanded to the path of a file from the files field.
+ // directive contains a full CoreDNS directive as a string. It can also use
+ // the $FILE(<filename>) macro, which will be expanded to the path of a
+ // file from the files field.
directive string
- // files contains additional files used in the configuration. The map key is used as the filename.
+ // files contains additional files used in the configuration. The map key
+ // is used as the filename.
files map[string][]byte
}
-// NewUpstreamDirective creates a forward with no fallthrough that forwards all requests not yet matched to the given
-// upstream DNS servers.
+// NewUpstreamDirective creates a forward with no fallthrough that forwards all
+// requests not yet matched to the given upstream DNS servers.
func NewUpstreamDirective(dnsServers []net.IP) *ExtraDirective {
strb := strings.Builder{}
if len(dnsServers) > 0 {
@@ -59,9 +62,11 @@
}
`
-// NewKubernetesDirective creates a directive running a "Kubernetes DNS-Based Service Discovery" [1] compliant service
-// under clusterDomain. The given kubeconfig needs at least read access to services, endpoints and endpointslices.
-// [1] https://github.com/kubernetes/dns/blob/master/docs/specification.md
+// NewKubernetesDirective creates a directive running a "Kubernetes DNS-Based
+// Service Discovery" [1] compliant service under clusterDomain. The given
+// kubeconfig needs at least read access to services, endpoints and
+// endpointslices.
+// [1] https://github.com/kubernetes/dns/blob/master/docs/specification.md
func NewKubernetesDirective(clusterDomain string, kubeconfig []byte) *ExtraDirective {
return &ExtraDirective{
ID: "k8s-clusterdns",
diff --git a/metropolis/node/core/network/main.go b/metropolis/node/core/network/main.go
index 49ace2e..6940da6 100644
--- a/metropolis/node/core/network/main.go
+++ b/metropolis/node/core/network/main.go
@@ -49,7 +49,8 @@
// dhcp client for the 'main' interface of the node.
dhcp *dhcp4c.Client
- // nftConn is a shared file descriptor handle to nftables, automatically initialized on first use.
+ // nftConn is a shared file descriptor handle to nftables, automatically
+ // initialized on first use.
nftConn nftables.Conn
natTable *nftables.Table
natPostroutingChain *nftables.Chain
@@ -75,7 +76,8 @@
DNSServers dhcp4c.DNSServers
}
-// Watcher allows network Service consumers to watch for updates of the current Status.
+// Watcher allows network Service consumers to watch for updates of the current
+// Status.
type Watcher struct {
watcher event.Watcher
}
@@ -112,7 +114,8 @@
s.dnsReg <- d
}
-// nfifname converts an interface name into 16 bytes padded with zeroes (for nftables)
+// nfifname converts an interface name into 16 bytes padded with zeroes (for
+// nftables)
func nfifname(n string) []byte {
b := make([]byte, 16)
copy(b, []byte(n+"\x00"))
@@ -180,7 +183,8 @@
// sysctlOptions contains sysctl options to apply
type sysctlOptions map[string]string
-// apply attempts to apply all options in sysctlOptions. It aborts on the first one which returns an error when applying.
+// apply attempts to apply all options in sysctlOptions. It aborts on the first
+// one which returns an error when applying.
func (o sysctlOptions) apply() error {
for name, value := range o {
filePath := path.Join("/proc/sys/", strings.ReplaceAll(name, ".", "/"))
@@ -221,12 +225,14 @@
sysctlOpts := sysctlOptions{
// Enable IP forwarding for our pods
"net.ipv4.ip_forward": "1",
- // Enable strict reverse path filtering on all interfaces (important for spoofing prevention from Pods with CAP_NET_ADMIN)
+ // Enable strict reverse path filtering on all interfaces (important
+ // for spoofing prevention from Pods with CAP_NET_ADMIN)
"net.ipv4.conf.all.rp_filter": "1",
// Disable source routing
"net.ipv4.conf.all.accept_source_route": "0",
- // Increase Linux socket kernel buffer sizes to 16MiB (needed for fast datacenter networks)
+ // Increase Linux socket kernel buffer sizes to 16MiB (needed for fast
+ // datacenter networks)
"net.core.rmem_max": "16777216",
"net.core.wmem_max": "16777216",
"net.ipv4.tcp_rmem": "4096 87380 16777216",
diff --git a/metropolis/node/kubernetes/clusternet/clusternet.go b/metropolis/node/kubernetes/clusternet/clusternet.go
index 74fe1ba..85a78a1 100644
--- a/metropolis/node/kubernetes/clusternet/clusternet.go
+++ b/metropolis/node/kubernetes/clusternet/clusternet.go
@@ -14,15 +14,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package clusternet implements a WireGuard-based overlay network for Kubernetes. It relies on controller-manager's
-// IPAM to assign IP ranges to nodes and on Kubernetes' Node objects to distribute the Node IPs and public keys.
+// Package clusternet implements a WireGuard-based overlay network for
+// Kubernetes. It relies on controller-manager's IPAM to assign IP ranges to
+// nodes and on Kubernetes' Node objects to distribute the Node IPs and public
+// keys.
//
-// It sets up a single WireGuard network interface and routes the entire ClusterCIDR into that network interface,
-// relying on WireGuard's AllowedIPs mechanism to look up the correct peer node to send the traffic to. This means
-// that the routing table doesn't change and doesn't have to be separately managed. When clusternet is started
-// it annotates its WireGuard public key onto its node object.
-// For each node object that's created or updated on the K8s apiserver it checks if a public key annotation is set and
-// if yes a peer with that public key, its InternalIP as endpoint and the CIDR for that node as AllowedIPs is created.
+// It sets up a single WireGuard network interface and routes the entire
+// ClusterCIDR into that network interface, relying on WireGuard's AllowedIPs
+// mechanism to look up the correct peer node to send the traffic to. This
+// means that the routing table doesn't change and doesn't have to be
+// separately managed. When clusternet is started it annotates its WireGuard
+// public key onto its node object.
+// For each node object that's created or updated on the K8s apiserver it
+// checks if a public key annotation is set and if yes a peer with that public
+// key, its InternalIP as endpoint and the CIDR for that node as AllowedIPs is
+// created.
package clusternet
import (
@@ -45,8 +51,8 @@
common "source.monogon.dev/metropolis/node"
"source.monogon.dev/metropolis/node/core/localstorage"
- "source.monogon.dev/metropolis/pkg/logtree"
"source.monogon.dev/metropolis/pkg/jsonpatch"
+ "source.monogon.dev/metropolis/pkg/logtree"
"source.monogon.dev/metropolis/pkg/supervisor"
)
@@ -67,7 +73,8 @@
logger logtree.LeveledLogger
}
-// ensureNode creates/updates the corresponding WireGuard peer entry for the given node objet
+// ensureNode creates/updates the corresponding WireGuard peer entry for the
+// given node object
func (s *Service) ensureNode(newNode *corev1.Node) error {
if newNode.Name == s.NodeName {
// Node doesn't need to connect to itself
@@ -108,8 +115,8 @@
}
allowedIPs = append(allowedIPs, net.IPNet{IP: internalIP, Mask: net.CIDRMask(32, 32)})
s.logger.V(1).Infof("Adding/Updating WireGuard peer node %s, endpoint %s, allowedIPs %+v", newNode.Name, internalIP.String(), allowedIPs)
- // WireGuard's kernel side has create/update semantics on peers by default. So we can just add the peer multiple
- // times to update it.
+ // WireGuard's kernel side has create/update semantics on peers by default.
+ // So we can just add the peer multiple times to update it.
err = s.wgClient.ConfigureDevice(clusterNetDeviceName, wgtypes.Config{
Peers: []wgtypes.PeerConfig{{
PublicKey: pubKey,
@@ -124,7 +131,8 @@
return nil
}
-// removeNode removes the corresponding WireGuard peer entry for the given node object
+// removeNode removes the corresponding WireGuard peer entry for the given node
+// object
func (s *Service) removeNode(oldNode *corev1.Node) error {
if oldNode.Name == s.NodeName {
// Node doesn't need to connect to itself
@@ -150,7 +158,8 @@
return nil
}
-// ensureOnDiskKey loads the private key from disk or (if none exists) generates one and persists it.
+// ensureOnDiskKey loads the private key from disk or (if none exists)
+// generates one and persists it.
func (s *Service) ensureOnDiskKey() error {
keyRaw, err := s.DataDirectory.Key.Read()
if os.IsNotExist(err) {
@@ -176,7 +185,8 @@
return nil
}
-// annotateThisNode annotates the node (as defined by NodeName) with the wireguard public key of this node.
+// annotateThisNode annotates the node (as defined by NodeName) with the
+// wireguard public key of this node.
func (s *Service) annotateThisNode(ctx context.Context) error {
patch := []jsonpatch.JsonPatchOp{{
Operation: "add",
diff --git a/metropolis/node/kubernetes/containerd/main.go b/metropolis/node/kubernetes/containerd/main.go
index 6b99081..c3dd4a0 100644
--- a/metropolis/node/kubernetes/containerd/main.go
+++ b/metropolis/node/kubernetes/containerd/main.go
@@ -76,10 +76,12 @@
n, err := io.Copy(supervisor.RawLogger(ctx), fifo)
if n == 0 && err == nil {
- // Hack because pipes/FIFOs can return zero reads when nobody is writing. To avoid busy-looping,
- // sleep a bit before retrying. This does not loose data since the FIFO internal buffer will
- // stall writes when it becomes full. 10ms maximum stall in a non-latency critical process (reading
- // debug logs) is not an issue for us.
+ // Hack because pipes/FIFOs can return zero reads when nobody
+ // is writing. To avoid busy-looping, sleep a bit before
+ // retrying. This does not lose data since the FIFO internal
+ // buffer will stall writes when it becomes full. 10ms maximum
+ // stall in a non-latency critical process (reading debug logs)
+ // is not an issue for us.
time.Sleep(10 * time.Millisecond)
} else if err != nil {
return fmt.Errorf("log pump failed: %v", err)
@@ -88,14 +90,18 @@
}
}
-// runPreseed loads OCI bundles in tar form from preseedNamespacesDir into containerd at startup.
-// This can be run multiple times, containerd will automatically dedup the layers.
-// containerd uses namespaces to keep images (and everything else) separate so to define where the images will be loaded
-// to they need to be in a folder named after the namespace they should be loaded into.
-// containerd's CRI plugin (which is built as part of containerd) uses a hardcoded namespace ("k8s.io") for everything
-// accessed through CRI, so if an image should be available on K8s it needs to be in that namespace.
-// As an example if image helloworld should be loaded for use with Kubernetes, the OCI bundle needs to be at
-// <preseedNamespacesDir>/k8s.io/helloworld.tar. No tagging beyond what's in the bundle is performed.
+// runPreseed loads OCI bundles in tar form from preseedNamespacesDir into
+// containerd at startup.
+// This can be run multiple times, containerd will automatically dedup the
+// layers. containerd uses namespaces to keep images (and everything else)
+// separate so to define where the images will be loaded to they need to be in
+// a folder named after the namespace they should be loaded into. containerd's
+// CRI plugin (which is built as part of containerd) uses a hardcoded namespace
+// ("k8s.io") for everything accessed through CRI, so if an image should be
+// available on K8s it needs to be in that namespace.
+// As an example if image helloworld should be loaded for use with Kubernetes,
+// the OCI bundle needs to be at <preseedNamespacesDir>/k8s.io/helloworld.tar.
+// No tagging beyond what's in the bundle is performed.
func (s *Service) runPreseed(ctx context.Context) error {
client, err := ctr.New(s.EphemeralVolume.ClientSocket.FullPath())
if err != nil {
@@ -126,8 +132,9 @@
if err != nil {
return fmt.Errorf("failed to open preseed image \"%v\": %w", image.Name(), err)
}
- // defer in this loop is fine since we're never going to preseed more than ~1M images which is where our
- // file descriptor limit is.
+ // defer in this loop is fine since we're never going to preseed
+ // more than ~1M images which is where our file descriptor limit
+ // is.
defer imageFile.Close()
importedImages, err := client.Import(ctxWithNS, imageFile)
if err != nil {
diff --git a/metropolis/node/kubernetes/csi.go b/metropolis/node/kubernetes/csi.go
index efd8af4..4893254 100644
--- a/metropolis/node/kubernetes/csi.go
+++ b/metropolis/node/kubernetes/csi.go
@@ -39,9 +39,10 @@
"source.monogon.dev/metropolis/pkg/supervisor"
)
-// Derived from K8s spec for acceptable names, but shortened to 130 characters to avoid issues with
-// maximum path length. We don't provision longer names so this applies only if you manually create
-// a volume with a name of more than 130 characters.
+// Derived from K8s spec for acceptable names, but shortened to 130 characters
+// to avoid issues with maximum path length. We don't provision longer names so
+// this applies only if you manually create a volume with a name of more than
+// 130 characters.
var acceptableNames = regexp.MustCompile("^[a-z][a-z0-9-.]{0,128}[a-z0-9]$")
type csiPluginServer struct {
@@ -64,8 +65,8 @@
pluginServer := grpc.NewServer()
csi.RegisterIdentityServer(pluginServer, s)
csi.RegisterNodeServer(pluginServer, s)
- // Enable graceful shutdown since we don't have long-running RPCs and most of them shouldn't and can't be
- // cancelled anyways.
+ // Enable graceful shutdown since we don't have long-running RPCs and most
+ // of them shouldn't and can't be cancelled anyways.
if err := supervisor.Run(ctx, "csi-node", supervisor.GRPCServer(pluginServer, pluginListener, true)); err != nil {
return err
}
diff --git a/metropolis/node/kubernetes/hyperkube/main.go b/metropolis/node/kubernetes/hyperkube/main.go
index 3b4ac08..10c7a2d 100644
--- a/metropolis/node/kubernetes/hyperkube/main.go
+++ b/metropolis/node/kubernetes/hyperkube/main.go
@@ -56,9 +56,9 @@
hyperkubeCommand, allCommandFns := NewHyperKubeCommand()
- // TODO: once we switch everything over to Cobra commands, we can go back to calling
- // cliflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
- // normalize func and add the go flag set by hand.
+ // TODO: once we switch everything over to Cobra commands, we can go back
+ // to calling cliflag.InitFlags() (by removing its pflag.Parse() call). For
+ // now, we have to set the normalize func and add the go flag set by hand.
pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
// cliflag.InitFlags()
@@ -89,8 +89,8 @@
// NewHyperKubeCommand is the entry point for hyperkube
func NewHyperKubeCommand() (*cobra.Command, []func() *cobra.Command) {
- // these have to be functions since the command is polymorphic. Cobra wants you to be top level
- // command to get executed
+ // these have to be functions since the command is polymorphic. Cobra wants
+ // you to be top level command to get executed
apiserver := func() *cobra.Command { return kubeapiserver.NewAPIServerCommand() }
controller := func() *cobra.Command { return kubecontrollermanager.NewControllerManagerCommand() }
scheduler := func() *cobra.Command { return kubescheduler.NewSchedulerCommand() }
diff --git a/metropolis/node/kubernetes/kubelet.go b/metropolis/node/kubernetes/kubelet.go
index 953a201..d966e5d 100644
--- a/metropolis/node/kubernetes/kubelet.go
+++ b/metropolis/node/kubernetes/kubelet.go
@@ -102,8 +102,8 @@
"memory": "300Mi",
},
- // We're not going to use this, but let's make it point to a known-empty directory in case anybody manages to
- // trigger it.
+ // We're not going to use this, but let's make it point to a
+ // known-empty directory in case anybody manages to trigger it.
VolumePluginDir: s.EphemeralDirectory.FlexvolumePlugins.FullPath(),
}
}
diff --git a/metropolis/node/kubernetes/nfproxy/nfproxy.go b/metropolis/node/kubernetes/nfproxy/nfproxy.go
index ac13af1..5fcc5b5 100644
--- a/metropolis/node/kubernetes/nfproxy/nfproxy.go
+++ b/metropolis/node/kubernetes/nfproxy/nfproxy.go
@@ -14,8 +14,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package nfproxy is a Kubernetes Service IP proxy based exclusively on the Linux nftables interface.
-// It uses netfilter's NAT capabilities to accept traffic on service IPs and DNAT it to the respective endpoint.
+// Package nfproxy is a Kubernetes Service IP proxy based exclusively on the
+// Linux nftables interface. It uses netfilter's NAT capabilities to accept
+// traffic on service IPs and DNAT it to the respective endpoint.
package nfproxy
import (
@@ -42,7 +43,8 @@
)
type Service struct {
- // Traffic in ClusterCIDR is assumed to be originated inside the cluster and will not be SNATed
+ // Traffic in ClusterCIDR is assumed to be originated inside the cluster
+ // and will not be SNATed
ClusterCIDR net.IPNet
// A Kubernetes ClientSet with read access to endpoints and services
ClientSet kubernetes.Interface
diff --git a/metropolis/node/kubernetes/pki/kubernetes.go b/metropolis/node/kubernetes/pki/kubernetes.go
index 467f718..0e59306 100644
--- a/metropolis/node/kubernetes/pki/kubernetes.go
+++ b/metropolis/node/kubernetes/pki/kubernetes.go
@@ -56,9 +56,11 @@
// APIServer client certificate used to authenticate to kubelets.
APIServerKubeletClient KubeCertificateName = "apiserver-kubelet-client"
- // Kubernetes Controller manager client certificate, used to authenticate to the apiserver.
+ // Kubernetes Controller manager client certificate, used to authenticate
+ // to the apiserver.
ControllerManagerClient KubeCertificateName = "controller-manager-client"
- // Kubernetes Controller manager server certificate, used to run its HTTP server.
+ // Kubernetes Controller manager server certificate, used to run its HTTP
+ // server.
ControllerManager KubeCertificateName = "controller-manager"
// Kubernetes Scheduler client certificate, used to authenticate to the apiserver.
@@ -66,12 +68,12 @@
// Kubernetes scheduler server certificate, used to run its HTTP server.
Scheduler KubeCertificateName = "scheduler"
- // Root-on-kube (system:masters) client certificate. Used to control the apiserver (and resources) by Metropolis
- // internally.
+ // Root-on-kube (system:masters) client certificate. Used to control the
+ // apiserver (and resources) by Metropolis internally.
Master KubeCertificateName = "master"
// OpenAPI Kubernetes Aggregation CA.
- // See: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#ca-reusage-and-conflicts
+ // https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#ca-reusage-and-conflicts
AggregationCA KubeCertificateName = "aggregation-ca"
FrontProxyClient KubeCertificateName = "front-proxy-client"
)
@@ -79,8 +81,9 @@
const (
// etcdPrefix is where all the PKI data is stored in etcd.
etcdPrefix = "/kube-pki/"
- // serviceAccountKeyName is the etcd path part that is used to store the ServiceAccount authentication secret.
- // This is not a certificate, just an RSA key.
+ // serviceAccountKeyName is the etcd path part that is used to store the
+ // ServiceAccount authentication secret. This is not a certificate, just an
+ // RSA key.
serviceAccountKeyName = "service-account-privkey"
)
@@ -116,7 +119,8 @@
"kubernetes.default.svc.cluster.local",
"localhost",
},
- []net.IP{{10, 0, 255, 1}, {127, 0, 0, 1}}, // TODO(q3k): add service network internal apiserver address
+ // TODO(q3k): add service network internal apiserver address
+ []net.IP{{10, 0, 255, 1}, {127, 0, 0, 1}},
))
make(IdCA, APIServerKubeletClient, opki.Client("metropolis:apiserver-kubelet-client", nil))
make(IdCA, ControllerManagerClient, opki.Client("system:kube-controller-manager", nil))
@@ -131,7 +135,8 @@
return &pki
}
-// EnsureAll ensures that all static certificates (and the serviceaccount key) are present on etcd.
+// EnsureAll ensures that all static certificates (and the serviceaccount key)
+// are present on etcd.
func (k *PKI) EnsureAll(ctx context.Context) error {
for n, v := range k.Certificates {
k.logger.Infof("Ensuring %s exists", string(n))
@@ -147,8 +152,8 @@
return nil
}
-// Kubeconfig generates a kubeconfig blob for a given certificate name. The same lifetime semantics as in .Certificate
-// apply.
+// Kubeconfig generates a kubeconfig blob for a given certificate name. The
+// same lifetime semantics as in .Certificate apply.
func (k *PKI) Kubeconfig(ctx context.Context, name KubeCertificateName) ([]byte, error) {
c, ok := k.Certificates[name]
if !ok {
@@ -157,9 +162,11 @@
return Kubeconfig(ctx, k.KV, c)
}
-// Certificate retrieves an x509 DER-encoded (but not PEM-wrapped) key and certificate for a given certificate name.
-// If the requested certificate is volatile, it will be created on demand. Otherwise it will be created on etcd (if not
-// present), and retrieved from there.
+// Certificate retrieves an x509 DER-encoded (but not PEM-wrapped) key and
+// certificate for a given certificate name.
+// If the requested certificate is volatile, it will be created on demand.
+// Otherwise it will be created on etcd (if not present), and retrieved from
+// there.
func (k *PKI) Certificate(ctx context.Context, name KubeCertificateName) (cert, key []byte, err error) {
c, ok := k.Certificates[name]
if !ok {
@@ -168,7 +175,8 @@
return c.Ensure(ctx, k.KV)
}
-// Kubeconfig generates a kubeconfig blob for this certificate. The same lifetime semantics as in .Ensure apply.
+// Kubeconfig generates a kubeconfig blob for this certificate. The same
+// lifetime semantics as in .Ensure apply.
func Kubeconfig(ctx context.Context, kv clientv3.KV, c *opki.Certificate) ([]byte, error) {
cert, key, err := c.Ensure(ctx, kv)
@@ -204,11 +212,12 @@
return clientcmd.Write(*kubeconfig)
}
-// ServiceAccountKey retrieves (and possibly generates and stores on etcd) the Kubernetes service account key. The
-// returned data is ready to be used by Kubernetes components (in PKIX form).
+// ServiceAccountKey retrieves (and possibly generates and stores on etcd) the
+// Kubernetes service account key. The returned data is ready to be used by
+// Kubernetes components (in PKIX form).
func (k *PKI) ServiceAccountKey(ctx context.Context) ([]byte, error) {
- // TODO(q3k): this should be abstracted away once we abstract away etcd access into a library with try-or-create
- // semantics.
+ // TODO(q3k): this should be abstracted away once we abstract away etcd
+ // access into a library with try-or-create semantics.
path := fmt.Sprintf("%s%s.der", etcdPrefix, serviceAccountKeyName)
// Try loading key from etcd.
diff --git a/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go b/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
index a437973..ed47f74 100644
--- a/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
+++ b/metropolis/node/kubernetes/plugins/kvmdevice/kvmdevice.go
@@ -14,10 +14,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package kvmdevice implements a Kubernetes device plugin for the virtual KVM device. Using the device plugin API
-// allows us to take advantage of the scheduler to locate pods on machines eligible for KVM and also allows granular
-// access control to KVM using quotas instead of needing privileged access.
-// Since KVM devices are virtual, this plugin emulates a huge number of them so that we never run out.
+// Package kvmdevice implements a Kubernetes device plugin for the virtual KVM
+// device. Using the device plugin API allows us to take advantage of the
+// scheduler to locate pods on machines eligible for KVM and also allows
+// granular access control to KVM using quotas instead of needing privileged
+// access.
+// Since KVM devices are virtual, this plugin emulates a huge number of them so
+// that we never run out.
package kvmdevice
import (
@@ -110,8 +113,9 @@
return &response, nil
}
-// deviceNumberFromString gets a Linux device number from a string containing two decimal numbers representing the major
-// and minor device numbers separated by a colon. Whitespace is ignored.
+// deviceNumberFromString gets a Linux device number from a string containing
+// two decimal numbers representing the major and minor device numbers
+// separated by a colon. Whitespace is ignored.
func deviceNumberFromString(s string) (uint64, error) {
kvmDevParts := strings.Split(s, ":")
if len(kvmDevParts) != 2 {
diff --git a/metropolis/node/kubernetes/provisioner.go b/metropolis/node/kubernetes/provisioner.go
index 0aa5c66..42edf77 100644
--- a/metropolis/node/kubernetes/provisioner.go
+++ b/metropolis/node/kubernetes/provisioner.go
@@ -46,13 +46,16 @@
"source.monogon.dev/metropolis/pkg/supervisor"
)
-// ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to match csiProvisionerServerName declared.
+// ONCHANGE(//metropolis/node/kubernetes/reconciler:resources_csi.go): needs to
+// match csiProvisionerServerName declared.
const csiProvisionerServerName = "dev.monogon.metropolis.vfs"
-// csiProvisionerServer is responsible for the provisioning and deprovisioning of CSI-based container volumes. It runs on all
-// nodes and watches PVCs for ones assigned to the node it's running on and fulfills the provisioning request by
-// creating a directory, applying a quota and creating the corresponding PV. When the PV is released and its retention
-// policy is Delete, the directory and the PV resource are deleted.
+// csiProvisionerServer is responsible for the provisioning and deprovisioning
+// of CSI-based container volumes. It runs on all nodes and watches PVCs for
+// ones assigned to the node it's running on and fulfills the provisioning
+// request by creating a directory, applying a quota and creating the
+// corresponding PV. When the PV is released and its retention policy is
+// Delete, the directory and the PV resource are deleted.
type csiProvisionerServer struct {
NodeName string
Kubernetes kubernetes.Interface
@@ -68,13 +71,16 @@
logger logtree.LeveledLogger
}
-// runCSIProvisioner runs the main provisioning machinery. It consists of a bunch of informers which keep track of
-// the events happening on the Kubernetes control plane and informs us when something happens. If anything happens to
-// PVCs or PVs, we enqueue the identifier of that resource in a work queue. Queues are being worked on by only one
-// worker to limit load and avoid complicated locking infrastructure. Failed items are requeued.
+// runCSIProvisioner runs the main provisioning machinery. It consists of a
+// bunch of informers which keep track of the events happening on the
+// Kubernetes control plane and informs us when something happens. If anything
+// happens to PVCs or PVs, we enqueue the identifier of that resource in a work
+// queue. Queues are being worked on by only one worker to limit load and avoid
+// complicated locking infrastructure. Failed items are requeued.
func (p *csiProvisionerServer) Run(ctx context.Context) error {
- // The recorder is used to log Kubernetes events for successful or failed volume provisions. These events then
- // show up in `kubectl describe pvc` and can be used by admins to debug issues with this provisioner.
+ // The recorder is used to log Kubernetes events for successful or failed
+ // volume provisions. These events then show up in `kubectl describe pvc`
+ // and can be used by admins to debug issues with this provisioner.
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: p.Kubernetes.CoreV1().Events("")})
p.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: csiProvisionerServerName, Host: p.NodeName})
@@ -119,7 +125,8 @@
return nil
}
-// isOurPVC checks if the given PVC is is to be provisioned by this provisioner and has been scheduled onto this node
+// isOurPVC checks if the given PVC is to be provisioned by this provisioner
+// and has been scheduled onto this node
func (p *csiProvisionerServer) isOurPVC(pvc *v1.PersistentVolumeClaim) bool {
if pvc.ObjectMeta.Annotations["volume.beta.kubernetes.io/storage-provisioner"] != csiProvisionerServerName {
return false
@@ -130,7 +137,8 @@
return true
}
-// isOurPV checks if the given PV has been provisioned by this provisioner and has been scheduled onto this node
+// isOurPV checks if the given PV has been provisioned by this provisioner and
+// has been scheduled onto this node
func (p *csiProvisionerServer) isOurPV(pv *v1.PersistentVolume) bool {
if pv.ObjectMeta.Annotations["pv.kubernetes.io/provisioned-by"] != csiProvisionerServerName {
return false
@@ -161,8 +169,8 @@
p.pvQueue.Add(key)
}
-// processQueueItems gets items from the given work queue and calls the process function for each of them. It self-
-// terminates once the queue is shut down.
+// processQueueItems gets items from the given work queue and calls the process
+// function for each of them. It self-terminates once the queue is shut down.
func (p *csiProvisionerServer) processQueueItems(queue workqueue.RateLimitingInterface, process func(key string) error) {
for {
obj, shutdown := queue.Get()
@@ -194,8 +202,8 @@
return filepath.Join(p.VolumesDirectory.FullPath(), volumeID)
}
-// processPVC looks at a single PVC item from the queue, determines if it needs to be provisioned and logs the
-// provisioning result to the recorder
+// processPVC looks at a single PVC item from the queue, determines if it needs
+// to be provisioned and logs the provisioning result to the recorder
func (p *csiProvisionerServer) processPVC(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
@@ -223,8 +231,9 @@
}
if storageClass.Provisioner != csiProvisionerServerName {
- // We're not responsible for this PVC. Can only happen if controller-manager makes a mistake
- // setting the annotations, but we're bailing here anyways for safety.
+ // We're not responsible for this PVC. Can only happen if
+ // controller-manager makes a mistake setting the annotations, but
+ // we're bailing here anyways for safety.
return nil
}
@@ -239,8 +248,9 @@
return nil
}
-// provisionPVC creates the directory where the volume lives, sets a quota for the requested amount of storage and
-// creates the PV object representing this new volume
+// provisionPVC creates the directory where the volume lives, sets a quota for
+// the requested amount of storage and creates the PV object representing this
+// new volume
func (p *csiProvisionerServer) provisionPVC(pvc *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass) error {
claimRef, err := ref.GetReference(scheme.Scheme, pvc)
if err != nil {
@@ -335,8 +345,9 @@
return nil
}
-// processPV looks at a single PV item from the queue and checks if it has been released and needs to be deleted. If yes
-// it deletes the associated quota, directory and the PV object and logs the result to the recorder.
+// processPV looks at a single PV item from the queue and checks if it has been
+// released and needs to be deleted. If yes it deletes the associated quota,
+// directory and the PV object and logs the result to the recorder.
func (p *csiProvisionerServer) processPV(key string) error {
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
@@ -362,7 +373,8 @@
switch *pv.Spec.VolumeMode {
case "", v1.PersistentVolumeFilesystem:
if err := fsquota.SetQuota(volumePath, 0, 0); err != nil {
- // We record these here manually since a successful deletion removes the PV we'd be attaching them to
+ // We record these here manually since a successful deletion
+ // removes the PV we'd be attaching them to.
p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to remove quota: %v", err)
return fmt.Errorf("failed to remove quota: %w", err)
}
diff --git a/metropolis/node/kubernetes/reconciler/reconciler.go b/metropolis/node/kubernetes/reconciler/reconciler.go
index 51cc248..1828060 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler.go
@@ -48,20 +48,25 @@
}
const (
- // BuiltinLabelKey is used as a k8s label to mark built-in objects (ie., managed by the reconciler)
+ // BuiltinLabelKey is used as a k8s label to mark built-in objects (ie.,
+ // managed by the reconciler)
BuiltinLabelKey = "metropolis.monogon.dev/builtin"
- // BuiltinLabelValue is used as a k8s label value, under the BuiltinLabelKey key.
+ // BuiltinLabelValue is used as a k8s label value, under the
+ // BuiltinLabelKey key.
BuiltinLabelValue = "true"
- // BuiltinRBACPrefix is used to prefix all built-in objects that are part of the rbac/v1 API (eg.
- // {Cluster,}Role{Binding,} objects). This corresponds to the colon-separated 'namespaces' notation used by
+ // BuiltinRBACPrefix is used to prefix all built-in objects that are part
+ // of the rbac/v1 API (eg. {Cluster,}Role{Binding,} objects). This
+ // corresponds to the colon-separated 'namespaces' notation used by
// Kubernetes system (system:) objects.
BuiltinRBACPrefix = "metropolis:"
)
-// builtinLabels makes a kubernetes-compatible label dictionary (key->value) that is used to mark objects that are
-// built-in into Metropolis (ie., managed by the reconciler). These are then subsequently retrieved by listBuiltins.
-// The extra argument specifies what other labels are to be merged into the the labels dictionary, for convenience. If
-// nil or empty, no extra labels will be applied.
+// builtinLabels makes a kubernetes-compatible label dictionary (key->value)
+// that is used to mark objects that are built-in into Metropolis (ie., managed
+// by the reconciler). These are then subsequently retrieved by listBuiltins.
+// The extra argument specifies what other labels are to be merged into the
+// labels dictionary, for convenience. If nil or empty, no extra labels will be
+// applied.
func builtinLabels(extra map[string]string) map[string]string {
l := map[string]string{
BuiltinLabelKey: BuiltinLabelValue,
@@ -74,32 +79,39 @@
return l
}
-// listBuiltins returns a k8s client ListOptions structure that allows to retrieve all objects that are built-in into
-// Metropolis currently present in the API server (ie., ones that are to be managed by the reconciler). These are
-// created by applying builtinLabels to their metadata labels.
+// listBuiltins returns a k8s client ListOptions structure that allows to
+// retrieve all objects that are built-in into Metropolis currently present in
+// the API server (ie., ones that are to be managed by the reconciler). These
+// are created by applying builtinLabels to their metadata labels.
var listBuiltins = meta.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", BuiltinLabelKey, BuiltinLabelValue),
}
-// builtinRBACName returns a name that is compatible with colon-delimited 'namespaced' objects, a la system:*.
-// These names are to be used by all builtins created as part of the rbac/v1 Kubernetes API.
+// builtinRBACName returns a name that is compatible with colon-delimited
+// 'namespaced' objects, a la system:*.
+// These names are to be used by all builtins created as part of the rbac/v1
+// Kubernetes API.
func builtinRBACName(name string) string {
return BuiltinRBACPrefix + name
}
-// resource is a type of resource to be managed by the reconciler. All builti-ins/reconciled objects must implement
-// this interface to be managed correctly by the reconciler.
+// resource is a type of resource to be managed by the reconciler. All
+// built-ins/reconciled objects must implement this interface to be managed
+// correctly by the reconciler.
type resource interface {
- // List returns a list of names of objects current present on the target (ie. k8s API server).
+	// List returns a list of names of objects currently present on the target
+ // (ie. k8s API server).
List(ctx context.Context) ([]string, error)
- // Create creates an object on the target. The el interface{} argument is the black box object returned by the
- // Expected() call.
+ // Create creates an object on the target. The el interface{} argument is
+ // the black box object returned by the Expected() call.
Create(ctx context.Context, el interface{}) error
// Delete delete an object, by name, from the target.
Delete(ctx context.Context, name string) error
- // Expected returns a map of all objects expected to be present on the target. The keys are names (which must
- // correspond to the names returned by List() and used by Delete(), and the values are blackboxes that will then
- // be passed to the Create() call if their corresponding key (name) does not exist on the target.
+ // Expected returns a map of all objects expected to be present on the
+	// target. The keys are names (which must correspond to the names returned
+	// by List() and used by Delete()), and the values are blackboxes that will
+ // then be passed to the Create() call if their corresponding key (name)
+ // does not exist on the target.
Expected() map[string]interface{}
}
diff --git a/metropolis/node/kubernetes/reconciler/reconciler_test.go b/metropolis/node/kubernetes/reconciler/reconciler_test.go
index b58d4af..ba2f4e8 100644
--- a/metropolis/node/kubernetes/reconciler/reconciler_test.go
+++ b/metropolis/node/kubernetes/reconciler/reconciler_test.go
@@ -28,9 +28,10 @@
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-// kubernetesMeta unwraps an interface{} that might contain a Kubernetes resource of type that is managed by the
-// reconciler. Any time a new Kubernetes type is managed by the reconciler, the following switch should be extended
-// to cover that type.
+// kubernetesMeta unwraps an interface{} that might contain a Kubernetes
+// resource of a type that is managed by the reconciler. Any time a new
+// Kubernetes type is managed by the reconciler, the following switch should be
+// extended to cover that type.
func kubernetesMeta(v interface{}) *meta.ObjectMeta {
switch v2 := v.(type) {
case *rbac.ClusterRole:
@@ -49,9 +50,11 @@
return nil
}
-// TestExpectedNamedCorrectly ensures that all the Expected objects of all resource types have a correspondence between
-// their returned key and inner name. This contract must be met in order for the reconciler to not create runaway
-// resources. This assumes all managed resources are Kubernetes resources.
+// TestExpectedNamedCorrectly ensures that all the Expected objects of all
+// resource types have a correspondence between their returned key and inner
+// name. This contract must be met in order for the reconciler to not create
+// runaway resources. This assumes all managed resources are Kubernetes
+// resources.
func TestExpectedNamedCorrectly(t *testing.T) {
for reconciler, r := range allResources(nil) {
for outer, v := range r.Expected() {
@@ -68,10 +71,13 @@
}
}
-// TestExpectedLabeledCorrectly ensures that all the Expected objects of all resource types have a Kubernetes metadata
-// label that signifies it's a builtin object, to be retrieved afterwards. This contract must be met in order for the
-// reconciler to not keep overwriting objects (and possibly failing), when a newly created object is not then
-// retrievable using a selector corresponding to this label. This assumes all managed resources are Kubernetes objects.
+// TestExpectedLabeledCorrectly ensures that all the Expected objects of all
+// resource types have a Kubernetes metadata label that signifies it's a
+// builtin object, to be retrieved afterwards. This contract must be met in
+// order for the reconciler to not keep overwriting objects (and possibly
+// failing), when a newly created object is not then retrievable using a
+// selector corresponding to this label. This assumes all managed resources are
+// Kubernetes objects.
func TestExpectedLabeledCorrectly(t *testing.T) {
for reconciler, r := range allResources(nil) {
for outer, v := range r.Expected() {
@@ -88,8 +94,9 @@
}
}
-// testResource is a resource type used for testing. The inner type is a string that is equal to its name (key).
-// It simulates a target (ie. k8s apiserver mock) that always acts nominally (all resources are created, deleted as
+// testResource is a resource type used for testing. The inner type is a string
+// that is equal to its name (key). It simulates a target (ie. k8s apiserver
+// mock) that always acts nominally (all resources are created, deleted as
// requested, and the state is consistent with requests).
type testResource struct {
// current is the simulated state of resources in the target.
@@ -124,7 +131,8 @@
return exp
}
-// newTestResource creates a test resource with a list of expected resource strings.
+// newTestResource creates a test resource with a list of expected resource
+// strings.
func newTestResource(want ...string) *testResource {
expected := make(map[string]string)
for _, w := range want {
@@ -136,8 +144,9 @@
}
}
-// currentDiff returns a human-readable string showing the different between the current state and the given resource
-// strings. If no difference is present, the returned string is empty.
+// currentDiff returns a human-readable string showing the difference between
+// the current state and the given resource strings. If no difference is
+// present, the returned string is empty.
func (r *testResource) currentDiff(want ...string) string {
expected := make(map[string]string)
for _, w := range want {
@@ -154,8 +163,8 @@
return ""
}
-// TestBasicReconciliation ensures that the reconcile function does manipulate a target state based on a set of
-// expected resources.
+// TestBasicReconciliation ensures that the reconcile function does manipulate
+// a target state based on a set of expected resources.
func TestBasicReconciliation(t *testing.T) {
ctx := context.Background()
r := newTestResource("foo", "bar", "baz")
diff --git a/metropolis/node/kubernetes/reconciler/resources_csi.go b/metropolis/node/kubernetes/reconciler/resources_csi.go
index c7f7b2b..04d52a8 100644
--- a/metropolis/node/kubernetes/reconciler/resources_csi.go
+++ b/metropolis/node/kubernetes/reconciler/resources_csi.go
@@ -24,9 +24,11 @@
"k8s.io/client-go/kubernetes"
)
-// TODO(q3k): this is duplicated with //metropolis/node/kubernetes:provisioner.go; integrate this once provisioner.go
-// gets moved into a subpackage.
-// ONCHANGE(//metropolis/node/kubernetes:provisioner.go): needs to match csiProvisionerName declared.
+// TODO(q3k): this is duplicated with
+// //metropolis/node/kubernetes:provisioner.go; integrate this once
+// provisioner.go gets moved into a subpackage.
+// ONCHANGE(//metropolis/node/kubernetes:provisioner.go): needs to match
+// csiProvisionerName declared.
const csiProvisionerName = "dev.monogon.metropolis.vfs"
type resourceCSIDrivers struct {
diff --git a/metropolis/node/kubernetes/service.go b/metropolis/node/kubernetes/service.go
index 5c8b037..fe701e6 100644
--- a/metropolis/node/kubernetes/service.go
+++ b/metropolis/node/kubernetes/service.go
@@ -206,7 +206,8 @@
return nil
}
-// GetDebugKubeconfig issues a kubeconfig for an arbitrary given identity. Useful for debugging and testing.
+// GetDebugKubeconfig issues a kubeconfig for an arbitrary given identity.
+// Useful for debugging and testing.
func (s *Service) GetDebugKubeconfig(ctx context.Context, request *apb.GetDebugKubeconfigRequest) (*apb.GetDebugKubeconfigResponse, error) {
client, err := s.c.KPKI.VolatileClient(ctx, request.Id, request.Groups)
if err != nil {