*: reflow comments to 80 characters
This reformats the entire Metropolis codebase to have comments no longer
than 80 characters, implementing CR/66.
This has been done partly by hand, as we don't yet have good integration
between commentwrap and Bazel; that can be implemented if we decide to
adopt this tool and limit.
Change-Id: If1fff0b093ef806f5dc00551c11506e8290379d0
diff --git a/metropolis/pkg/devicemapper/devicemapper.go b/metropolis/pkg/devicemapper/devicemapper.go
index 2687e3a..d56e8f9 100644
--- a/metropolis/pkg/devicemapper/devicemapper.go
+++ b/metropolis/pkg/devicemapper/devicemapper.go
@@ -122,8 +122,8 @@
}
}
-// stringToDelimitedBuf copies src to dst and returns an error if len(src) > len(dst),
-// or when the string contains a null byte.
+// stringToDelimitedBuf copies src to dst and returns an error if len(src) >
+// len(dst), or when the string contains a null byte.
func stringToDelimitedBuf(dst []byte, src string) error {
if len(src) > len(dst)-1 {
return fmt.Errorf("string longer than target buffer (%v > %v)", len(src), len(dst)-1)
diff --git a/metropolis/pkg/erofs/compression.go b/metropolis/pkg/erofs/compression.go
index 58b2f4b..dca9946 100644
--- a/metropolis/pkg/erofs/compression.go
+++ b/metropolis/pkg/erofs/compression.go
@@ -21,8 +21,8 @@
import "encoding/binary"
-// mapHeader is a legacy but still-used advisory structure at the start of a compressed VLE block. It contains constant
-// values as annotated.
+// mapHeader is a legacy but still-used advisory structure at the start of a
+// compressed VLE block. It contains constant values as annotated.
type mapHeader struct {
Reserved uint32 // 0
Advise uint16 // 1
diff --git a/metropolis/pkg/erofs/defs.go b/metropolis/pkg/erofs/defs.go
index b547867..85898bf 100644
--- a/metropolis/pkg/erofs/defs.go
+++ b/metropolis/pkg/erofs/defs.go
@@ -16,11 +16,13 @@
package erofs
-// This file contains definitions coming from the in-Kernel implementation of the EROFS filesystem.
-// All definitions come from @linux//fs/erofs:erofs_fs.h unless stated otherwise.
+// This file contains definitions coming from the in-Kernel implementation of
+// the EROFS filesystem. All definitions come from @linux//fs/erofs:erofs_fs.h
+// unless stated otherwise.
-// Magic contains the 4 magic bytes starting at position 1024 identifying an EROFS filesystem.
-// Defined in @linux//include/uapi/linux/magic.h EROFS_SUPER_MAGIC_V1
+// Magic contains the 4 magic bytes starting at position 1024 identifying an
+// EROFS filesystem. Defined in @linux//include/uapi/linux/magic.h
+// EROFS_SUPER_MAGIC_V1
var Magic = [4]byte{0xe2, 0xe1, 0xf5, 0xe0}
const blockSizeBits = 12
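For reviewers, a minimal self-contained sketch (not part of this change) of how the Magic constant is meant to be used: probing byte offset 1024 of an image. The image path is hypothetical and the constant is inlined so the snippet compiles on its own.

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"os"
)

// Magic mirrors erofs.Magic: the 4 superblock magic bytes at offset 1024.
var Magic = [4]byte{0xe2, 0xe1, 0xf5, 0xe0}

func main() {
	f, err := os.Open("image.erofs") // hypothetical image path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	buf := make([]byte, 4)
	if _, err := f.ReadAt(buf, 1024); err != nil {
		log.Fatal(err)
	}
	if bytes.Equal(buf, Magic[:]) {
		fmt.Println("EROFS filesystem detected")
	}
}
```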
diff --git a/metropolis/pkg/erofs/defs_test.go b/metropolis/pkg/erofs/defs_test.go
index e32e155..1d31bff 100644
--- a/metropolis/pkg/erofs/defs_test.go
+++ b/metropolis/pkg/erofs/defs_test.go
@@ -24,8 +24,8 @@
"github.com/stretchr/testify/assert"
)
-// These test that the specified structures serialize to the same number of bytes as the ones in the
-// EROFS kernel module.
+// These tests verify that the specified structures serialize to the same
+// number of bytes as the ones in the EROFS kernel module.
func TestSuperblockSize(t *testing.T) {
var buf bytes.Buffer
diff --git a/metropolis/pkg/erofs/erofs.go b/metropolis/pkg/erofs/erofs.go
index af6ad1c..3e4ce89 100644
--- a/metropolis/pkg/erofs/erofs.go
+++ b/metropolis/pkg/erofs/erofs.go
@@ -29,19 +29,22 @@
// Writer writes a new EROFS filesystem.
type Writer struct {
w io.WriteSeeker
- // fixDirectoryEntry contains for each referenced path where it is referenced from. Since self-references
- // are required anyways (for the "." and ".." entries) we let the user write files in any order and just
- // point the directory entries to the right target nid and file type on Close().
+ // fixDirectoryEntry records, for each referenced path, the locations it is
+ // referenced from. Since self-references are required anyway (for the "."
+ // and ".." entries) we let the user write files in any order and just
+ // point the directory entries to the right target nid and file type on
+ // Close().
fixDirectoryEntry map[string][]direntFixupLocation
pathInodeMeta map[string]*uncompressedInodeMeta
- // legacyInodeIndex stores the next legacy (32-bit) inode to be allocated. 64 bit inodes are automatically
- // calculated by EROFS on mount.
+ // legacyInodeIndex stores the next legacy (32-bit) inode to be allocated.
+ // 64 bit inodes are automatically calculated by EROFS on mount.
legacyInodeIndex uint32
blockAllocatorIndex uint32
metadataBlocksFree metadataBlocksMeta
}
-// NewWriter creates a new EROFS filesystem writer. The given WriteSeeker needs to be at the start.
+// NewWriter creates a new EROFS filesystem writer. The given WriteSeeker needs
+// to be at the start.
func NewWriter(w io.WriteSeeker) (*Writer, error) {
erofsWriter := &Writer{
w: w,
@@ -56,17 +59,20 @@
return nil, fmt.Errorf("failed to write initial padding: %w", err)
}
if err := binary.Write(erofsWriter.w, binary.LittleEndian, &superblock{
- Magic: Magic,
- BlockSizeBits: blockSizeBits,
- RootNodeNumber: 36, // 1024 (padding) + 128 (superblock) / 32, not eligible for fixup as different int size
+ Magic: Magic,
+ BlockSizeBits: blockSizeBits,
+ // (1024 (padding) + 128 (superblock)) / 32; not eligible for fixup as
+ // it uses a different int size
+ RootNodeNumber: 36,
}); err != nil {
return nil, fmt.Errorf("failed to write superblock: %w", err)
}
return erofsWriter, nil
}
-// allocateMetadata allocates metadata space of size bytes with a given alignment and seeks to the first byte of the
-// newly-allocated metadata space. It also returns the position of that first byte.
+// allocateMetadata allocates metadata space of size bytes with a given
+// alignment and seeks to the first byte of the newly-allocated metadata space.
+// It also returns the position of that first byte.
func (w *Writer) allocateMetadata(size int, alignment uint16) (int64, error) {
if size > BlockSize {
panic("cannot allocate a metadata object bigger than BlockSize bytes")
@@ -90,9 +96,10 @@
return pos, nil
}
-// allocateBlocks allocates n new BlockSize-sized block and seeks to the beginning of the first newly-allocated block.
-// It also returns the first newly-allocated block number. The caller is expected to write these blocks completely
-// before calling allocateBlocks again.
+// allocateBlocks allocates n new BlockSize-sized blocks and seeks to the
+// beginning of the first newly-allocated block. It also returns the first
+// newly-allocated block number. The caller is expected to write these blocks
+// completely before calling allocateBlocks again.
func (w *Writer) allocateBlocks(n uint32) (uint32, error) {
if _, err := w.w.Seek(int64(w.blockAllocatorIndex)*BlockSize, io.SeekStart); err != nil {
return 0, fmt.Errorf("cannot seek to end of last block, check write alignment: %w", err)
@@ -113,18 +120,20 @@
return i
}
-// CreateFile adds a new file to the EROFS. It returns a WriteCloser to which the file contents should be written and
-// which then needs to be closed. The last writer obtained by calling CreateFile() needs to be closed first before
-// opening a new one. The given pathname needs to be referenced by a directory created using Create(), otherwise it will
-// not be accessible.
+// CreateFile adds a new file to the EROFS. It returns a WriteCloser to which
+// the file contents should be written and which then needs to be closed. The
+// last writer obtained by calling CreateFile() needs to be closed before
+// opening a new one. The given pathname needs to be referenced by a directory
+// created using Create(), otherwise it will not be accessible.
func (w *Writer) CreateFile(pathname string, meta *FileMeta) io.WriteCloser {
return w.create(pathname, meta)
}
-// Create adds a new non-file inode to the EROFS. This includes directories, device nodes, symlinks and FIFOs.
-// The first call to Create() needs to be with pathname "." and a directory inode.
-// The given pathname needs to be referenced by a directory, otherwise it will not be accessible (with the exception of
-// the directory ".").
+// Create adds a new non-file inode to the EROFS. This includes directories,
+// device nodes, symlinks and FIFOs. The first call to Create() needs to be
+// with pathname "." and a directory inode. The given pathname needs to be
+// referenced by a directory, otherwise it will not be accessible (with the
+// exception of the directory ".").
func (w *Writer) Create(pathname string, inode Inode) error {
iw := w.create(pathname, inode)
switch i := inode.(type) {
@@ -140,8 +149,9 @@
return iw.Close()
}
-// Close finishes writing an EROFS filesystem. Errors by this function need to be handled as they indicate if the
-// written filesystem is consistent (i.e. there are no directory entries pointing to nonexistent inodes).
+// Close finishes writing an EROFS filesystem. Errors from this function need
+// to be handled as they indicate whether the written filesystem is consistent
+// (i.e. there are no directory entries pointing to nonexistent inodes).
func (w *Writer) Close() error {
for targetPath, entries := range w.fixDirectoryEntry {
for _, entry := range entries {
@@ -157,8 +167,9 @@
return nil
}
-// uncompressedInodeMeta tracks enough metadata about a written inode to be able to point dirents to it and to provide
-// a WriteSeeker into the inode itself.
+// uncompressedInodeMeta tracks enough metadata about a written inode to be
+// able to point dirents to it and to provide a WriteSeeker into the inode
+// itself.
type uncompressedInodeMeta struct {
nid uint64
ftype uint8
@@ -188,8 +199,9 @@
func (a *uncompressedInodeMeta) Write(p []byte) (int, error) {
if a.currentOffset < a.blockLength {
- // TODO(lorenz): Handle the special case where a directory inode is spread across multiple
- // blocks (depending on other factors this occurs around ~200 direct children).
+ // TODO(lorenz): Handle the special case where a directory inode is
+ // spread across multiple blocks (depending on other factors this
+ // occurs around ~200 direct children).
return 0, errors.New("relocating dirents in multi-block directory inodes is unimplemented")
}
if _, err := a.writer.w.Seek(a.inlineStart+a.currentOffset, io.SeekStart); err != nil {
@@ -204,8 +216,9 @@
entryIndex uint16
}
-// direntFixup overrides nid and file type from the path the dirent is pointing to. The given iw is expected to be at
-// the start of the dirent inode to be fixed up.
+// direntFixup overrides nid and file type from the path the dirent is pointing
+// to. The given iw is expected to be at the start of the dirent inode to be
+// fixed up.
func direntFixup(iw io.WriteSeeker, entryIndex int64, meta *uncompressedInodeMeta) error {
if _, err := iw.Seek(entryIndex*12, io.SeekStart); err != nil {
return fmt.Errorf("failed to seek to dirent: %w", err)
@@ -227,12 +240,14 @@
freeBytes uint16
}
-// metadataBlocksMeta contains metadata about all metadata blocks, most importantly the amount of free
-// bytes in each block. This is not a map for reproducibility (map ordering).
+// metadataBlocksMeta contains metadata about all metadata blocks, most
+// importantly the amount of free bytes in each block. This is a slice rather
+// than a map for reproducibility (map iteration order is randomized).
type metadataBlocksMeta []metadataBlockMeta
-// findBlock returns the absolute position where `size` bytes with the specified alignment can still fit.
-// If there is not enough space in any metadata block it returns false as the second return value.
+// findBlock returns the absolute position where `size` bytes with the
+// specified alignment can still fit. If there is not enough space in any
+// metadata block it returns false as the second return value.
func (m metadataBlocksMeta) findBlock(size uint16, alignment uint16) (int64, bool) {
for i, blockMeta := range m {
freeBytesAligned := blockMeta.freeBytes - (blockMeta.freeBytes % alignment)
diff --git a/metropolis/pkg/erofs/inode_types.go b/metropolis/pkg/erofs/inode_types.go
index 05b0f54..bac29c5 100644
--- a/metropolis/pkg/erofs/inode_types.go
+++ b/metropolis/pkg/erofs/inode_types.go
@@ -28,12 +28,14 @@
"golang.org/x/sys/unix"
)
-// Inode specifies an interface that all inodes that can be written to an EROFS filesystem implement.
+// Inode specifies an interface that all inodes that can be written to an EROFS
+// filesystem implement.
type Inode interface {
inode() *inodeCompact
}
-// Base contains generic inode metadata independent from the specific inode type.
+// Base contains generic inode metadata independent from the specific inode
+// type.
type Base struct {
Permissions uint16
UID, GID uint16
@@ -47,8 +49,8 @@
}
}
-// Directory represents a directory inode. The Children property contains the directories' direct children (just the
-// name, not the full path).
+// Directory represents a directory inode. The Children property contains the
+// directory's direct children (just the names, not the full paths).
type Directory struct {
Base
Children []string
@@ -59,7 +61,8 @@
}
func (d *Directory) writeTo(w *uncompressedInodeWriter) error {
- // children is d.Children with appended backrefs (. and ..), copied to not pollute source
+ // children is d.Children with appended backrefs (. and ..), copied to not
+ // pollute source
children := make([]string, len(d.Children))
copy(children, d.Children)
children = append(children, ".", "..")
@@ -97,7 +100,8 @@
return nil
}
-// CharacterDevice represents a Unix character device inode with major and minor numbers.
+// CharacterDevice represents a Unix character device inode with major and
+// minor numbers.
type CharacterDevice struct {
Base
Major uint32
@@ -110,7 +114,8 @@
return i
}
-// CharacterDevice represents a Unix block device inode with major and minor numbers.
+// BlockDevice represents a Unix block device inode with major and
+// numbers.
type BlockDevice struct {
Base
Major uint32
@@ -141,7 +146,8 @@
return s.baseInode(unix.S_IFSOCK)
}
-// SymbolicLink represents a symbolic link/symlink to another inode. Target is the literal string target of the symlink.
+// SymbolicLink represents a symbolic link/symlink to another inode. Target is
+// the literal string target of the symlink.
type SymbolicLink struct {
Base
Target string
@@ -156,8 +162,9 @@
return err
}
-// FileMeta represents the metadata of a regular file. In this case the contents are written to a Writer returned by the
-// CreateFile function on the EROFS Writer and not included in the structure itself.
+// FileMeta represents the metadata of a regular file. In this case the
+// contents are written to a Writer returned by the CreateFile function on the
+// EROFS Writer and not included in the structure itself.
type FileMeta struct {
Base
}
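Taken together, the comments above describe the intended calling sequence for the writer. A hedged usage sketch (not part of this change; paths, permissions, and file contents are illustrative):

```go
package main

import (
	"log"
	"os"

	"source.monogon.dev/metropolis/pkg/erofs"
)

func main() {
	f, err := os.Create("/tmp/test.erofs")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The WriteSeeker handed to NewWriter must be at the start.
	w, err := erofs.NewWriter(f)
	if err != nil {
		log.Fatal(err)
	}
	// The first Create() call must be the directory ".".
	if err := w.Create(".", &erofs.Directory{
		Base:     erofs.Base{Permissions: 0755},
		Children: []string{"hello.txt"},
	}); err != nil {
		log.Fatal(err)
	}
	// Each writer from CreateFile must be closed before the next create.
	fw := w.CreateFile("hello.txt", &erofs.FileMeta{
		Base: erofs.Base{Permissions: 0644},
	})
	if _, err := fw.Write([]byte("hello world\n")); err != nil {
		log.Fatal(err)
	}
	if err := fw.Close(); err != nil {
		log.Fatal(err)
	}
	// Close performs the dirent fixups; its error reports consistency.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}
```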
diff --git a/metropolis/pkg/erofs/uncompressed_inode_writer.go b/metropolis/pkg/erofs/uncompressed_inode_writer.go
index df89fec..97aefc0 100644
--- a/metropolis/pkg/erofs/uncompressed_inode_writer.go
+++ b/metropolis/pkg/erofs/uncompressed_inode_writer.go
@@ -24,9 +24,10 @@
"math"
)
-// uncompressedInodeWriter exposes a io.Write-style interface for a single uncompressed inode. It splits the Write-calls
-// into blocks and writes both the blocks and inode metadata. It is required to call Close() to ensure everything is
-// properly written down before writing another inode.
+// uncompressedInodeWriter exposes an io.Write-style interface for a single
+// uncompressed inode. It splits the Write-calls into blocks and writes both
+// the blocks and inode metadata. It is required to call Close() to ensure
+// everything is properly written down before writing another inode.
type uncompressedInodeWriter struct {
buf bytes.Buffer
writer *Writer
diff --git a/metropolis/pkg/fileargs/fileargs.go b/metropolis/pkg/fileargs/fileargs.go
index 26c054b..bec8fca 100644
--- a/metropolis/pkg/fileargs/fileargs.go
+++ b/metropolis/pkg/fileargs/fileargs.go
@@ -31,8 +31,9 @@
// DefaultSize is the default size limit for FileArgs
const DefaultSize = 4 * 1024 * 1024
-// TempDirectory is the directory where FileArgs will mount the actual files to. Defaults to
-// os.TempDir() but can be globally overridden by the application before any FileArgs are used.
+// TempDirectory is the directory where FileArgs will mount the actual files
+// to. Defaults to os.TempDir() but can be globally overridden by the
+// application before any FileArgs are used.
var TempDirectory = os.TempDir()
type FileArgs struct {
@@ -40,14 +41,15 @@
lastError error
}
-// New initializes a new set of file-based arguments. Remember to call Close() if you're done
-// using it, otherwise this leaks memory and mounts.
+// New initializes a new set of file-based arguments. Remember to call Close()
+// if you're done using it, otherwise this leaks memory and mounts.
func New() (*FileArgs, error) {
return NewWithSize(DefaultSize)
}
-// NewWthSize is the same as new, but with a custom size limit. Please be aware that this data
-// cannot be swapped out and using a size limit that's too high can deadlock your kernel.
+// NewWithSize is the same as New, but with a custom size limit. Please be aware
+// that this data cannot be swapped out and using a size limit that's too high
+// can deadlock your kernel.
func NewWithSize(size uint64) (*FileArgs, error) {
randomNameRaw := make([]byte, 128/8)
if _, err := io.ReadFull(rand.Reader, randomNameRaw); err != nil {
@@ -57,7 +59,8 @@
if err := os.MkdirAll(tmpPath, 0700); err != nil {
return nil, err
}
- // This uses ramfs instead of tmpfs because we never want to swap this for security reasons
+ // This uses ramfs instead of tmpfs because we never want to swap this for
+ // security reasons
if err := unix.Mount("none", tmpPath, "ramfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, fmt.Sprintf("size=%v", size)); err != nil {
return nil, err
}
@@ -66,8 +69,8 @@
}, nil
}
-// ArgPath returns the path of the temporary file for this argument. It names the temporary
-// file according to name.
+// ArgPath returns the path of the temporary file for this argument. It names
+// the temporary file according to name.
func (f *FileArgs) ArgPath(name string, content []byte) string {
if f.lastError != nil {
return ""
@@ -83,8 +86,11 @@
return path
}
-// FileOpt returns a full option with the temporary file name already filled in.
-// Example: `FileOpt("--testopt", "test.txt", []byte("hello")) == "--testopt=/tmp/daf8ed.../test.txt"`
+// FileOpt returns a full option with the temporary file name already filled
+// in. Example:
+//
+// option := FileOpt("--testopt", "test.txt", []byte("hello"))
+// option == "--testopt=/tmp/daf8ed.../test.txt"
func (f *FileArgs) FileOpt(optName, fileName string, content []byte) string {
return fmt.Sprintf("%v=%v", optName, f.ArgPath(fileName, content))
}
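A short usage sketch of the API documented above (not part of this change; the program name and flag are made up):

```go
package main

import (
	"fmt"
	"log"
	"os/exec"

	"source.monogon.dev/metropolis/pkg/fileargs"
)

func main() {
	args, err := fileargs.New()
	if err != nil {
		log.Fatal(err)
	}
	// Close unmounts the ramfs; skipping it leaks memory and mounts.
	defer args.Close()

	// Hand a file-backed option to a child process.
	cmd := exec.Command("someprog",
		args.FileOpt("--config", "config.yaml", []byte("key: value\n")))
	fmt.Println(cmd.Args)
}
```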
diff --git a/metropolis/pkg/freeport/freeport.go b/metropolis/pkg/freeport/freeport.go
index bd047b5..da52311 100644
--- a/metropolis/pkg/freeport/freeport.go
+++ b/metropolis/pkg/freeport/freeport.go
@@ -21,10 +21,12 @@
"net"
)
-// AllocateTCPPort allocates a TCP port on the looopback address, and starts a temporary listener on it. That listener
-// is returned to the caller alongside with the allocated port number. The listener must be closed right before
-// the port is used by the caller. This naturally still leaves a race condition window where that port number
-// might be snatched up by some other process, but there doesn't seem to be a better way to do this.
+// AllocateTCPPort allocates a TCP port on the loopback address, and starts a
+// temporary listener on it. That listener is returned to the caller along with
+// the allocated port number. The listener must be closed right before the port is
+// used by the caller. This naturally still leaves a race condition window where
+// that port number might be snatched up by some other process, but there doesn't
+// seem to be a better way to do this.
func AllocateTCPPort() (uint16, io.Closer, error) {
addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
if err != nil {
@@ -38,8 +40,9 @@
return uint16(l.Addr().(*net.TCPAddr).Port), l, nil
}
-// MustConsume takes the result of AllocateTCPPort, closes the listener and returns the allocated port.
-// If anything goes wrong (port could not be allocated or closed) it will panic.
+// MustConsume takes the result of AllocateTCPPort, closes the listener and returns
+// the allocated port. If anything goes wrong (port could not be allocated or
+// closed) it will panic.
func MustConsume(port uint16, lis io.Closer, err error) int {
if err != nil {
panic(err)
diff --git a/metropolis/pkg/fsquota/fsinfo.go b/metropolis/pkg/fsquota/fsinfo.go
index f885d51..ecbaecf 100644
--- a/metropolis/pkg/fsquota/fsinfo.go
+++ b/metropolis/pkg/fsquota/fsinfo.go
@@ -24,9 +24,9 @@
"golang.org/x/sys/unix"
)
-// This requires fsinfo() support, which is not yet in any stable kernel.
-// Our kernel has that syscall backported. This would otherwise be an extremely expensive
-// operation and also involve lots of logic from our side.
+// This requires fsinfo() support, which is not yet in any stable kernel. Our
+// kernel has that syscall backported. This would otherwise be an extremely
+// expensive operation and also involve lots of logic from our side.
// From syscall_64.tbl
const sys_fsinfo = 441
diff --git a/metropolis/pkg/fsquota/fsquota.go b/metropolis/pkg/fsquota/fsquota.go
index 3c0c578..263dd48 100644
--- a/metropolis/pkg/fsquota/fsquota.go
+++ b/metropolis/pkg/fsquota/fsquota.go
@@ -14,11 +14,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package fsquota provides a simplified interface to interact with Linux's filesystem qouta API.
-// It only supports setting quotas on directories, not groups or users.
-// Quotas need to be already enabled on the filesystem to be able to use them using this package.
-// See the quotactl package if you intend to use this on a filesystem where quotas need to be
-// enabled manually.
+// Package fsquota provides a simplified interface to interact with Linux's
+// filesystem quota API. It only supports setting quotas on directories, not
+// groups or users. Quotas need to be already enabled on the filesystem to be
+// able to use them using this package. See the quotactl package if you intend
+// to use this on a filesystem where quotas need to be enabled manually.
package fsquota
import (
@@ -32,10 +32,11 @@
"source.monogon.dev/metropolis/pkg/fsquota/quotactl"
)
-// SetQuota sets the quota of bytes and/or inodes in a given path. To not set a limit, set the
-// corresponding argument to zero. Setting both arguments to zero removes the quota entirely.
-// This function can only be called on an empty directory. It can't be used to create a quota
-// below a directory which already has a quota since Linux doesn't offer hierarchical quotas.
+// SetQuota sets the quota of bytes and/or inodes in a given path. To not set a
+// limit, set the corresponding argument to zero. Setting both arguments to
+// zero removes the quota entirely. This function can only be called on an
+// empty directory. It can't be used to create a quota below a directory which
+// already has a quota since Linux doesn't offer hierarchical quotas.
func SetQuota(path string, maxBytes uint64, maxInodes uint64) error {
dir, err := os.Open(path)
if err != nil {
@@ -61,10 +62,12 @@
var lastID uint32 = attrs.ProjectID
if lastID == 0 {
- // No project/quota exists for this directory, assign a new project quota
- // TODO(lorenz): This is racy, but the kernel does not support atomically assigning
- // quotas. So this needs to be added to the kernels setquota interface. Due to the short
- // time window and infrequent calls this should not be an immediate issue.
+ // No project/quota exists for this directory, assign a new project
+ // quota.
+ // TODO(lorenz): This is racy, but the kernel does not support
+ // atomically assigning quotas. So this needs to be added to the
+ // kernel's setquota interface. Due to the short time window and
+ // infrequent calls this should not be an immediate issue.
for {
quota, err := quotactl.GetNextQuota(source, quotactl.QuotaTypeProject, lastID)
if err == unix.ENOENT || err == unix.ESRCH {
@@ -115,7 +118,8 @@
InodesUsed uint64
}
-// GetQuota returns the current active quota and its utilization at the given path
+// GetQuota returns the current active quota and its utilization at the given
+// path.
func GetQuota(path string) (*Quota, error) {
dir, err := os.Open(path)
if err != nil {
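A usage sketch of the two calls documented above (not part of this change; the path is hypothetical and must be an empty directory on a quota-enabled filesystem):

```go
package main

import (
	"fmt"
	"log"

	"source.monogon.dev/metropolis/pkg/fsquota"
)

func main() {
	dir := "/data/volumes/v1" // hypothetical, empty, quotas enabled

	// Limit the directory to 1 GiB and 100k inodes.
	if err := fsquota.SetQuota(dir, 1<<30, 100_000); err != nil {
		log.Fatal(err)
	}

	q, err := fsquota.GetQuota(dir)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("bytes: %d/%d inodes: %d/%d\n",
		q.BytesUsed, q.Bytes, q.InodesUsed, q.Inodes)
}
```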
diff --git a/metropolis/pkg/fsquota/fsquota_test.go b/metropolis/pkg/fsquota/fsquota_test.go
index 4729dac..392a0e9 100644
--- a/metropolis/pkg/fsquota/fsquota_test.go
+++ b/metropolis/pkg/fsquota/fsquota_test.go
@@ -29,9 +29,9 @@
"golang.org/x/sys/unix"
)
-// withinTolerance is a helper for asserting that a value is within a certain percentage of the
-// expected value. The tolerance is specified as a float between 0 (exact match)
-// and 1 (between 0 and twice the expected value).
+// withinTolerance is a helper for asserting that a value is within a certain
+// percentage of the expected value. The tolerance is specified as a float
+// between 0 (exact match) and 1 (between 0 and twice the expected value).
func withinTolerance(t *testing.T, expected uint64, actual uint64, tolerance float64, name string) {
t.Helper()
delta := uint64(math.Round(float64(expected) * tolerance))
@@ -131,8 +131,9 @@
require.Equal(t, uint64(bytesQuota), quotaUtil.Bytes, "bytes quota readback incorrect")
require.Equal(t, uint64(inodesQuota), quotaUtil.Inodes, "inodes quota readback incorrect")
- // Give 10% tolerance for quota used values to account for metadata overhead and internal
- // structures that are also in there. If it's out by more than that it's an issue anyways.
+ // Give 10% tolerance for quota used values to account for metadata
+ // overhead and internal structures that are also in there. If it's out
+ // by more than that it's an issue anyway.
withinTolerance(t, uint64(len(sizeFileData)), quotaUtil.BytesUsed, 0.1, "BytesUsed")
// Write 50 inodes for a total of 51 (with the 512K file)
diff --git a/metropolis/pkg/fsquota/quotactl/quotactl.go b/metropolis/pkg/fsquota/quotactl/quotactl.go
index a2edfa7..337daaa 100644
--- a/metropolis/pkg/fsquota/quotactl/quotactl.go
+++ b/metropolis/pkg/fsquota/quotactl/quotactl.go
@@ -14,9 +14,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package quotactl implements a low-level wrapper around the modern portion of Linux's
-// quotactl() syscall. See the fsquota package for a nicer interface to the most common part
-// of this API.
+// Package quotactl implements a low-level wrapper around the modern portion of
+// Linux's quotactl() syscall. See the fsquota package for a nicer interface to
+// the most common part of this API.
package quotactl
import (
@@ -212,7 +212,8 @@
return nil
}
-// Sync syncs disk copy of filesystems quotas. If device is empty it syncs all filesystems.
+// Sync syncs disk copy of filesystems quotas. If device is empty it syncs all
+// filesystems.
func Sync(device string) error {
if device != "" {
devArg, err := unix.BytePtrFromString(device)
diff --git a/metropolis/pkg/jsonpatch/jsonpatch.go.go b/metropolis/pkg/jsonpatch/jsonpatch.go.go
index 9682980..be3d302 100644
--- a/metropolis/pkg/jsonpatch/jsonpatch.go.go
+++ b/metropolis/pkg/jsonpatch/jsonpatch.go.go
@@ -14,7 +14,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package jsonpatch contains data structures and encoders for JSON Patch (RFC 6902) and JSON Pointers (RFC 6901)
+// Package jsonpatch contains data structures and encoders for JSON Patch (RFC
+// 6902) and JSON Pointers (RFC 6901)
package jsonpatch
import "strings"
@@ -27,7 +28,8 @@
Value interface{} `json:"value,omitempty"`
}
-// EncodeJSONRefToken encodes a JSON reference token as part of a JSON Pointer (RFC 6901 Section 2)
+// EncodeJSONRefToken encodes a JSON reference token as part of a JSON Pointer
+// (RFC 6901 Section 2)
func EncodeJSONRefToken(token string) string {
x := strings.ReplaceAll(token, "~", "~0")
return strings.ReplaceAll(x, "/", "~1")
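To illustrate the escaping order (sketch, not part of this change):

```go
package main

import (
	"fmt"

	"source.monogon.dev/metropolis/pkg/jsonpatch"
)

func main() {
	// "~" becomes "~0" first, then "/" becomes "~1", so the token can
	// be embedded in a JSON Pointer without ambiguity.
	token := jsonpatch.EncodeJSONRefToken("a/b~c")
	fmt.Println(token)                       // a~1b~0c
	fmt.Println("/metadata/labels/" + token) // a complete JSON Pointer
}
```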
diff --git a/metropolis/pkg/logbuffer/linebuffer.go b/metropolis/pkg/logbuffer/linebuffer.go
index 8048604..6fd9a62 100644
--- a/metropolis/pkg/logbuffer/linebuffer.go
+++ b/metropolis/pkg/logbuffer/linebuffer.go
@@ -25,7 +25,8 @@
apb "source.monogon.dev/metropolis/proto/api"
)
-// Line is a line stored in the log buffer - a string, that has been perhaps truncated (due to exceeded limits).
+// Line is a line stored in the log buffer - a string that has perhaps been
+// truncated (due to exceeded length limits).
type Line struct {
Data string
OriginalLength int
@@ -36,8 +37,8 @@
return l.OriginalLength > len(l.Data)
}
-// String returns the line with an ellipsis at the end (...) if the line has been truncated, or the original line
-// otherwise.
+// String returns the line with an ellipsis at the end (...) if the line has been
+// truncated, or the original line otherwise.
func (l *Line) String() string {
if l.Truncated() {
return l.Data + "..."
@@ -68,24 +69,27 @@
}, nil
}
-// LineBuffer is a io.WriteCloser that will call a given callback every time a line is completed.
+// LineBuffer is an io.WriteCloser that will call a given callback every time
+// a line is completed.
type LineBuffer struct {
maxLineLength int
cb LineBufferCallback
mu sync.Mutex
cur strings.Builder
- // length is the length of the line currently being written - this will continue to increase, even if the string
- // exceeds maxLineLength.
+ // length is the length of the line currently being written - this will continue to
+ // increase, even if the string exceeds maxLineLength.
length int
closed bool
}
-// LineBufferCallback is a callback that will get called any time the line is completed. The function must not cause another
-// write to the LineBuffer, or the program will deadlock.
+// LineBufferCallback is a callback that will get called any time the line is
+// completed. The function must not cause another write to the LineBuffer, or the
+// program will deadlock.
type LineBufferCallback func(*Line)
-// NewLineBuffer creates a new LineBuffer with a given line length limit and callback.
+// NewLineBuffer creates a new LineBuffer with a given line length limit and
+// callback.
func NewLineBuffer(maxLineLength int, cb LineBufferCallback) *LineBuffer {
return &LineBuffer{
maxLineLength: maxLineLength,
@@ -93,7 +97,8 @@
}
}
-// writeLimited writes to the internal buffer, making sure that its size does not exceed the maxLineLength.
+// writeLimited writes to the internal buffer, making sure that its size does not
+// exceed the maxLineLength.
func (l *LineBuffer) writeLimited(data []byte) {
l.length += len(data)
if l.cur.Len()+len(data) > l.maxLineLength {
@@ -144,8 +149,8 @@
return len(data), nil
}
-// Close will emit any leftover data in the buffer to the callback. Subsequent calls to Write will fail. Subsequent calls to Close
-// will also fail.
+// Close will emit any leftover data in the buffer to the callback. Subsequent
+// calls to Write will fail. Subsequent calls to Close will also fail.
func (l *LineBuffer) Close() error {
if l.closed {
return fmt.Errorf("already closed")
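The pieces above fit together like this (sketch, not part of this change):

```go
package main

import (
	"fmt"

	"source.monogon.dev/metropolis/pkg/logbuffer"
)

func main() {
	// The callback fires once per completed line. Per the comment above,
	// it must not write back into the LineBuffer, or the program deadlocks.
	lb := logbuffer.NewLineBuffer(64, func(l *logbuffer.Line) {
		fmt.Printf("got %q (truncated: %v)\n", l.String(), l.Truncated())
	})
	lb.Write([]byte("partial "))
	lb.Write([]byte("line\nsecond line\n"))
	lb.Close() // emits any leftover partial data as a final line
}
```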
diff --git a/metropolis/pkg/logbuffer/logbuffer.go b/metropolis/pkg/logbuffer/logbuffer.go
index ce47816..cd18420 100644
--- a/metropolis/pkg/logbuffer/logbuffer.go
+++ b/metropolis/pkg/logbuffer/logbuffer.go
@@ -14,10 +14,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package logbuffer implements a fixed-size in-memory ring buffer for line-separated logs.
-// It implements io.Writer and splits the data into lines. The lines are kept in a ring where the
-// oldest are overwritten once it's full. It allows retrieval of the last n lines. There is a built-in
-// line length limit to bound the memory usage at maxLineLength * size.
+// Package logbuffer implements a fixed-size in-memory ring buffer for
+// line-separated logs. It implements io.Writer and splits the data into lines.
+// The lines are kept in a ring where the oldest are overwritten once it's
+// full. It allows retrieval of the last n lines. There is a built-in line
+// length limit to bound the memory usage at maxLineLength * size.
package logbuffer
import (
@@ -32,7 +33,8 @@
*LineBuffer
}
-// New creates a new LogBuffer with a given ringbuffer size and maximum line length.
+// New creates a new LogBuffer with a given ringbuffer size and maximum line
+// length.
func New(size, maxLineLength int) *LogBuffer {
lb := &LogBuffer{
content: make([]Line, size),
@@ -49,7 +51,8 @@
b.length++
}
-// capToContentLength caps the number of requested lines to what is actually available
+// capToContentLength caps the number of requested lines to what is actually
+// available
func (b *LogBuffer) capToContentLength(n int) int {
// If there aren't enough lines to read, reduce the request size
if n > b.length {
@@ -62,8 +65,9 @@
return n
}
-// ReadLines reads the last n lines from the buffer in chronological order. If n is bigger than the
-// ring buffer or the number of available lines only the number of stored lines are returned.
+// ReadLines reads the last n lines from the buffer in chronological order. If
+// n is bigger than the ring buffer or the number of available lines, only the
+// stored lines are returned.
func (b *LogBuffer) ReadLines(n int) []Line {
b.mu.RLock()
defer b.mu.RUnlock()
@@ -78,13 +82,14 @@
return outArray
}
-// ReadLinesTruncated works exactly the same as ReadLines, but adds an ellipsis at the end of every
-// line that was truncated because it was over MaxLineLength
+// ReadLinesTruncated works exactly the same as ReadLines, but adds an ellipsis
+// at the end of every line that was truncated because it was over
+// MaxLineLength
func (b *LogBuffer) ReadLinesTruncated(n int, ellipsis string) []string {
b.mu.RLock()
defer b.mu.RUnlock()
- // This does not use ReadLines() to prevent excessive reference copying and associated GC pressure
- // since it could process a lot of lines.
+ // This does not use ReadLines() to prevent excessive reference copying and
+ // associated GC pressure since it could process a lot of lines.
n = b.capToContentLength(n)
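A usage sketch of the ring buffer as documented (not part of this change):

```go
package main

import (
	"fmt"

	"source.monogon.dev/metropolis/pkg/logbuffer"
)

func main() {
	// 1024 lines of at most 256 bytes each, bounding memory use at
	// roughly maxLineLength * size.
	buf := logbuffer.New(1024, 256)
	fmt.Fprintln(buf, "starting up")
	fmt.Fprintln(buf, "ready")

	// Requesting more lines than stored returns only what is available.
	for _, line := range buf.ReadLines(10) {
		fmt.Println(line.String())
	}
}
```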
diff --git a/metropolis/pkg/logtree/journal.go b/metropolis/pkg/logtree/journal.go
index 78c55a1..d29fdaa 100644
--- a/metropolis/pkg/logtree/journal.go
+++ b/metropolis/pkg/logtree/journal.go
@@ -22,18 +22,20 @@
"sync"
)
-// DN is the Distinguished Name, a dot-delimited path used to address loggers within a LogTree. For example, "foo.bar"
-// designates the 'bar' logger node under the 'foo' logger node under the root node of the logger. An empty string is
-// the root node of the tree.
+// DN is the Distinguished Name, a dot-delimited path used to address loggers
+// within a LogTree. For example, "foo.bar" designates the 'bar' logger node
+// under the 'foo' logger node under the root node of the logger. An empty
+// string is the root node of the tree.
type DN string
var (
ErrInvalidDN = errors.New("invalid DN")
)
-// Path return the parts of a DN, ie. all the elements of the dot-delimited DN path. For the root node, an empty list
-// will be returned. An error will be returned if the DN is invalid (contains empty parts, eg. `foo..bar`, `.foo` or
-// `foo.`.
+// Path returns the parts of a DN, ie. all the elements of the dot-delimited DN
+// path. For the root node, an empty list will be returned. An error will be
+// returned if the DN is invalid (contains empty parts, eg. `foo..bar`, `.foo`
+// or `foo.`).
func (d DN) Path() ([]string, error) {
if d == "" {
return nil, nil
@@ -47,12 +49,14 @@
return parts, nil
}
-// journal is the main log recording structure of logtree. It manages linked lists containing the actual log entries,
-// and implements scans across them. It does not understand the hierarchical nature of logtree, and instead sees all
-// entries as part of a global linked list and a local linked list for a given DN.
+// journal is the main log recording structure of logtree. It manages linked lists
+// containing the actual log entries, and implements scans across them. It does not
+// understand the hierarchical nature of logtree, and instead sees all entries as
+// part of a global linked list and a local linked list for a given DN.
//
-// The global linked list is represented by the head/tail pointers in journal and nextGlobal/prevGlobal pointers in
-// entries. The local linked lists are represented by heads[DN]/tails[DN] pointers in journal and nextLocal/prevLocal
+// The global linked list is represented by the head/tail pointers in journal and
+// nextGlobal/prevGlobal pointers in entries. The local linked lists are
+// represented by heads[DN]/tails[DN] pointers in journal and nextLocal/prevLocal
// pointers in entries:
//
// .------------. .------------. .------------.
@@ -70,37 +74,42 @@
// | | |
// ( head ) ( tails[Z] ) ( tail )
// ( heads[A.B] ) ( heads[Z] ) ( tails[A.B] )
-//
type journal struct {
- // mu locks the rest of the structure. It must be taken during any operation on the journal.
+ // mu locks the rest of the structure. It must be taken during any operation on the
+ // journal.
mu sync.RWMutex
- // tail is the side of the global linked list that contains the newest log entry, ie. the one that has been pushed
- // the most recently. It can be nil when no log entry has yet been pushed. The global linked list contains all log
- // entries pushed to the journal.
+ // tail is the side of the global linked list that contains the newest log entry,
+ // ie. the one that has been pushed the most recently. It can be nil when no log
+ // entry has yet been pushed. The global linked list contains all log entries
+ // pushed to the journal.
tail *entry
- // head is the side of the global linked list that contains the oldest log entry. It can be nil when no log entry
- // has yet been pushed.
+ // head is the side of the global linked list that contains the oldest log entry.
+ // It can be nil when no log entry has yet been pushed.
head *entry
- // tails are the tail sides of a local linked list for a given DN, ie. the sides that contain the newest entry. They
- // are nil if there are no log entries for that DN.
+ // tails are the tail sides of a local linked list for a given DN, ie. the sides
+ // that contain the newest entry. They are nil if there are no log entries for that
+ // DN.
tails map[DN]*entry
- // heads are the head sides of a local linked list for a given DN, ie. the sides that contain the oldest entry. They
- // are nil if there are no log entries for that DN.
+ // heads are the head sides of a local linked list for a given DN, ie. the sides
+ // that contain the oldest entry. They are nil if there are no log entries for that
+ // DN.
heads map[DN]*entry
- // quota is a map from DN to quota structure, representing the quota policy of a particular DN-designated logger.
+ // quota is a map from DN to quota structure, representing the quota policy of a
+ // particular DN-designated logger.
quota map[DN]*quota
- // subscribers are observer to logs. New log entries get emitted to channels present in the subscriber structure,
- // after filtering them through subscriber-provided filters (eg. to limit events to subtrees that interest that
- // particular subscriber).
+ // subscribers are observers of logs. New log entries get emitted to channels
+ // present in the subscriber structure, after filtering them through
+ // subscriber-provided filters (eg. to limit events to subtrees that interest
+ // that particular subscriber).
subscribers []*subscriber
}
-// newJournal creates a new empty journal. All journals are independent from eachother, and as such, all LogTrees are
-// also independent.
+// newJournal creates a new empty journal. All journals are independent from
+// each other, and as such, all LogTrees are also independent.
func newJournal() *journal {
return &journal{
tails: make(map[DN]*entry),
@@ -110,7 +119,8 @@
}
}
-// filter is a predicate that returns true if a log subscriber or reader is interested in a given log entry.
+// filter is a predicate that returns true if a log subscriber or reader is
+// interested in a given log entry.
type filter func(*entry) bool
// filterAll returns a filter that accepts all log entries.
@@ -118,16 +128,18 @@
return func(*entry) bool { return true }
}
-// filterExact returns a filter that accepts only log entries at a given exact DN. This filter should not be used in
-// conjunction with journal.scanEntries - instead, journal.getEntries should be used, as it is much faster.
+// filterExact returns a filter that accepts only log entries at a given exact
+// DN. This filter should not be used in conjunction with journal.scanEntries
+// - instead, journal.getEntries should be used, as it is much faster.
func filterExact(dn DN) filter {
return func(e *entry) bool {
return e.origin == dn
}
}
-// filterSubtree returns a filter that accepts all log entries at a given DN and sub-DNs. For example, filterSubtree at
-// "foo.bar" would allow entries at "foo.bar", "foo.bar.baz", but not "foo" or "foo.barr".
+// filterSubtree returns a filter that accepts all log entries at a given DN and
+// sub-DNs. For example, filterSubtree at "foo.bar" would allow entries at
+// "foo.bar", "foo.bar.baz", but not "foo" or "foo.barr".
func filterSubtree(root DN) filter {
if root == "" {
return filterAll()
@@ -150,8 +162,9 @@
}
}
-// filterSeverity returns a filter that accepts log entries at a given severity level or above. See the Severity type
-// for more information about severity levels.
+// filterSeverity returns a filter that accepts log entries at a given severity
+// level or above. See the Severity type for more information about severity
+// levels.
func filterSeverity(atLeast Severity) filter {
return func(e *entry) bool {
return e.leveled != nil && e.leveled.severity.AtLeast(atLeast)
@@ -166,10 +179,11 @@
return e.leveled != nil
}
-// scanEntries does a linear scan through the global entry list and returns all entries that match the given filters. If
-// retrieving entries for an exact event, getEntries should be used instead, as it will leverage DN-local linked lists
-// to retrieve them faster.
-// journal.mu must be taken at R or RW level when calling this function.
+// scanEntries does a linear scan through the global entry list and returns all
+// entries that match the given filters. If retrieving entries for an exact DN,
+// getEntries should be used instead, as it will leverage DN-local linked lists to
+// retrieve them faster. journal.mu must be taken at R or RW level when calling
+// this function.
func (j *journal) scanEntries(filters ...filter) (res []*entry) {
cur := j.tail
for {
@@ -191,10 +205,12 @@
}
}
-// getEntries returns all entries at a given DN. This is faster than a scanEntries(filterExact), as it uses the special
-// local linked list pointers to traverse the journal. Additional filters can be passed to further limit the entries
-// returned, but a scan through this DN's local linked list will be performed regardless.
-// journal.mu must be taken at R or RW level when calling this function.
+// getEntries returns all entries at a given DN. This is faster than a
+// scanEntries(filterExact), as it uses the special local linked list pointers to
+// traverse the journal. Additional filters can be passed to further limit the
+// entries returned, but a scan through this DN's local linked list will be
+// performed regardless. journal.mu must be taken at R or RW level when calling
+// this function.
func (j *journal) getEntries(exact DN, filters ...filter) (res []*entry) {
cur := j.tails[exact]
for {
diff --git a/metropolis/pkg/logtree/journal_entry.go b/metropolis/pkg/logtree/journal_entry.go
index d81b687..d51d406 100644
--- a/metropolis/pkg/logtree/journal_entry.go
+++ b/metropolis/pkg/logtree/journal_entry.go
@@ -18,43 +18,49 @@
import "source.monogon.dev/metropolis/pkg/logbuffer"
-// entry is a journal entry, representing a single log event (encompassed in a Payload) at a given DN.
-// See the journal struct for more information about the global/local linked lists.
+// entry is a journal entry, representing a single log event (encompassed in a
+// Payload) at a given DN. See the journal struct for more information about the
+// global/local linked lists.
type entry struct {
- // origin is the DN at which the log entry was recorded, or conversely, in which DN it will be available at.
+ // origin is the DN at which the log entry was recorded, or conversely, the DN
+ // at which it will be available.
origin DN
- // journal is the parent journal of this entry. An entry can belong only to a single journal. This pointer is used
- // to mutate the journal's head/tail pointers when unlinking an entry.
+ // journal is the parent journal of this entry. An entry can belong only to a
+ // single journal. This pointer is used to mutate the journal's head/tail pointers
+ // when unlinking an entry.
journal *journal
- // leveled is the leveled log entry for this entry, if this log entry was emitted by leveled logging. Otherwise it
- // is nil.
+ // leveled is the leveled log entry for this entry, if this log entry was emitted
+ // by leveled logging. Otherwise it is nil.
leveled *LeveledPayload
- // raw is the raw log entry for this entry, if this log entry was emitted by raw logging. Otherwise it is nil.
+ // raw is the raw log entry for this entry, if this log entry was emitted by raw
+ // logging. Otherwise it is nil.
raw *logbuffer.Line
- // prevGlobal is the previous entry in the global linked list, or nil if this entry is the oldest entry in the
- // global linked list.
+ // prevGlobal is the previous entry in the global linked list, or nil if this entry
+ // is the oldest entry in the global linked list.
prevGlobal *entry
- // nextGlobal is the next entry in the global linked list, or nil if this entry is the newest entry in the global
- // linked list.
+ // nextGlobal is the next entry in the global linked list, or nil if this entry is
+ // the newest entry in the global linked list.
nextGlobal *entry
- // prevLocal is the previous entry in this entry DN's local linked list, or nil if this entry is the oldest entry in
- // this local linked list.
+ // prevLocal is the previous entry in this entry DN's local linked list, or nil if
+ // this entry is the oldest entry in this local linked list.
prevLocal *entry
- // prevLocal is the next entry in this entry DN's local linked list, or nil if this entry is the newest entry in
- // this local linked list.
+ // nextLocal is the next entry in this entry DN's local linked list, or nil if this
+ // entry is the newest entry in this local linked list.
nextLocal *entry
- // seqLocal is a counter within a local linked list that increases by one each time a new log entry is added. It is
- // used to quickly establish local linked list sizes (by subtracting seqLocal from both ends). This setup allows for
- // O(1) length calculation for local linked lists as long as entries are only unlinked from the head or tail (which
- // is the case in the current implementation).
+ // seqLocal is a counter within a local linked list that increases by one each time
+ // a new log entry is added. It is used to quickly establish local linked list
+ // sizes (by subtracting seqLocal from both ends). This setup allows for O(1)
+ // length calculation for local linked lists as long as entries are only unlinked
+ // from the head or tail (which is the case in the current implementation).
seqLocal uint64
}
-// external returns a LogEntry object for this entry, ie. the public version of this object, without fields relating to
-// the parent journal, linked lists, sequences, etc. These objects are visible to library consumers.
+// external returns a LogEntry object for this entry, ie. the public version of
+// this object, without fields relating to the parent journal, linked lists,
+// sequences, etc. These objects are visible to library consumers.
func (e *entry) external() *LogEntry {
return &LogEntry{
DN: e.origin,
@@ -63,9 +69,8 @@
}
}
-// unlink removes this entry from both global and local linked lists, updating the journal's head/tail pointers if
-// needed.
-// journal.mu must be taken as RW
+// unlink removes this entry from both global and local linked lists, updating the
+// journal's head/tail pointers if needed. journal.mu must be taken as RW
func (e *entry) unlink() {
// Unlink from the global linked list.
if e.prevGlobal != nil {
@@ -102,7 +107,8 @@
type quota struct {
// origin is the exact DN that this quota applies to.
origin DN
- // max is the maximum count of log entries permitted for this DN - ie, the maximum size of the local linked list.
+ // max is the maximum count of log entries permitted for this DN - ie, the maximum
+ // size of the local linked list.
max uint64
}
@@ -143,12 +149,13 @@
j.tails[e.origin] = e
}
- // Apply quota to the local linked list that this entry got inserted to, ie. remove elements in excess of the
- // quota.max count.
+ // Apply quota to the local linked list that this entry got inserted to, ie. remove
+ // elements in excess of the quota.max count.
quota := j.quota[e.origin]
count := (j.heads[e.origin].seqLocal - j.tails[e.origin].seqLocal) + 1
if count > quota.max {
- // Keep popping elements off the tail of the local linked list until quota is not violated.
+ // Keep popping elements off the tail of the local linked list until quota is not
+ // violated.
left := count - quota.max
cur := j.tails[e.origin]
for {
diff --git a/metropolis/pkg/logtree/journal_subscriber.go b/metropolis/pkg/logtree/journal_subscriber.go
index e6c7c62..dc9750f 100644
--- a/metropolis/pkg/logtree/journal_subscriber.go
+++ b/metropolis/pkg/logtree/journal_subscriber.go
@@ -24,12 +24,15 @@
type subscriber struct {
// filters that entries need to pass through in order to be sent to the subscriber.
filters []filter
- // dataC is the channel to which entries that pass filters will be sent. The channel must be drained regularly in
- // order to prevent accumulation of goroutines and possible reordering of messages.
+ // dataC is the channel to which entries that pass filters will be sent. The
+ // channel must be drained regularly in order to prevent accumulation of goroutines
+ // and possible reordering of messages.
dataC chan *LogEntry
- // doneC is a channel that is closed once the subscriber wishes to stop receiving notifications.
+ // doneC is a channel that is closed once the subscriber wishes to stop receiving
+ // notifications.
doneC chan struct{}
- // missed is the amount of messages missed by the subscriber by not receiving from dataC fast enough
+ // missed is the number of messages missed by the subscriber by not receiving
+ // from dataC fast enough.
missed uint64
}
diff --git a/metropolis/pkg/logtree/klog.go b/metropolis/pkg/logtree/klog.go
index 8755286..3dd040e 100644
--- a/metropolis/pkg/logtree/klog.go
+++ b/metropolis/pkg/logtree/klog.go
@@ -61,9 +61,8 @@
return k
}
-
type klogParser struct {
- n *node
+ n *node
buffer *logbuffer.LineBuffer
}
@@ -90,7 +89,7 @@
// we permit library users to 'fake' logs? This would also permit us to get rid
// of the type assertion in KLogParser().
e := &entry{
- origin: k.n.dn,
+ origin: k.n.dn,
leveled: p,
}
k.n.tree.journal.append(e)
@@ -98,14 +97,15 @@
}
var (
- // reKLog matches and parses klog/glog-formatted log lines.
- // Format: I0312 14:20:04.240540 204 shared_informer.go:247] Caches are synced for attach detach
+ // reKLog matches and parses klog/glog-formatted log lines. Format: I0312
+ // 14:20:04.240540 204 shared_informer.go:247] Caches are synced for attach
+ // detach
reKLog = regexp.MustCompile(`^([IEWF])(\d{4})\s+(\d{2}:\d{2}:\d{2}(\.\d+)?)\s+(\d+)\s+([^:]+):(\d+)]\s+(.+)$`)
)
// parse attempts to parse a klog-formatted line. Returns nil if the line
// couldn't have been parsed successfully.
-func parse(now time.Time, s string) (*LeveledPayload) {
+func parse(now time.Time, s string) *LeveledPayload {
parts := reKLog.FindStringSubmatch(s)
if parts == nil {
return nil
@@ -184,13 +184,14 @@
// The PID is discarded.
_ = pid
- // Finally we have extracted all the data from the line. Inject into the log publisher.
+ // Finally we have extracted all the data from the line. Inject into the log
+ // publisher.
return &LeveledPayload{
timestamp: ts,
- severity: severity,
- messages: []string{message},
- file: file,
- line: line,
+ severity: severity,
+ messages: []string{message},
+ file: file,
+ line: line,
}
}
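For reference, how the reKLog expression above decomposes a sample line (self-contained sketch with the regex copied verbatim; not part of this change):

```go
package main

import (
	"fmt"
	"regexp"
)

// Copied from the diff above: severity letter, MMDD date, time, PID,
// file, line number, and message.
var reKLog = regexp.MustCompile(`^([IEWF])(\d{4})\s+(\d{2}:\d{2}:\d{2}(\.\d+)?)\s+(\d+)\s+([^:]+):(\d+)]\s+(.+)$`)

func main() {
	line := "I0312 14:20:04.240540     204 shared_informer.go:247] Caches are synced for attach detach"
	parts := reKLog.FindStringSubmatch(line)
	fmt.Println(parts[1]) // "I" - severity
	fmt.Println(parts[2]) // "0312" - month and day
	fmt.Println(parts[3]) // "14:20:04.240540" - time
	fmt.Println(parts[5]) // "204" - PID
	fmt.Println(parts[6]) // "shared_informer.go" - file
	fmt.Println(parts[7]) // "247" - line number
	fmt.Println(parts[8]) // message
}
```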
diff --git a/metropolis/pkg/logtree/leveled.go b/metropolis/pkg/logtree/leveled.go
index c0d2aff..a4220f9 100644
--- a/metropolis/pkg/logtree/leveled.go
+++ b/metropolis/pkg/logtree/leveled.go
@@ -22,64 +22,70 @@
apb "source.monogon.dev/metropolis/proto/api"
)
-// LeveledLogger is a generic interface for glog-style logging. There are four hardcoded log severities, in increasing
-// order: INFO, WARNING, ERROR, FATAL. Logging at a certain severity level logs not only to consumers expecting data
-// at that severity level, but also all lower severity levels. For example, an ERROR log will also be passed to
-// consumers looking at INFO or WARNING logs.
+// LeveledLogger is a generic interface for glog-style logging. There are four
+// hardcoded log severities, in increasing order: INFO, WARNING, ERROR, FATAL.
+// Logging at a certain severity level logs not only to consumers expecting data at
+// that severity level, but also all lower severity levels. For example, an ERROR
+// log will also be passed to consumers looking at INFO or WARNING logs.
type LeveledLogger interface {
- // Info logs at the INFO severity. Arguments are handled in the manner of fmt.Print, a terminating newline is added
- // if missing.
+ // Info logs at the INFO severity. Arguments are handled in the manner of
+ // fmt.Print, a terminating newline is added if missing.
Info(args ...interface{})
- // Infof logs at the INFO severity. Arguments are handled in the manner of fmt.Printf, a terminating newline is
- // added if missing.
+ // Infof logs at the INFO severity. Arguments are handled in the manner of
+ // fmt.Printf, a terminating newline is added if missing.
Infof(format string, args ...interface{})
- // Warning logs at the WARNING severity. Arguments are handled in the manner of fmt.Print, a terminating newline is
- // added if missing.
+ // Warning logs at the WARNING severity. Arguments are handled in the manner of
+ // fmt.Print, a terminating newline is added if missing.
Warning(args ...interface{})
- // Warningf logs at the WARNING severity. Arguments are handled in the manner of fmt.Printf, a terminating newline
- // is added if missing.
+ // Warningf logs at the WARNING severity. Arguments are handled in the manner of
+ // fmt.Printf, a terminating newline is added if missing.
Warningf(format string, args ...interface{})
- // Error logs at the ERROR severity. Arguments are handled in the manner of fmt.Print, a terminating newline is
- // added if missing.
+ // Error logs at the ERROR severity. Arguments are handled in the manner of
+ // fmt.Print, a terminating newline is added if missing.
Error(args ...interface{})
- // Errorf logs at the ERROR severity. Arguments are handled in the manner of fmt.Printf, a terminating newline is
- // added if missing.
+ // Errorf logs at the ERROR severity. Arguments are handled in the manner of
+ // fmt.Printf, a terminating newline is added if missing.
Errorf(format string, args ...interface{})
- // Fatal logs at the FATAL severity and aborts the current program. Arguments are handled in the manner of
- // fmt.Print, a terminating newline is added if missing.
+ // Fatal logs at the FATAL severity and aborts the current program. Arguments are
+ // handled in the manner of fmt.Print, a terminating newline is added if missing.
Fatal(args ...interface{})
- // Fatalf logs at the FATAL severity and aborts the current program. Arguments are handled in the manner of
- // fmt.Printf, a terminating newline is added if missing.
+ // Fatalf logs at the FATAL severity and aborts the current program. Arguments are
+ // handled in the manner of fmt.Printf, a terminating newline is added if missing.
Fatalf(format string, args ...interface{})
- // V returns a VerboseLeveledLogger at a given verbosity level. These verbosity levels can be dynamically set and
- // unset on a package-granular level by consumers of the LeveledLogger logs. The returned value represents whether
- // logging at the given verbosity level was active at that time, and as such should not be a long-lived object
- // in programs.
- // This construct is further refered to as 'V-logs'.
+	// V returns a VerboseLeveledLogger at a given verbosity level. These
+	// verbosity levels can be dynamically set and unset on a package-granular
+	// level by consumers of the LeveledLogger logs. The returned value
+	// represents whether logging at the given verbosity level was active at
+	// that time, and as such should not be a long-lived object in programs.
+	// This construct is further referred to as 'V-logs'.
V(level VerbosityLevel) VerboseLeveledLogger
}
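
As an aside for reviewers, a minimal sketch of how this interface is meant to
be consumed (not part of this change; 'l' stands for any LeveledLogger
implementation):

func logExample(l LeveledLogger) {
	l.Info("starting up")
	l.Warningf("%d workers configured, expected at least %d", 2, 4)
	// V-logs are only recorded if the verbosity level is enabled for this
	// logger's DN; Enabled() allows skipping expensive argument evaluation.
	if l.V(2).Enabled() {
		l.V(2).Info("verbose diagnostics enabled")
	}
}
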
-// VerbosityLevel is a verbosity level defined for V-logs. This can be changed programmatically per Go package. When
-// logging at a given VerbosityLevel V, the current level must be equal or higher to V for the logs to be recorded.
-// Conversely, enabling a V-logging at a VerbosityLevel V also enables all logging at lower levels [Int32Min .. (V-1)].
+// VerbosityLevel is a verbosity level defined for V-logs. This can be changed
+// programmatically per Go package. When logging at a given VerbosityLevel V,
+// the current level must be equal to or higher than V for the logs to be
+// recorded. Conversely, enabling V-logging at a VerbosityLevel V also enables
+// all logging at lower levels [Int32Min .. (V-1)].
type VerbosityLevel int32
type VerboseLeveledLogger interface {
- // Enabled returns if this level was enabled. If not enabled, all logging into this logger will be discarded
- // immediately.
- // Thus, Enabled() can be used to check the verbosity level before performing any logging:
+	// Enabled returns whether this level was enabled. If not enabled, all
+	// logging into this logger will be discarded immediately. Thus, Enabled()
+	// can be used to check the verbosity level before performing any logging:
// if l.V(3).Enabled() { l.Info("V3 is enabled") }
// or, in simple cases, the convenience function .Info can be used:
// l.V(3).Info("V3 is enabled")
- // The second form is shorter and more convenient, but more expensive, as its arguments are always evaluated.
+ // The second form is shorter and more convenient, but more expensive, as its
+ // arguments are always evaluated.
Enabled() bool
- // Info is the equivalent of a LeveledLogger's Info call, guarded by whether this VerboseLeveledLogger is enabled.
+	// Info is the equivalent of a LeveledLogger's Info call, guarded by
+	// whether this VerboseLeveledLogger is enabled.
Info(args ...interface{})
- // Infof is the equivalent of a LeveledLogger's Infof call, guarded by whether this VerboseLeveledLogger is enabled.
+	// Infof is the equivalent of a LeveledLogger's Infof call, guarded by
+	// whether this VerboseLeveledLogger is enabled.
Infof(format string, args ...interface{})
}
@@ -94,8 +100,9 @@
)
var (
- // SeverityAtLeast maps a given severity to a list of severities that at that severity or higher. In other words,
- // SeverityAtLeast[X] returns a list of severities that might be seen in a log at severity X.
+	// SeverityAtLeast maps a given severity to a list of severities that are
+	// at that severity or higher. In other words, SeverityAtLeast[X] returns
+	// a list of severities that might be seen in a log at severity X.
SeverityAtLeast = map[Severity][]Severity{
INFO: {INFO, WARNING, ERROR, FATAL},
WARNING: {WARNING, ERROR, FATAL},
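
As an illustration of how this map is meant to be consulted, a hypothetical
filter helper (the name wantsAtSeverity is invented for this sketch):

// wantsAtSeverity reports whether an entry logged at severity 'at' should be
// delivered to a consumer reading at severity 'reading'.
func wantsAtSeverity(reading, at Severity) bool {
	for _, s := range SeverityAtLeast[reading] {
		if s == at {
			return true
		}
	}
	return false
}
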
diff --git a/metropolis/pkg/logtree/leveled_payload.go b/metropolis/pkg/logtree/leveled_payload.go
index 0ceee4d..ed3ed7e 100644
--- a/metropolis/pkg/logtree/leveled_payload.go
+++ b/metropolis/pkg/logtree/leveled_payload.go
@@ -25,12 +25,13 @@
apb "source.monogon.dev/metropolis/proto/api"
)
-// LeveledPayload is a log entry for leveled logs (as per leveled.go). It contains the input to these calls (severity and
-// message split into newline-delimited messages) and additional metadata that would be usually seen in a text
+// LeveledPayload is a log entry for leveled logs (as per leveled.go). It
+// contains the input to these calls (severity and message split into
+// newline-delimited messages) and additional metadata that would usually
+// be seen in a text
// representation of a leveled log entry.
type LeveledPayload struct {
- // messages is the list of messages contained in this payload. This list is built from splitting up the given message
- // from the user by newline.
+	// messages is the list of messages contained in this payload. This list
+	// is built from splitting up the given message from the user by newline.
messages []string
// timestamp is the time at which this message was emitted.
timestamp time.Time
@@ -42,10 +43,11 @@
line int
}
-// String returns a canonical representation of this payload as a single string prefixed with metadata. If the original
-// message was logged with newlines, this representation will also contain newlines, with each original message part
-// prefixed by the metadata.
-// For an alternative call that will instead return a canonical prefix and a list of lines in the message, see Strings().
+// String returns a canonical representation of this payload as a single
+// string prefixed with metadata. If the original message was logged with
+// newlines, this representation will also contain newlines, with each
+// original message part prefixed by the metadata. For an alternative call
+// that will instead return a canonical prefix and a list of lines in the
+// message, see Strings().
func (p *LeveledPayload) String() string {
prefix, lines := p.Strings()
res := make([]string, len(p.messages))
@@ -55,9 +57,11 @@
return strings.Join(res, "\n")
}
-// Strings returns the canonical representation of this payload split into a prefix and all lines that were contained in
-// the original message. This is meant to be displayed to the user by showing the prefix before each line, concatenated
-// together - possibly in a table form with the prefixes all unified with a rowspan-like mechanism.
+// Strings returns the canonical representation of this payload split into a
+// prefix and all lines that were contained in the original message. This is
+// meant to be displayed to the user by showing the prefix before each line,
+// concatenated together - possibly in a table form with the prefixes all
+// unified with a rowspan-like mechanism.
//
// For example, this function can return:
// prefix = "I1102 17:20:06.921395 foo.go:42] "
@@ -76,7 +80,6 @@
// | :------------------|
// | : - two |
// '-----------------------------------------------------------'
-//
func (p *LeveledPayload) Strings() (prefix string, lines []string) {
_, month, day := p.timestamp.Date()
hour, minute, second := p.timestamp.Clock()
@@ -91,8 +94,8 @@
return
}
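
A short sketch of the intended use of the prefix/lines split (printPayload is
a hypothetical helper, shown for illustration only; fmt is already imported by
this file):

// printPayload renders a payload by repeating the metadata prefix in front of
// every line of the original message.
func printPayload(p *LeveledPayload) {
	prefix, lines := p.Strings()
	for _, line := range lines {
		fmt.Println(prefix + line)
	}
}
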
-// Message returns the inner message lines of this entry, ie. what was passed to the actual logging method, but split by
-// newlines.
+// Messages returns the inner message lines of this entry, ie. what was passed
+// to the actual logging method, but split by newlines.
func (p *LeveledPayload) Messages() []string { return p.messages }
func (p *LeveledPayload) MessagesJoined() string { return strings.Join(p.messages, "\n") }
@@ -100,8 +103,8 @@
// Timestamp returns the time at which this entry was logged.
func (p *LeveledPayload) Timestamp() time.Time { return p.timestamp }
-// Location returns a string in the form of file_name:line_number that shows the origin of the log entry in the
-// program source.
+// Location returns a string in the form of file_name:line_number that shows the
+// origin of the log entry in the program source.
func (p *LeveledPayload) Location() string { return fmt.Sprintf("%s:%d", p.file, p.line) }
// Severity returns the Severity with which this entry was logged.
diff --git a/metropolis/pkg/logtree/logtree.go b/metropolis/pkg/logtree/logtree.go
index a21545f..968a5a9 100644
--- a/metropolis/pkg/logtree/logtree.go
+++ b/metropolis/pkg/logtree/logtree.go
@@ -24,12 +24,13 @@
"source.monogon.dev/metropolis/pkg/logbuffer"
)
-// LogTree is a tree-shaped logging system. For more information, see the package-level documentation.
+// LogTree is a tree-shaped logging system. For more information, see the
+// package-level documentation.
type LogTree struct {
// journal is the tree's journal, storing all log data and managing subscribers.
journal *journal
- // root is the root node of the actual tree of the log tree. The nodes contain per-DN configuration options, notably
- // the current verbosity level of that DN.
+	// root is the root node of the actual tree of the log tree. The nodes
+	// contain per-DN configuration options, notably the current verbosity
+	// level of that DN.
root *node
}
@@ -41,25 +42,29 @@
return lt
}
-// node represents a given DN as a discrete 'logger'. It implements the LeveledLogger interface for log publishing,
-// entries from which it passes over to the logtree's journal.
+// node represents a given DN as a discrete 'logger'. It implements the
+// LeveledLogger interface for log publishing, entries from which it passes
+// over to the logtree's journal.
type node struct {
// dn is the DN which this node represents (or "" if this is the root node).
dn DN
// tree is the LogTree to which this node belongs.
tree *LogTree
- // verbosity is the current verbosity level of this DN/node, affecting .V(n) LeveledLogger calls
+	// verbosity is the current verbosity level of this DN/node, affecting
+	// .V(n) LeveledLogger calls.
verbosity VerbosityLevel
rawLineBuffer *logbuffer.LineBuffer
// mu guards children.
mu sync.Mutex
- // children is a map of DN-part to a children node in the logtree. A DN-part is a string representing a part of the
- // DN between the deliming dots, as returned by DN.Path.
+	// children is a map of DN-part to a child node in the logtree. A DN-part
+	// is a string representing a part of the DN between the delimiting dots,
+	// as returned by DN.Path.
children map[string]*node
}
-// newNode returns a node at a given DN in the LogTree - but doesn't set up the LogTree to insert it accordingly.
+// newNode returns a node at a given DN in the LogTree - but doesn't set up the
+// LogTree to insert it accordingly.
func newNode(tree *LogTree, dn DN) *node {
n := &node{
dn: dn,
@@ -72,8 +77,8 @@
return n
}
-// nodeByDN returns the LogTree node corresponding to a given DN. If either the node or some of its parents do not
-// exist they will be created as needed.
+// nodeByDN returns the LogTree node corresponding to a given DN. If either the
+// node or some of its parents do not exist they will be created as needed.
func (l *LogTree) nodeByDN(dn DN) (*node, error) {
traversal, err := newTraversal(dn)
if err != nil {
@@ -82,22 +87,27 @@
return traversal.execute(l.root), nil
}
-// nodeTraversal represents a request to traverse the LogTree in search of a given node by DN.
+// nodeTraversal represents a request to traverse the LogTree in search of a
+// given node by DN.
type nodeTraversal struct {
// want is the DN of the node that is requested to be found.
want DN
- // current is the path already taken to find the node, in the form of DN parts. It starts out as want.Parts() and
- // progresses to become empty as the traversal continues.
+	// current is the path already taken to find the node, in the form of DN
+	// parts. It starts out as want.Parts() and progresses to become empty as
+	// the traversal continues.
current []string
- // left is the path that's still needed to be taken in order to find the node, in the form of DN parts. It starts
- // out empty and progresses to become wants.Parts() as the traversal continues.
+	// left is the path that still needs to be taken in order to find the
+	// node, in the form of DN parts. It starts out empty and progresses to
+	// become want.Parts() as the traversal continues.
left []string
}
-// next adjusts the traversal's current/left slices to the next element of the traversal, returns the part that's now
-// being looked for (or "" if the traveral is done) and the full DN of the element that's being looked for.
+// next adjusts the traversal's current/left slices to the next element of the
+// traversal, returns the part that's now being looked for (or "" if the
+// traversal is done) and the full DN of the element that's being looked for.
//
-// For example, a traversal of foo.bar.baz will cause .next() to return the following on each invocation:
+// For example, a traversal of foo.bar.baz will cause .next() to return the
+// following on each invocation:
// - part: foo, full: foo
// - part: bar, full: foo.bar
// - part: baz, full: foo.bar.baz
@@ -125,9 +135,10 @@
}, nil
}
-// execute the traversal in order to find the node. This can only be called once per traversal.
-// Nodes will be created within the tree until the target node is reached. Existing nodes will be reused.
-// This is effectively an idempotent way of accessing a node in the tree based on a traversal.
+// execute the traversal in order to find the node. This can only be called
+// once per traversal. Nodes will be created within the tree until the target
+// node is reached. Existing nodes will be reused. This is effectively an
+// idempotent way of accessing a node in the tree based on a traversal.
func (t *nodeTraversal) execute(n *node) *node {
cur := n
for {
diff --git a/metropolis/pkg/logtree/logtree_access.go b/metropolis/pkg/logtree/logtree_access.go
index fed202e..1babe1e 100644
--- a/metropolis/pkg/logtree/logtree_access.go
+++ b/metropolis/pkg/logtree/logtree_access.go
@@ -31,46 +31,54 @@
leveledWithMinimumSeverity Severity
}
-// WithChildren makes Read return/stream data for both a given DN and all its children.
+// WithChildren makes Read return/stream data for both a given DN and all its
+// children.
func WithChildren() LogReadOption { return LogReadOption{withChildren: true} }
-// WithStream makes Read return a stream of data. This works alongside WithBacklog to create a read-and-stream
-// construct.
+// WithStream makes Read return a stream of data. This works alongside
+// WithBacklog to create a read-and-stream construct.
func WithStream() LogReadOption { return LogReadOption{withStream: true} }
-// WithBacklog makes Read return already recorded log entries, up to count elements.
+// WithBacklog makes Read return already recorded log entries, up to count
+// elements.
func WithBacklog(count int) LogReadOption { return LogReadOption{withBacklog: count} }
-// BacklogAllAvailable makes WithBacklog return all backlogged log data that logtree possesses.
+// BacklogAllAvailable makes WithBacklog return all backlogged log data that
+// logtree possesses.
const BacklogAllAvailable int = -1
func OnlyRaw() LogReadOption { return LogReadOption{onlyRaw: true} }
func OnlyLeveled() LogReadOption { return LogReadOption{onlyLeveled: true} }
-// LeveledWithMinimumSeverity makes Read return only log entries that are at least at a given Severity. If only leveled
-// entries are needed, OnlyLeveled must be used. This is a no-op when OnlyRaw is used.
+// LeveledWithMinimumSeverity makes Read return only log entries that are at
+// least at a given Severity. If only leveled entries are needed, OnlyLeveled
+// must be used. This is a no-op when OnlyRaw is used.
func LeveledWithMinimumSeverity(s Severity) LogReadOption {
return LogReadOption{leveledWithMinimumSeverity: s}
}
-// LogReader permits reading an already existing backlog of log entries and to stream further ones.
+// LogReader permits reading an already existing backlog of log entries and
+// streaming further ones.
type LogReader struct {
- // Backlog are the log entries already logged by LogTree. This will only be set if WithBacklog has been passed to
- // Read.
+	// Backlog are the log entries already logged by LogTree. This will only
+	// be set if WithBacklog has been passed to Read.
Backlog []*LogEntry
- // Stream is a channel of new entries as received live by LogTree. This will only be set if WithStream has been
- // passed to Read. In this case, entries from this channel must be read as fast as possible by the consumer in order
- // to prevent missing entries.
+	// Stream is a channel of new entries as received live by LogTree. This
+	// will only be set if WithStream has been passed to Read. In this case,
+	// entries from this channel must be read as fast as possible by the
+	// consumer in order to prevent missing entries.
Stream <-chan *LogEntry
- // done is channel used to signal (by closing) that the log consumer is not interested in more Stream data.
+	// done is a channel used to signal (by closing) that the log consumer is
+	// not interested in more Stream data.
done chan<- struct{}
- // missed is an atomic integer pointer that tells the subscriber how many messages in Stream they missed. This
- // pointer is nil if no streaming has been requested.
+	// missed is an atomic integer pointer that tells the subscriber how many
+	// messages in Stream they missed. This pointer is nil if no streaming has
+	// been requested.
missed *uint64
}
-// Missed returns the amount of entries that were missed from Stream (as the channel was not drained fast enough).
+// Missed returns the number of entries that were missed from Stream (as the
+// channel was not drained fast enough).
func (l *LogReader) Missed() uint64 {
// No Stream.
if l.missed == nil {
@@ -79,8 +87,8 @@
return atomic.LoadUint64(l.missed)
}
-// Close closes the LogReader's Stream. This must be called once the Reader does not wish to receive streaming messages
-// anymore.
+// Close closes the LogReader's Stream. This must be called once the Reader
+// does not wish to receive streaming messages anymore.
func (l *LogReader) Close() {
if l.done != nil {
close(l.done)
@@ -91,9 +99,11 @@
ErrRawAndLeveled = errors.New("cannot return logs that are simultaneously OnlyRaw and OnlyLeveled")
)
-// Read and/or stream entries from a LogTree. The returned LogReader is influenced by the LogReadOptions passed, which
-// influence whether the Read will return existing entries, a stream, or both. In addition the options also dictate
-// whether only entries for that particular DN are returned, or for all sub-DNs as well.
+// Read and/or stream entries from a LogTree. The returned LogReader is
+// influenced by the LogReadOptions passed, which control whether the Read
+// will return existing entries, a stream, or both. The options also dictate
+// whether only entries for that particular DN are returned, or for all
+// sub-DNs as well.
func (l *LogTree) Read(dn DN, opts ...LogReadOption) (*LogReader, error) {
l.journal.mu.RLock()
defer l.journal.mu.RUnlock()
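
A hedged usage sketch of the read-and-stream construct described above
(followLogs is hypothetical; it assumes an existing *LogTree, an imported fmt,
and an arbitrary DN):

func followLogs(lt *LogTree) error {
	reader, err := lt.Read("root.foo", WithBacklog(BacklogAllAvailable),
		WithStream(), WithChildren())
	if err != nil {
		return err
	}
	defer reader.Close()
	// First dump the backlog, then follow new entries. Stream must be
	// drained quickly, otherwise entries are dropped and counted in Missed().
	for _, entry := range reader.Backlog {
		fmt.Println(entry.String())
	}
	for entry := range reader.Stream {
		fmt.Println(entry.String())
	}
	return nil
}
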
diff --git a/metropolis/pkg/logtree/logtree_entry.go b/metropolis/pkg/logtree/logtree_entry.go
index a1c2d62..442d456 100644
--- a/metropolis/pkg/logtree/logtree_entry.go
+++ b/metropolis/pkg/logtree/logtree_entry.go
@@ -24,8 +24,9 @@
apb "source.monogon.dev/metropolis/proto/api"
)
-// LogEntry contains a log entry, combining both leveled and raw logging into a single stream of events. A LogEntry
-// will contain exactly one of either LeveledPayload or RawPayload.
+// LogEntry contains a log entry, combining both leveled and raw logging into a
+// single stream of events. A LogEntry will contain exactly one of either
+// LeveledPayload or RawPayload.
type LogEntry struct {
// If non-nil, this is a leveled logging entry.
Leveled *LeveledPayload
@@ -35,10 +36,12 @@
DN DN
}
-// String returns a canonical representation of this payload as a single string prefixed with metadata. If the entry is
-// a leveled log entry that originally was logged with newlines this representation will also contain newlines, with
-// each original message part prefixed by the metadata.
-// For an alternative call that will instead return a canonical prefix and a list of lines in the message, see Strings().
+// String returns a canonical representation of this payload as a single
+// string prefixed with metadata. If the entry is a leveled log entry that
+// originally was logged with newlines, this representation will also contain
+// newlines, with each original message part prefixed by the metadata. For an
+// alternative call that will instead return a canonical prefix and a list of
+// lines in the message, see Strings().
func (l *LogEntry) String() string {
if l.Leveled != nil {
prefix, messages := l.Leveled.Strings()
@@ -54,9 +57,11 @@
return "INVALID"
}
-// Strings returns the canonical representation of this payload split into a prefix and all lines that were contained in
-// the original message. This is meant to be displayed to the user by showing the prefix before each line, concatenated
-// together - possibly in a table form with the prefixes all unified with a rowspan-like mechanism.
+// Strings returns the canonical representation of this payload split into a
+// prefix and all lines that were contained in the original message. This is
+// meant to be displayed to the user by showing the prefix before each line,
+// concatenated together - possibly in a table form with the prefixes all
+// unified with a rowspan-like mechanism.
//
// For example, this function can return:
// prefix = "root.foo.bar I1102 17:20:06.921395 0 foo.go:42] "
@@ -68,14 +73,14 @@
// root.foo.bar I1102 17:20:06.921395 foo.go:42] - two
//
// Or, in a table layout:
-// .-------------------------------------------------------------------------------------.
-// | root.foo.bar I1102 17:20:06.921395 foo.go:42] : current tags: |
-// | :------------------|
-// | : - one |
-// | :------------------|
-// | : - two |
-// '-------------------------------------------------------------------------------------'
-//
+// .----------------------------------------------------------------------.
+// | root.foo.bar I1102 17:20:06.921395 foo.go:42] : current tags: |
+// | :------------------|
+// | : - one |
+// | :------------------|
+// | : - two |
+// '----------------------------------------------------------------------'
func (l *LogEntry) Strings() (prefix string, lines []string) {
if l.Leveled != nil {
prefix, messages := l.Leveled.Strings()
@@ -88,8 +93,8 @@
return "INVALID ", []string{"INVALID"}
}
-// Convert this LogEntry to proto. Returned value may be nil if given LogEntry is invalid, eg. contains neither a Raw
-// nor Leveled entry.
+// Convert this LogEntry to proto. Returned value may be nil if given
+// LogEntry is invalid, eg. contains neither a Raw nor Leveled entry.
func (l *LogEntry) Proto() *apb.LogEntry {
p := &apb.LogEntry{
Dn: string(l.DN),
@@ -111,8 +116,8 @@
return p
}
-// Parse a proto LogEntry back into internal structure. This can be used in log proto API consumers to easily print
-// received log entries.
+// Parse a proto LogEntry back into internal structure. This can be used in log
+// proto API consumers to easily print received log entries.
func LogEntryFromProto(l *apb.LogEntry) (*LogEntry, error) {
dn := DN(l.Dn)
if _, err := dn.Path(); err != nil {
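
For log proto API consumers, a hypothetical sketch of the intended round-trip
(printProtoEntries is invented for this illustration and assumes fmt is
imported):

func printProtoEntries(entries []*apb.LogEntry) {
	for _, pe := range entries {
		entry, err := LogEntryFromProto(pe)
		if err != nil {
			// Skip entries that fail to parse.
			continue
		}
		fmt.Println(entry.String())
	}
}
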
diff --git a/metropolis/pkg/logtree/logtree_publisher.go b/metropolis/pkg/logtree/logtree_publisher.go
index d4d35ff..6106b19 100644
--- a/metropolis/pkg/logtree/logtree_publisher.go
+++ b/metropolis/pkg/logtree/logtree_publisher.go
@@ -26,8 +26,8 @@
"source.monogon.dev/metropolis/pkg/logbuffer"
)
-// LeveledFor returns a LeveledLogger publishing interface for a given DN. An error may be returned if the DN is
-// malformed.
+// LeveledFor returns a LeveledLogger publishing interface for a given DN. An
+// error may be returned if the DN is malformed.
func (l *LogTree) LeveledFor(dn DN) (LeveledLogger, error) {
return l.nodeByDN(dn)
}
@@ -40,7 +40,8 @@
return node.rawLineBuffer, nil
}
-// MustLeveledFor returns a LeveledLogger publishing interface for a given DN, or panics if the given DN is invalid.
+// MustLeveledFor returns a LeveledLogger publishing interface for a given
+// DN, or panics if the given DN is invalid.
func (l *LogTree) MustLeveledFor(dn DN) LeveledLogger {
leveled, err := l.LeveledFor(dn)
if err != nil {
@@ -57,7 +58,8 @@
return raw
}
-// SetVerbosity sets the verbosity for a given DN (non-recursively, ie. for that DN only, not its children).
+// SetVerbosity sets the verbosity for a given DN (non-recursively, ie. for
+// that DN only, not its children).
func (l *LogTree) SetVerbosity(dn DN, level VerbosityLevel) error {
node, err := l.nodeByDN(dn)
if err != nil {
@@ -67,8 +69,9 @@
return nil
}
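
A hedged publisher-side sketch tying these calls together (publishExample is
hypothetical; it assumes an existing *LogTree, and the DN and verbosity level
are arbitrary):

func publishExample(lt *LogTree) {
	logger := lt.MustLeveledFor("root.foo")
	logger.Infof("component started")

	// Enable V-logs up to level 3 for root.foo only, not its children.
	if err := lt.SetVerbosity("root.foo", 3); err != nil {
		logger.Errorf("setting verbosity: %v", err)
	}
	logger.V(3).Info("this is now recorded")
}
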
-// logRaw is called by this node's LineBuffer any time a raw log line is completed. It will create a new entry, append
-// it to the journal, and notify all pertinent subscribers.
+// logRaw is called by this node's LineBuffer any time a raw log line is
+// completed. It will create a new entry, append it to the journal, and
+// notify all pertinent subscribers.
func (n *node) logRaw(line *logbuffer.Line) {
e := &entry{
origin: n.dn,
@@ -78,8 +81,9 @@
n.tree.journal.notify(e)
}
-// log builds a LeveledPayload and entry for a given message, including all related metadata. It will create a new
-// entry append it to the journal, and notify all pertinent subscribers.
+// logLeveled builds a LeveledPayload and entry for a given message, including
+// all related metadata. It will create a new entry, append it to the journal,
+// and notify all pertinent subscribers.
func (n *node) logLeveled(depth int, severity Severity, msg string) {
_, file, line, ok := runtime.Caller(2 + depth)
if !ok {
@@ -158,9 +162,10 @@
}
}
-// verbose implements the VerboseLeveledLogger interface. It is a thin wrapper around node, with an 'enabled' bool. This
-// means that V(n)-returned VerboseLeveledLoggers must be short lived, as a changed in verbosity will not affect all
-// already existing VerboseLeveledLoggers.
+// verbose implements the VerboseLeveledLogger interface. It is a thin wrapper
+// around node, with an 'enabled' bool. This means that V(n)-returned
+// VerboseLeveledLoggers must be short-lived, as a change in verbosity will
+// not affect already existing VerboseLeveledLoggers.
type verbose struct {
node *node
enabled bool
diff --git a/metropolis/pkg/loop/loop.go b/metropolis/pkg/loop/loop.go
index 64b533b..c338f04 100644
--- a/metropolis/pkg/loop/loop.go
+++ b/metropolis/pkg/loop/loop.go
@@ -16,10 +16,13 @@
// Package loop implements an interface to configure Linux loop devices.
//
-// This package requires Linux 5.8 or higher because it uses the newer LOOP_CONFIGURE ioctl, which is better-behaved
-// and twice as fast as the old approach. It doesn't support all of the cryptloop functionality as it has been
-// superseded by dm-crypt and has known vulnerabilities. It also doesn't support on-the-fly reconfiguration of loop
-// devices as this is rather unusual, works only under very specific circumstances and would make the API less clean.
+// This package requires Linux 5.8 or higher because it uses the newer
+// LOOP_CONFIGURE ioctl, which is better-behaved and twice as fast as the old
+// approach. It doesn't support all of the cryptloop functionality as it has
+// been superseded by dm-crypt and has known vulnerabilities. It also doesn't
+// support on-the-fly reconfiguration of loop devices as this is rather
+// unusual, works only under very specific circumstances and would make the API
+// less clean.
package loop
import (
@@ -35,7 +38,8 @@
"golang.org/x/sys/unix"
)
-// Lazily-initialized file descriptor for the control device /dev/loop-control (singleton)
+// Lazily-initialized file descriptor for the control device /dev/loop-control
+// (singleton)
var (
mutex sync.Mutex
loopControlFd *os.File
@@ -50,8 +54,10 @@
// struct loop_config from @linux//include/uapi/linux:loop.h
type loopConfig struct {
- fd uint32
- blockSize uint32 // Power of 2 between 512 and os.Getpagesize(), defaults reasonably
+ fd uint32
+ // blockSize is a power of 2 between 512 and os.Getpagesize(), defaults
+ // reasonably
+ blockSize uint32
info loopInfo64
_reserved [64]byte
}
@@ -74,14 +80,16 @@
}
type Config struct {
- // Block size of the loop device in bytes. Power of 2 between 512 and page size.
- // Zero defaults to an reasonable block size.
+ // Block size of the loop device in bytes. Power of 2 between 512 and page
+	// size. Zero defaults to a reasonable block size.
BlockSize uint32
// Combination of flags from the Flag constants in this package.
Flags uint32
- // Offset in bytes from the start of the file to the first byte of the device. Usually zero.
+ // Offset in bytes from the start of the file to the first byte of the
+ // device. Usually zero.
Offset uint64
- // Maximum size of the loop device in bytes. Zero defaults to the whole file.
+ // Maximum size of the loop device in bytes. Zero defaults to the whole
+ // file.
SizeLimit uint64
}
@@ -118,12 +126,14 @@
const (
// Makes the loop device read-only even if the backing file is read-write.
FlagReadOnly = 1
- // Unbinds the backing file as soon as the last user is gone. Useful for unbinding after unmount.
+ // Unbinds the backing file as soon as the last user is gone. Useful for
+ // unbinding after unmount.
FlagAutoclear = 4
- // Enables kernel-side partition scanning on the loop device. Needed if you want to access specific partitions on
- // a loop device.
+ // Enables kernel-side partition scanning on the loop device. Needed if you
+ // want to access specific partitions on a loop device.
FlagPartscan = 8
- // Enables direct IO for the loop device, bypassing caches and buffer copying.
+ // Enables direct IO for the loop device, bypassing caches and buffer
+ // copying.
FlagDirectIO = 16
)
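
From a consumer's perspective, a minimal sketch using only the API visible in
this hunk (refreshLoop is hypothetical, device creation is out of scope here,
and /dev/loop0 is an arbitrary example path):

func refreshLoop() error {
	dev, err := loop.Open("/dev/loop0")
	if err != nil {
		return err
	}
	defer dev.Close()
	// Re-read the device size after the backing file has grown.
	return dev.RefreshSize()
}
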
@@ -169,7 +179,8 @@
}
}
-// Open opens a loop device at the given path. It returns an error if the path is not a loop device.
+// Open opens a loop device at the given path. It returns an error if the path
+// is not a loop device.
func Open(path string) (*Device, error) {
potentialDevice, err := os.Open(path)
if err != nil {
@@ -219,7 +230,8 @@
return string(backingFile), err
}
-// RefreshSize recalculates the size of the loop device based on the config and the size of the backing file.
+// RefreshSize recalculates the size of the loop device based on the config and
+// the size of the backing file.
func (d *Device) RefreshSize() error {
if err := d.ensureOpen(); err != nil {
return err
@@ -227,7 +239,8 @@
return unix.IoctlSetInt(int(d.dev.Fd()), unix.LOOP_SET_CAPACITY, 0)
}
-// Close closes all file descriptors open to the device. Does not remove the device itself or alter its configuration.
+// Close closes all file descriptors open to the device. Does not remove the
+// device itself or alter its configuration.
func (d *Device) Close() error {
if err := d.ensureOpen(); err != nil {
return err
diff --git a/metropolis/pkg/loop/loop_test.go b/metropolis/pkg/loop/loop_test.go
index 1ddb34f..16ead64 100644
--- a/metropolis/pkg/loop/loop_test.go
+++ b/metropolis/pkg/loop/loop_test.go
@@ -32,8 +32,9 @@
"golang.org/x/sys/unix"
)
-// Write a test file with a very specific pattern (increasing little-endian 16 bit unsigned integers) to detect offset
-// correctness. File is always 128KiB large (2^16 * 2 bytes).
+// Write a test file with a very specific pattern (increasing little-endian 16
+// bit unsigned integers) to detect offset correctness. File is always 128KiB
+// large (2^16 * 2 bytes).
func makeTestFile() *os.File {
f, err := ioutil.TempFile("/tmp", "")
if err != nil {
@@ -100,8 +101,9 @@
backingFile, err := dev.BackingFilePath()
assert.NoError(t, err)
- // The filename of the temporary file is not available in this context, but we know that the file
- // needs to be in /tmp, which should be a good-enough test.
+ // The filename of the temporary file is not available in this context, but
+ // we know that the file needs to be in /tmp, which should be a good-enough
+ // test.
assert.Contains(t, backingFile, "/tmp/")
}
diff --git a/metropolis/pkg/pki/ca.go b/metropolis/pkg/pki/ca.go
index bbed085..5ab1089 100644
--- a/metropolis/pkg/pki/ca.go
+++ b/metropolis/pkg/pki/ca.go
@@ -33,16 +33,19 @@
// certificates, and any other Certificate that has been created with CA(),
// which makes this Certificate act as a CA and issue (sign) certificates.
type Issuer interface {
- // CACertificate returns the DER-encoded x509 certificate of the CA that will sign certificates when Issue is
- // called, or nil if this is self-signing issuer.
+ // CACertificate returns the DER-encoded x509 certificate of the CA that
+ // will sign certificates when Issue is called, or nil if this is
+	// a self-signing issuer.
CACertificate(ctx context.Context, kv clientv3.KV) ([]byte, error)
- // Issue will generate a key and certificate signed by the Issuer. The returned certificate is x509 DER-encoded,
- // while the key is a bare ed25519 key.
+ // Issue will generate a key and certificate signed by the Issuer. The
+ // returned certificate is x509 DER-encoded, while the key is a bare
+ // ed25519 key.
Issue(ctx context.Context, req *Certificate, kv clientv3.KV) (cert, key []byte, err error)
}
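
A hedged sketch of driving an Issuer (issueOne is hypothetical; issuer, kv and
req are assumed to exist, and Certificate construction is elided as it is not
part of this hunk):

func issueOne(ctx context.Context, issuer Issuer, kv clientv3.KV,
	req *Certificate) error {
	certDER, key, err := issuer.Issue(ctx, req, kv)
	if err != nil {
		return err
	}
	_ = certDER // DER-encoded x509 certificate
	_ = key     // bare ed25519 private key
	return nil
}
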
-// issueCertificate is a generic low level certificate-and-key issuance function. If ca or cakey is null, the
-// certificate will be self-signed. The returned certificate is DER-encoded, while the returned key is internal.
+// issueCertificate is a generic low level certificate-and-key issuance
+// function. If ca or caKey is nil, the certificate will be self-signed. The
+// returned certificate is DER-encoded, while the returned key is internal.
func issueCertificate(req *Certificate, ca *x509.Certificate, caKey interface{}) (cert, key []byte, err error) {
var privKey ed25519.PrivateKey
var pubKey ed25519.PublicKey
@@ -75,7 +78,8 @@
req.template.BasicConstraintsValid = true
req.template.SubjectKeyId = skid
- // Set the AuthorityKeyID to the SKID of the signing certificate (or self, if self-signing).
+ // Set the AuthorityKeyID to the SKID of the signing certificate (or self,
+ // if self-signing).
if ca != nil && caKey != nil {
req.template.AuthorityKeyId = ca.AuthorityKeyId
} else {
diff --git a/metropolis/pkg/pki/certificate.go b/metropolis/pkg/pki/certificate.go
index ff60f73..c0a1f53 100644
--- a/metropolis/pkg/pki/certificate.go
+++ b/metropolis/pkg/pki/certificate.go
@@ -37,7 +37,8 @@
prefix string
}
-// Namespaced creates a namespace for storing certificate data in etcd at a given 'path' prefix.
+// Namespaced creates a namespace for storing certificate data in etcd at a
+// given 'path' prefix.
func Namespaced(prefix string) Namespace {
return Namespace{
prefix: prefix,
@@ -91,7 +92,7 @@
// Client makes a Kubernetes PKI-compatible client certificate template.
// Directly derived from Kubernetes PKI requirements documented at
-// https://kubernetes.io/docs/setup/best-practices/certificates/#configure-certificates-manually
+// https://kubernetes.io/docs/setup/best-practices/certificates/#configure-certificates-manually
func Client(identity string, groups []string) x509.Certificate {
return x509.Certificate{
Subject: pkix.Name{
diff --git a/metropolis/pkg/pki/x509.go b/metropolis/pkg/pki/x509.go
index d2affe8..e198902 100644
--- a/metropolis/pkg/pki/x509.go
+++ b/metropolis/pkg/pki/x509.go
@@ -30,12 +30,12 @@
unknownNotAfter = time.Unix(253402300799, 0)
)
-// Workaround for https://github.com/golang/go/issues/26676 in Go's crypto/x509. Specifically Go
-// violates Section 4.2.1.2 of RFC 5280 without this.
-// Fixed for 1.15 in https://go-review.googlesource.com/c/go/+/227098/.
+// Workaround for https://github.com/golang/go/issues/26676 in Go's
+// crypto/x509. Specifically Go violates Section 4.2.1.2 of RFC 5280 without
+// this. Fixed for 1.15 in https://go-review.googlesource.com/c/go/+/227098/.
//
-// Taken from https://github.com/FiloSottile/mkcert/blob/master/cert.go#L295 written by one of Go's
-// crypto engineers
+// Taken from https://github.com/FiloSottile/mkcert/blob/master/cert.go#L295,
+// written by one of Go's crypto engineers.
//
// TODO(lorenz): remove this once we migrate to Go 1.15.
func calculateSKID(pubKey crypto.PublicKey) ([]byte, error) {
diff --git a/metropolis/pkg/supervisor/supervisor.go b/metropolis/pkg/supervisor/supervisor.go
index c37b590..f26732d 100644
--- a/metropolis/pkg/supervisor/supervisor.go
+++ b/metropolis/pkg/supervisor/supervisor.go
@@ -16,9 +16,10 @@
package supervisor
-// The service supervision library allows for writing of reliable, service-style software within a Metropolis node.
-// It builds upon the Erlang/OTP supervision tree system, adapted to be more Go-ish.
-// For detailed design see go/supervision.
+// The service supervision library allows for writing of reliable,
+// service-style software within a Metropolis node. It builds upon the
+// Erlang/OTP supervision tree system, adapted to be more Go-ish. For detailed
+// design see go/supervision.
import (
"context"
@@ -28,17 +29,21 @@
"source.monogon.dev/metropolis/pkg/logtree"
)
-// A Runnable is a function that will be run in a goroutine, and supervised throughout its lifetime. It can in turn
-// start more runnables as its children, and those will form part of a supervision tree.
-// The context passed to a runnable is very important and needs to be handled properly. It will be live (non-errored) as
-// long as the runnable should be running, and canceled (ctx.Err() will be non-nil) when the supervisor wants it to
-// exit. This means this context is also perfectly usable for performing any blocking operations.
+// A Runnable is a function that will be run in a goroutine, and supervised
+// throughout its lifetime. It can in turn start more runnables as its
+// children, and those will form part of a supervision tree.
+// The context passed to a runnable is very important and needs to be handled
+// properly. It will be live (non-errored) as long as the runnable should be
+// running, and canceled (ctx.Err() will be non-nil) when the supervisor wants
+// it to exit. This means this context is also perfectly usable for performing
+// any blocking operations.
type Runnable func(ctx context.Context) error
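
A minimal sketch of a well-behaved Runnable under the contract described above
(exampleRunnable is hypothetical; Signal and SignalHealthy are defined further
down in this file):

func exampleRunnable(ctx context.Context) error {
	// ... setup work here ...
	Signal(ctx, SignalHealthy)
	// Serve until the supervisor asks us to exit.
	<-ctx.Done()
	return ctx.Err()
}
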
-// RunGroup starts a set of runnables as a group. These runnables will run together, and if any one of them quits
-// unexpectedly, the result will be canceled and restarted.
-// The context here must be an existing Runnable context, and the spawned runnables will run under the node that this
-// context represents.
+// RunGroup starts a set of runnables as a group. These runnables will run
+// together, and if any one of them quits unexpectedly, the rest will be
+// canceled and restarted.
+// The context here must be an existing Runnable context, and the spawned
+// runnables will run under the node that this context represents.
func RunGroup(ctx context.Context, runnables map[string]Runnable) error {
node, unlock := fromContext(ctx)
defer unlock()
@@ -52,8 +57,9 @@
})
}
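
A hypothetical parent runnable using RunGroup, reusing exampleRunnable from
the sketch above (if either child quits unexpectedly, both are canceled and
restarted together):

func parentRunnable(ctx context.Context) error {
	if err := RunGroup(ctx, map[string]Runnable{
		"listener": exampleRunnable,
		"worker":   exampleRunnable,
	}); err != nil {
		return err
	}
	Signal(ctx, SignalHealthy)
	Signal(ctx, SignalDone)
	return nil
}
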
-// Signal tells the supervisor that the calling runnable has reached a certain state of its lifecycle. All runnables
-// should SignalHealthy when they are ready with set up, running other child runnables and are now 'serving'.
+// Signal tells the supervisor that the calling runnable has reached a certain
+// state of its lifecycle. All runnables should signal SignalHealthy once they
+// are done with setup, are running any child runnables, and are now
+// 'serving'.
func Signal(ctx context.Context, signal SignalType) {
node, unlock := fromContext(ctx)
defer unlock()
@@ -63,28 +69,34 @@
type SignalType int
const (
- // The runnable is healthy, done with setup, done with spawning more Runnables, and ready to serve in a loop.
- // The runnable needs to check the parent context and ensure that if that context is done, the runnable exits.
+ // The runnable is healthy, done with setup, done with spawning more
+ // Runnables, and ready to serve in a loop. The runnable needs to check
+ // the parent context and ensure that if that context is done, the runnable
+ // exits.
SignalHealthy SignalType = iota
- // The runnable is done - it does not need to run any loop. This is useful for Runnables that only set up other
- // child runnables. This runnable will be restarted if a related failure happens somewhere in the supervision tree.
+ // The runnable is done - it does not need to run any loop. This is useful
+ // for Runnables that only set up other child runnables. This runnable will
+ // be restarted if a related failure happens somewhere in the supervision
+ // tree.
SignalDone
)
-// supervisor represents and instance of the supervision system. It keeps track of a supervision tree and a request
-// channel to its internal processor goroutine.
+// supervisor represents an instance of the supervision system. It keeps track
+// of a supervision tree and a request channel to its internal processor
+// goroutine.
type supervisor struct {
// mu guards the entire state of the supervisor.
mu sync.RWMutex
- // root is the root node of the supervision tree, named 'root'. It represents the Runnable started with the
- // supervisor.New call.
+ // root is the root node of the supervision tree, named 'root'. It
+ // represents the Runnable started with the supervisor.New call.
root *node
// logtree is the main logtree exposed to runnables and used internally.
logtree *logtree.LogTree
// ilogger is the internal logger logging to "supervisor" in the logtree.
ilogger logtree.LeveledLogger
- // pReq is an interface channel to the lifecycle processor of the supervisor.
+ // pReq is an interface channel to the lifecycle processor of the
+ // supervisor.
pReq chan *processorRequest
// propagate panics, ie. don't catch them.
@@ -95,8 +107,9 @@
type SupervisorOpt func(s *supervisor)
var (
- // WithPropagatePanic prevents the Supervisor from catching panics in runnables and treating them as failures.
- // This is useful to enable for testing and local debugging.
+ // WithPropagatePanic prevents the Supervisor from catching panics in
+ // runnables and treating them as failures. This is useful to enable for
+ // testing and local debugging.
WithPropagatePanic = func(s *supervisor) {
s.propagatePanic = true
}
diff --git a/metropolis/pkg/supervisor/supervisor_node.go b/metropolis/pkg/supervisor/supervisor_node.go
index a7caf82..a3bf5e4 100644
--- a/metropolis/pkg/supervisor/supervisor_node.go
+++ b/metropolis/pkg/supervisor/supervisor_node.go
@@ -25,23 +25,28 @@
"github.com/cenkalti/backoff/v4"
)
-// node is a supervision tree node. It represents the state of a Runnable within this tree, its relation to other tree
-// elements, and contains supporting data needed to actually supervise it.
+// node is a supervision tree node. It represents the state of a Runnable
+// within this tree, its relation to other tree elements, and contains
+// supporting data needed to actually supervise it.
type node struct {
- // The name of this node. Opaque string. It's used to make up the 'dn' (distinguished name) of a node within
- // the tree. When starting a runnable inside a tree, this is where that name gets used.
+ // The name of this node. Opaque string. It's used to make up the 'dn'
+ // (distinguished name) of a node within the tree. When starting a runnable
+ // inside a tree, this is where that name gets used.
name string
runnable Runnable
// The supervisor managing this tree.
sup *supervisor
- // The parent, within the tree, of this node. If this is the root node of the tree, this is nil.
+ // The parent, within the tree, of this node. If this is the root node of
+ // the tree, this is nil.
parent *node
- // Children of this tree. This is represented by a map keyed from child node names, for easy access.
+ // Children of this tree. This is represented by a map keyed from child
+ // node names, for easy access.
children map[string]*node
- // Supervision groups. Each group is a set of names of children. Sets, and as such groups, don't overlap between
- // each other. A supervision group indicates that if any child within that group fails, all others should be
- // canceled and restarted together.
+ // Supervision groups. Each group is a set of names of children. Sets, and
+ // as such groups, don't overlap between each other. A supervision group
+ // indicates that if any child within that group fails, all others should
+ // be canceled and restarted together.
groups []map[string]bool
// The current state of the runnable in this node.
@@ -55,19 +60,21 @@
ctxC context.CancelFunc
}
-// nodeState is the state of a runnable within a node, and in a way the node itself.
-// This follows the state diagram from go/supervision.
+// nodeState is the state of a runnable within a node, and in a way the node
+// itself. This follows the state diagram from go/supervision.
type nodeState int
const (
- // A node that has just been created, and whose runnable has been started already but hasn't signaled anything yet.
+ // A node that has just been created, and whose runnable has been started
+ // already but hasn't signaled anything yet.
nodeStateNew nodeState = iota
- // A node whose runnable has signaled being healthy - this means it's ready to serve/act.
+ // A node whose runnable has signaled being healthy - this means it's ready
+ // to serve/act.
nodeStateHealthy
// A node that has unexpectedly returned or panicked.
nodeStateDead
- // A node that has declared that its done with its work and should not be restarted, unless a supervision tree
- // failure requires that.
+	// A node that has declared that it is done with its work and should not
+	// be restarted, unless a supervision tree failure requires that.
nodeStateDone
// A node that has returned after being requested to cancel.
nodeStateCanceled
@@ -101,8 +108,9 @@
dnKey = contextKey("dn")
)
-// fromContext retrieves a tree node from a runnable context. It takes a lock on the tree and returns an unlock
-// function. This unlock function needs to be called once mutations on the tree/supervisor/node are done.
+// fromContext retrieves a tree node from a runnable context. It takes a lock
+// on the tree and returns an unlock function. This unlock function needs to be
+// called once mutations on the tree/supervisor/node are done.
func fromContext(ctx context.Context) (*node, func()) {
sup, ok := ctx.Value(supervisorKey).(*supervisor)
if !ok {
@@ -120,12 +128,13 @@
return sup.nodeByDN(dnParent), sup.mu.Unlock
}
-// All the following 'internal' supervisor functions must only be called with the supervisor lock taken. Getting a lock
-// via fromContext is enough.
+// All the following 'internal' supervisor functions must only be called with
+// the supervisor lock taken. Getting a lock via fromContext is enough.
-// dn returns the distinguished name of a node. This distinguished name is a period-separated, inverse-DNS-like name.
-// For instance, the runnable 'foo' within the runnable 'bar' will be called 'root.bar.foo'. The root of the tree is
-// always named, and has the dn, 'root'.
+// dn returns the distinguished name of a node. This distinguished name is a
+// period-separated, inverse-DNS-like name. For instance, the runnable 'foo'
+// within the runnable 'bar' will be called 'root.bar.foo'. The root of the
+// tree is always named, and has the dn, 'root'.
func (n *node) dn() string {
if n.parent != nil {
return fmt.Sprintf("%s.%s", n.parent.dn(), n.name)
@@ -133,8 +142,9 @@
return n.name
}
-// groupSiblings is a helper function to get all runnable group siblings of a given runnable name within this node.
-// All children are always in a group, even if that group is unary.
+// groupSiblings is a helper function to get all runnable group siblings of a
+// given runnable name within this node. All children are always in a group,
+// even if that group is unary.
func (n *node) groupSiblings(name string) map[string]bool {
for _, m := range n.groups {
if _, ok := m[name]; ok {
@@ -144,11 +154,12 @@
return nil
}
-// newNode creates a new node with a given parent. It does not register it with the parent (as that depends on group
-// placement).
+// newNode creates a new node with a given parent. It does not register it with
+// the parent (as that depends on group placement).
func newNode(name string, runnable Runnable, sup *supervisor, parent *node) *node {
- // We use exponential backoff for failed runnables, but at some point we cap at a given backoff time.
- // To achieve this, we set MaxElapsedTime to 0, which will cap the backoff at MaxInterval.
+ // We use exponential backoff for failed runnables, but at some point we
+ // cap at a given backoff time. To achieve this, we set MaxElapsedTime to
+ // 0, which will cap the backoff at MaxInterval.
bo := backoff.NewExponentialBackOff()
bo.MaxElapsedTime = 0
@@ -165,11 +176,12 @@
return n
}
-// resetNode sets up all the dynamic fields of the node, in preparation of starting a runnable. It clears the node's
-// children, groups and resets its context.
+// reset sets up all the dynamic fields of the node, in preparation for
+// starting a runnable. It clears the node's children, groups and resets its
+// context.
func (n *node) reset() {
- // Make new context. First, acquire parent context. For the root node that's Background, otherwise it's the
- // parent's context.
+ // Make new context. First, acquire parent context. For the root node
+ // that's Background, otherwise it's the parent's context.
var pCtx context.Context
if n.parent == nil {
pCtx = context.Background()
@@ -263,7 +275,8 @@
return nil
}
-// signal sequences state changes by signals received from runnables and updates a node's status accordingly.
+// signal sequences state changes by signals received from runnables and
+// updates a node's status accordingly.
func (n *node) signal(signal SignalType) {
switch signal {
case SignalHealthy:
diff --git a/metropolis/pkg/supervisor/supervisor_processor.go b/metropolis/pkg/supervisor/supervisor_processor.go
index 965a667..5fa759e 100644
--- a/metropolis/pkg/supervisor/supervisor_processor.go
+++ b/metropolis/pkg/supervisor/supervisor_processor.go
@@ -24,11 +24,13 @@
"time"
)
-// The processor maintains runnable goroutines - ie., when requested will start one, and then once it exists it will
-// record the result and act accordingly. It is also responsible for detecting and acting upon supervision subtrees that
-// need to be restarted after death (via a 'GC' process)
+// The processor maintains runnable goroutines - ie. when requested it will
+// start one, and then once it exits it will record the result and act
+// accordingly. It is also responsible for detecting and acting upon
+// supervision subtrees that need to be restarted after death (via a 'GC'
+// process).
-// processorRequest is a request for the processor. Only one of the fields can be set.
+// processorRequest is a request for the processor. Only one of the fields can
+// be set.
type processorRequest struct {
schedule *processorRequestSchedule
died *processorRequestDied
@@ -40,7 +42,8 @@
dn string
}
-// processorRequestDied is a signal from a runnable goroutine that the runnable has died.
+// processorRequestDied is a signal from a runnable goroutine that the runnable
+// has died.
type processorRequestDied struct {
dn string
err error
@@ -57,8 +60,10 @@
// Waiters waiting for the GC to be settled.
var waiters []chan struct{}
- // The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
- // (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
+ // The GC will run every millisecond if needed. Any time the processor
+ // requests a change in the supervision tree (ie a death or a new runnable)
+ // it will mark the state as dirty and run the GC on the next millisecond
+ // cycle.
gc := time.NewTicker(1 * time.Millisecond)
defer gc.Stop()
clean := true
@@ -85,7 +90,8 @@
clean = true
cleanCycles += 1
- // This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
+ // This threshold is somewhat arbitrary. It's a balance between
+ // test speed and test reliability.
if cleanCycles > 50 {
for _, w := range waiters {
close(w)
@@ -109,8 +115,9 @@
}
}
-// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
-// they do not get automatically restarted.
+// processKill cancels all nodes in the supervision tree. This is only called
+// right before exiting the processor, so they do not get automatically
+// restarted.
func (s *supervisor) processKill() {
s.mu.Lock()
defer s.mu.Unlock()
@@ -138,7 +145,8 @@
}
}
-// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
+// processSchedule starts a node's runnable in a goroutine and records its
+// output once it's done.
func (s *supervisor) processSchedule(r *processorRequestSchedule) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -169,8 +177,9 @@
}()
}
-// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
-// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
+// processDied records the result from a runnable goroutine, and updates its
+// node state accordingly. If the result is a death and not an expected exit,
+// related nodes (ie. children and group siblings) are canceled accordingly.
func (s *supervisor) processDied(r *processorRequestDied) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -195,14 +204,16 @@
break
}
- // Simple case: the context was canceled and the returned error is the context error.
+ // Simple case: the context was canceled and the returned error is the
+ // context error.
if err := ctx.Err(); err != nil && perr == err {
// Mark the node as canceled successfully.
n.state = nodeStateCanceled
return
}
- // Otherwise, the Runnable should not have died or quit. Handle accordingly.
+ // Otherwise, the Runnable should not have died or quit. Handle
+ // accordingly.
err := r.err
// A lack of returned error is also an error.
if err == nil {
@@ -225,27 +236,33 @@
continue
}
sibling := n.parent.children[name]
- // TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
+ // TODO(q3k): does this need to run in a goroutine, ie. can a
+ // context cancel block?
sibling.ctxC()
}
}
}
-// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
-// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
-// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
+// processGC runs the GC process. It's not really Garbage Collection, as in,
+// it doesn't remove unnecessary tree nodes - but it does find nodes that need
+// to be restarted, finds the subset that can be restarted, and then schedules
+// them for running. As such, it's less of a Garbage Collector and more of a
+// Necromancer. However, GC is a friendlier name.
func (s *supervisor) processGC() {
s.mu.Lock()
defer s.mu.Unlock()
- // The 'GC' serves is the main business logic of the supervision tree. It traverses a locked tree and tries to
- // find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
- // subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
- // also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
- // that can be restarted at once, it will do so.
+	// The 'GC' serves as the main business logic of the supervision tree. It
+	// traverses a locked tree and tries to find subtrees that must be
+	// restarted (because of a DEAD/CANCELED runnable). It then finds which of
+	// these subtrees that should be restarted can be restarted, ie. which
+	// ones are fully recursively DEAD/CANCELED. It also finds the smallest
+	// set of largest subtrees that can be restarted, ie. if there are
+	// multiple DEAD runnables that can be restarted at once, it will do so.
// Phase one: Find all leaves.
- // This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
+ // This is a simple DFS that finds all the leaves of the tree, ie all nodes
+ // that do not have children nodes.
leaves := make(map[string]bool)
queue := []*node{s.root}
for {
@@ -264,14 +281,17 @@
}
}
- // Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
- // A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
- // Such a 'ready' subtree can be restarted by the supervisor if needed.
+ // Phase two: traverse tree from node to root and make note of all subtrees
+ // that can be restarted.
+ // A subtree is restartable/ready iff every node in that subtree is either
+ // CANCELED, DEAD or DONE. Such a 'ready' subtree can be restarted by the
+ // supervisor if needed.
// DNs that we already visited.
visited := make(map[string]bool)
// DNs whose subtrees are ready to be restarted.
- // These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
+ // These are all subtrees recursively - ie., root.a.a and root.a will both
+ // be marked here.
ready := make(map[string]bool)
// We build a queue of nodes to visit, starting from the leaves.
@@ -299,17 +319,20 @@
}
}
- // If no decision about children is available, it means we ended up in this subtree through some shorter path
- // of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
- // to be enqueued. Easy solution: just push back the current element and retry later.
+ // If no decision about children is available, it means we ended up in
+ // this subtree through some shorter path of a shorter/lower-order
+ // leaf. There is a path to a leaf that's longer than the one that
+ // caused this node to be enqueued. Easy solution: just push back the
+ // current element and retry later.
if !allVisited {
// Push back to queue and wait for a decision later.
queue = append(queue, cur)
continue
}
- // All children have been visited and we have an idea about whether they're ready/restartable. All of the node's
- // children must be restartable in order for this node to be restartable.
+ // All children have been visited and we have an idea about whether
+ // they're ready/restartable. All of the node's children must be
+ // restartable in order for this node to be restartable.
childrenReady := true
for _, c := range cur.children {
if !ready[c.dn()] {
@@ -318,7 +341,8 @@
}
}
- // In addition to children, the node itself must be restartable (ie. DONE, DEAD or CANCELED).
+ // In addition to children, the node itself must be restartable (ie.
+ // DONE, DEAD or CANCELED).
curReady := false
switch cur.state {
case nodeStateDone:
@@ -329,7 +353,8 @@
curReady = true
}
- // Note down that we have an opinion on this node, and note that opinion down.
+ // Note down that we have an opinion on this node, and what that opinion
+ // is.
visited[curDn] = true
ready[curDn] = childrenReady && curReady
@@ -339,15 +364,17 @@
}
}
- // Phase 3: traverse tree from root to find largest subtrees that need to be restarted and are ready to be
- // restarted.
+ // Phase 3: traverse tree from root to find largest subtrees that need to
+ // be restarted and are ready to be restarted.
// All DNs that need to be restarted by the GC process.
want := make(map[string]bool)
- // All DNs that need to be restarted and can be restarted by the GC process - a subset of 'want' DNs.
+ // All DNs that need to be restarted and can be restarted by the GC process
+ // - a subset of 'want' DNs.
can := make(map[string]bool)
- // The set difference between 'want' and 'can' are all nodes that should be restarted but can't yet (ie. because
- // a child is still in the process of being canceled).
+ // The set difference between 'want' and 'can' is the set of nodes that
+ // should be restarted but can't yet (ie. because a child is still in the
+ // process of being canceled).
// DFS from root.
queue = []*node{s.root}
@@ -366,14 +393,16 @@
// If it should be restarted and is ready to be restarted...
if want[cur.dn()] && ready[cur.dn()] {
- // And its parent context is valid (ie hasn't been canceled), mark it as restartable.
+ // And its parent context is valid (ie hasn't been canceled), mark
+ // it as restartable.
if cur.parent == nil || cur.parent.ctx.Err() == nil {
can[cur.dn()] = true
continue
}
}
- // Otherwise, traverse further down the tree to see if something else needs to be done.
+ // Otherwise, traverse further down the tree to see if something else
+ // needs to be done.
for _, c := range cur.children {
queue = append(queue, c)
}
@@ -383,13 +412,15 @@
for dn, _ := range can {
n := s.nodeByDN(dn)
- // Only back off when the node unexpectedly died - not when it got canceled.
+ // Only back off when the node unexpectedly died - not when it got
+ // canceled.
bo := time.Duration(0)
if n.state == nodeStateDead {
bo = n.bo.NextBackOff()
}
- // Prepare node for rescheduling - remove its children, reset its state to new.
+ // Prepare node for rescheduling - remove its children, reset its state
+ // to new.
n.reset()
s.ilogger.Infof("rescheduling supervised node %s with backoff %s", dn, bo.String())
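To make the three phases concrete, below is a minimal, self-contained Go sketch of the readiness rule from phase two: a subtree is restartable iff every node in it is DEAD, CANCELED or DONE. The node type, state names and the recursive helper are illustrative only; the actual supervisor works over DNs with an iterative DFS, as shown above.

    package main

    import "fmt"

    type state int

    const (
        stateHealthy state = iota
        stateDead
        stateCanceled
        stateDone
    )

    type node struct {
        name     string
        state    state
        children []*node
    }

    // ready reports whether the whole subtree rooted at n is restartable, ie.
    // whether every node in it is DEAD, CANCELED or DONE.
    func ready(n *node) bool {
        if n.state == stateHealthy {
            return false
        }
        for _, c := range n.children {
            if !ready(c) {
                return false
            }
        }
        return true
    }

    func main() {
        root := &node{name: "root", state: stateDead, children: []*node{
            {name: "root.a", state: stateDone},
            {name: "root.b", state: stateHealthy},
        }}
        // root.b is still healthy, so the subtree at root cannot be
        // restarted yet.
        fmt.Println(ready(root)) // false
    }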
diff --git a/metropolis/pkg/supervisor/supervisor_support.go b/metropolis/pkg/supervisor/supervisor_support.go
index c2b569c..5f69104 100644
--- a/metropolis/pkg/supervisor/supervisor_support.go
+++ b/metropolis/pkg/supervisor/supervisor_support.go
@@ -16,21 +16,26 @@
package supervisor
-// Supporting infrastructure to allow running some non-Go payloads under supervision.
+// Supporting infrastructure to allow running some non-Go payloads under
+// supervision.
import (
"context"
"net"
"os/exec"
+
"source.monogon.dev/metropolis/pkg/logtree"
"google.golang.org/grpc"
)
-// GRPCServer creates a Runnable that serves gRPC requests as longs as it's not canceled.
-// If graceful is set to true, the server will be gracefully stopped instead of plain stopped. This means all pending
-// RPCs will finish, but also requires streaming gRPC handlers to check their context liveliness and exit accordingly.
-// If the server code does not support this, `graceful` should be false and the server will be killed violently instead.
+// GRPCServer creates a Runnable that serves gRPC requests as long as it's not
+// canceled.
+// If graceful is set to true, the server will be gracefully stopped instead of
+// plain stopped. This means all pending RPCs will finish, but also requires
+// streaming gRPC handlers to check their context liveness and exit
+// accordingly. If the server code does not support this, `graceful` should be
+// false and the server will be killed violently instead.
func GRPCServer(srv *grpc.Server, lis net.Listener, graceful bool) Runnable {
return func(ctx context.Context) error {
Signal(ctx, SignalHealthy)
@@ -52,7 +57,8 @@
}
}
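The GRPCServer body is elided by this hunk. A sketch of the pattern the comment describes - assuming the package's Runnable, Signal and SignalHealthy seen above, plus the grpc, net and context imports; not necessarily the exact implementation - looks like this:

    func grpcServerSketch(srv *grpc.Server, lis net.Listener, graceful bool) Runnable {
        return func(ctx context.Context) error {
            Signal(ctx, SignalHealthy)
            errC := make(chan error, 1)
            go func() { errC <- srv.Serve(lis) }()
            select {
            case <-ctx.Done():
                if graceful {
                    // Waits for pending RPCs to finish; streaming handlers
                    // must watch ctx and return, or this blocks forever.
                    srv.GracefulStop()
                } else {
                    srv.Stop()
                }
                return ctx.Err()
            case err := <-errC:
                // Serve returned on its own - treat as a failure.
                return err
            }
        }
    }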
-// RunCommand will create a Runnable that starts a long-running command, whose exit is determined to be a failure.
+// RunCommand runs a long-running command, treating any exit - regardless of
+// exit status - as a failure.
func RunCommand(ctx context.Context, cmd *exec.Cmd, opts ...RunCommandOption) error {
Signal(ctx, SignalHealthy)
@@ -86,8 +92,8 @@
parseKlog bool
}
-// ParseKLog signals that the command being run will return klog-compatible logs
-// to stdout and/or stderr, and these will be re-interpreted as structured
+// ParseKLog signals that the command being run will return klog-compatible
+// logs to stdout and/or stderr, and these will be re-interpreted as structured
// logging and emitted to the supervisor's logger.
func ParseKLog() RunCommandOption {
return RunCommandOption{
diff --git a/metropolis/pkg/supervisor/supervisor_test.go b/metropolis/pkg/supervisor/supervisor_test.go
index 9c7bdb7..db84163 100644
--- a/metropolis/pkg/supervisor/supervisor_test.go
+++ b/metropolis/pkg/supervisor/supervisor_test.go
@@ -76,7 +76,8 @@
}
}
-// rc is a Remote Controlled runnable. It is a generic runnable used for testing the supervisor.
+// rc is a Remote Controlled runnable. It is a generic runnable used for
+// testing the supervisor.
type rc struct {
req chan rcRunnableRequest
}
@@ -129,7 +130,8 @@
}
func (r *rc) waitState(s rcRunnableState) {
- // This is poll based. Making it non-poll based would make the RC runnable logic a bit more complex for little gain.
+ // This is poll based. Making it non-poll based would make the RC runnable
+ // logic a bit more complex for little gain.
for {
got := r.state()
if got == s {
@@ -384,12 +386,14 @@
}, WithPropagatePanic)
one.becomeHealthy()
- // Die a bunch of times in a row, this brings up the next exponential backoff to over a second.
+ // Die a bunch of times in a row; this brings the next exponential backoff
+ // to over a second.
for i := 0; i < 4; i += 1 {
one.die()
one.waitState(rcRunnableStateNew)
}
- // Measure how long it takes for the runnable to respawn after a number of failures
+ // Measure how long it takes for the runnable to respawn after a number of
+ // failures.
start := time.Now()
one.die()
one.becomeHealthy()
@@ -411,8 +415,9 @@
}
}
-// TestResilience throws some curveballs at the supervisor - either programming errors or high load. It then ensures
-// that another runnable is running, and that it restarts on its sibling failure.
+// TestResilience throws some curveballs at the supervisor - either programming
+// errors or high load. It then ensures that another runnable is running, and
+// that it restarts on its sibling's failure.
func TestResilience(t *testing.T) {
// request/response channel for testing liveness of the 'one' runnable
req := make(chan chan struct{})
@@ -443,7 +448,8 @@
timeout.Stop()
}
- // A nasty runnable that calls Signal with the wrong context (this is a programming error)
+ // A nasty runnable that calls Signal with the wrong context (this is a
+ // programming error)
two := func(ctx context.Context) error {
Signal(context.TODO(), SignalHealthy)
return nil
diff --git a/metropolis/pkg/tpm/credactivation_compat.go b/metropolis/pkg/tpm/credactivation_compat.go
index 039f8d5..a6710ae 100644
--- a/metropolis/pkg/tpm/credactivation_compat.go
+++ b/metropolis/pkg/tpm/credactivation_compat.go
@@ -16,13 +16,16 @@
package tpm
-// This file is adapted from github.com/google/go-tpm/tpm2/credactivation which outputs broken
-// challenges for unknown reasons. They use u16 length-delimited outputs for the challenge blobs
-// which is incorrect. Rather than rewriting the routine, we only applied minimal fixes to it
-// and skip the ECC part of the issue (because we would rather trust the proprietary RSA implementation).
+// This file is adapted from github.com/google/go-tpm/tpm2/credactivation which
+// outputs broken challenges for unknown reasons. They use u16 length-delimited
+// outputs for the challenge blobs which is incorrect. Rather than rewriting
+// the routine, we only applied minimal fixes to it and skip the ECC part of
+// the issue (because we would rather trust the proprietary RSA
+// implementation).
//
-// TODO(lorenz): I'll eventually deal with this upstream, but for now just fix it here (it's not that)
-// much code after all (https://github.com/google/go-tpm/issues/121)
+// TODO(lorenz): I'll eventually deal with this upstream, but for now just fix
+// it here (it's not that much code after all).
+// https://github.com/google/go-tpm/issues/121
import (
"crypto/aes"
@@ -48,7 +51,8 @@
return nil, nil, err
}
- // The seed length should match the keysize used by the EKs symmetric cipher.
+ // The seed length should match the key size used by the EK's symmetric
+ // cipher.
// For typical RSA EKs, this will be 128 bits (16 bytes).
// Spec: TCG 2.0 EK Credential Profile revision 14, section 2.1.5.1.
seed := make([]byte, symBlockSize)
@@ -64,8 +68,8 @@
return nil, nil, fmt.Errorf("generating encrypted seed: %v", err)
}
- // Generate the encrypted credential by convolving the seed with the digest of
- // the AIK, and using the result as the key to encrypt the secret.
+ // Generate the encrypted credential by convolving the seed with the digest
+ // of the AIK, and using the result as the key to encrypt the secret.
// See section 24.4 of TPM 2.0 specification, part 1.
aikNameEncoded, err := aik.Encode()
if err != nil {
diff --git a/metropolis/pkg/tpm/eventlog/eventlog.go b/metropolis/pkg/tpm/eventlog/eventlog.go
index 49a8a26..8367935 100644
--- a/metropolis/pkg/tpm/eventlog/eventlog.go
+++ b/metropolis/pkg/tpm/eventlog/eventlog.go
@@ -14,7 +14,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Taken and pruned from go-attestation revision 2453c8f39a4ff46009f6a9db6fb7c6cca789d9a1 under Apache 2.0
+// Taken and pruned from go-attestation revision
+// 2453c8f39a4ff46009f6a9db6fb7c6cca789d9a1 under Apache 2.0
package eventlog
@@ -98,7 +99,7 @@
// TPM algorithms. See the TPM 2.0 specification section 6.3.
//
-// https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-2-Structures-01.38.pdf#page=42
+// https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-2-Structures-01.38.pdf#page=42
const (
algSHA1 uint16 = 0x0004
algSHA256 uint16 = 0x000B
@@ -391,11 +392,11 @@
return verifiedEvents, nil
}
-// EV_NO_ACTION is a special event type that indicates information to the parser
-// instead of holding a measurement. For TPM 2.0, this event type is used to signal
-// switching from SHA1 format to a variable length digest.
+// EV_NO_ACTION is a special event type that indicates information to the
+// parser instead of holding a measurement. For TPM 2.0, this event type is
+// used to signal switching from SHA1 format to a variable length digest.
//
-// https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClientSpecPlat_TPM_2p0_1p04_pub.pdf#page=110
+// https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClientSpecPlat_TPM_2p0_1p04_pub.pdf#page=110
const eventTypeNoAction = 0x03
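As an illustration of how a parser can use this sentinel, a hypothetical helper (not part of this change) reusing eventTypeNoAction and the wantSignature variable defined further down:

    func isSpecIDEvent(pcrIndex, eventType uint32, data []byte) bool {
        // The spec ID event is logged on PCR 0 with type EV_NO_ACTION and
        // starts with the "Spec ID Event03" signature; seeing it switches
        // the parser from the SHA1 format to the crypto agile format.
        if pcrIndex != 0 || eventType != eventTypeNoAction {
            return false
        }
        if len(data) < len(wantSignature) {
            return false
        }
        return bytes.Equal(data[:len(wantSignature)], wantSignature[:])
    }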
// ParseEventLog parses an unverified measurement log.
@@ -457,7 +458,7 @@
}
// Expected values for various Spec ID Event fields.
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=19
+// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=19
var wantSignature = [16]byte{0x53, 0x70,
0x65, 0x63, 0x20, 0x49,
0x44, 0x20, 0x45, 0x76,
@@ -471,8 +472,7 @@
)
// parseSpecIDEvent parses a TCG_EfiSpecIDEventStruct structure from the reader.
-//
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=18
+// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=18
func parseSpecIDEvent(b []byte) (*specIDEvent, error) {
r := bytes.NewReader(b)
var header struct {
@@ -535,7 +535,7 @@
}
// TPM 1.2 event log format. See "5.1 SHA1 Event Log Entry Format"
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
+// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
type rawEventHeader struct {
PCRIndex uint32
Type uint32
@@ -580,7 +580,7 @@
}
// TPM 2.0 event log format. See "5.2 Crypto Agile Log Entry Format"
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
+// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
type rawEvent2Header struct {
PCRIndex uint32
Type uint32
diff --git a/metropolis/pkg/tpm/eventlog/internal/events.go b/metropolis/pkg/tpm/eventlog/internal/events.go
index d9b933b..f41ed1c 100644
--- a/metropolis/pkg/tpm/eventlog/internal/events.go
+++ b/metropolis/pkg/tpm/eventlog/internal/events.go
@@ -190,10 +190,10 @@
VariableData []byte // []int8
}
-// ParseUEFIVariableData parses the data section of an event structured as
-// a UEFI variable.
+// ParseUEFIVariableData parses the data section of an event structured as a
+// UEFI variable.
//
-// https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClient_Specific_Platform_Profile_for_TPM_2p0_1p04_PUBLIC.pdf#page=100
+// https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClient_Specific_Platform_Profile_for_TPM_2p0_1p04_PUBLIC.pdf#page=100
func ParseUEFIVariableData(r io.Reader) (ret UEFIVariableData, err error) {
err = binary.Read(r, binary.LittleEndian, &ret.Header)
if err != nil {
@@ -244,15 +244,16 @@
return UEFIVariableAuthority{Certs: certs}, err
}
-// efiSignatureData represents the EFI_SIGNATURE_DATA type.
-// See section "31.4.1 Signature Database" in the specification for more information.
+// efiSignatureData represents the EFI_SIGNATURE_DATA type. See section
+// "31.4.1 Signature Database" in the specification for more information.
type efiSignatureData struct {
SignatureOwner efiGUID
SignatureData []byte // []int8
}
// efiSignatureList represents the EFI_SIGNATURE_LIST type.
-// See section "31.4.1 Signature Database" in the specification for more information.
+// See section "31.4.1 Signature Database" in the specification for more
+// information.
type efiSignatureListHeader struct {
SignatureType efiGUID
SignatureListSize uint32
diff --git a/metropolis/pkg/tpm/tpm.go b/metropolis/pkg/tpm/tpm.go
index e650ff9..ab02dd3 100644
--- a/metropolis/pkg/tpm/tpm.go
+++ b/metropolis/pkg/tpm/tpm.go
@@ -45,28 +45,32 @@
)
var (
- // SecureBootPCRs are all PCRs that measure the current Secure Boot configuration.
- // This is what we want if we rely on secure boot to verify boot integrity. The firmware
- // hashes the secure boot policy and custom keys into the PCR.
+ // SecureBootPCRs are all PCRs that measure the current Secure Boot
+ // configuration. This is what we want if we rely on secure boot to verify
+ // boot integrity. The firmware hashes the secure boot policy and custom
+ // keys into the PCR.
//
// This requires an extra step that provisions the custom keys.
//
// Some background: https://mjg59.dreamwidth.org/48897.html?thread=1847297
- // (the initramfs issue mentioned in the article has been solved by integrating
- // it into the kernel binary, and we don't have a shim bootloader)
+ // (the initramfs issue mentioned in the article has been solved by
+ // integrating it into the kernel binary, and we don't have a shim
+ // bootloader)
//
- // PCR7 alone is not sufficient - it needs to be combined with firmware measurements.
+ // PCR7 alone is not sufficient - it needs to be combined with firmware
+ // measurements.
SecureBootPCRs = []int{7}
- // FirmwarePCRs are alle PCRs that contain the firmware measurements
- // See https://trustedcomputinggroup.org/wp-content/uploads/TCG_EFI_Platform_1_22_Final_-v15.pdf
+ // FirmwarePCRs are all PCRs that contain the firmware measurements. See:
+ // https://trustedcomputinggroup.org/wp-content/uploads/TCG_EFI_Platform_1_22_Final_-v15.pdf
FirmwarePCRs = []int{
0, // platform firmware
2, // option ROM code
3, // option ROM configuration and data
}
- // FullSystemPCRs are all PCRs that contain any measurements up to the currently running EFI payload.
+ // FullSystemPCRs are all PCRs that contain any measurements up to the
+ // currently running EFI payload.
FullSystemPCRs = []int{
0, // platform firmware
1, // host platform configuration
@@ -75,21 +79,25 @@
4, // EFI payload
}
- // Using FullSystemPCRs is the most secure, but also the most brittle option since updating the EFI
- // binary, updating the platform firmware, changing platform settings or updating the binary
- // would invalidate the sealed data. It's annoying (but possible) to predict values for PCR4,
- // and even more annoying for the firmware PCR (comparison to known values on similar hardware
- // is the only thing that comes to mind).
+ // Using FullSystemPCRs is the most secure, but also the most brittle
+ // option, since updating the EFI binary, updating the platform firmware,
+ // or changing platform settings would invalidate the sealed data. It's
+ // annoying (but possible) to predict values for PCR4, and even more
+ // annoying for the firmware PCR (comparison to known values on similar
+ // hardware is the only thing that comes to mind).
//
- // See also: https://github.com/mxre/sealkey (generates PCR4 from EFI image, BSD license)
+ // See also: https://github.com/mxre/sealkey (generates PCR4 from EFI
+ // image, BSD license)
//
- // Using only SecureBootPCRs is the easiest and still reasonably secure, if we assume that the
- // platform knows how to take care of itself (i.e. Intel Boot Guard), and that secure boot
- // is implemented properly. It is, however, a much larger amount of code we need to trust.
+ // Using only SecureBootPCRs is the easiest and still reasonably secure, if
+ // we assume that the platform knows how to take care of itself (i.e. Intel
+ // Boot Guard), and that secure boot is implemented properly. It is,
+ // however, a much larger amount of code we need to trust.
//
- // We do not care about PCR 5 (GPT partition table) since modifying it is harmless. All of
- // the boot options and cmdline are hardcoded in the kernel image, and we use no bootloader,
- // so there's no PCR for bootloader configuration or kernel cmdline.
+ // We do not care about PCR 5 (GPT partition table) since modifying it is
+ // harmless. All of the boot options and cmdline are hardcoded in the
+ // kernel image, and we use no bootloader, so there's no PCR for bootloader
+ // configuration or kernel cmdline.
)
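As a usage illustration of the trade-off discussed above, a hypothetical caller sealing against the Secure Boot configuration only, using the Seal function defined later in this file (the unlock key is a placeholder):

    func sealExample(unlockKey []byte) ([]byte, error) {
        // PCR7 only: survives firmware, settings and kernel updates, but
        // trusts the platform's Secure Boot implementation.
        return Seal(unlockKey, SecureBootPCRs)
    }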
var (
@@ -102,15 +110,17 @@
var (
// ErrNotExists is returned when no TPMs are available in the system
ErrNotExists = errors.New("no TPMs found")
- // ErrNotInitialized is returned when this package was not initialized successfully
+ // ErrNotInitialized is returned when this package was not initialized
+ // successfully
ErrNotInitialized = errors.New("no TPM was initialized")
)
// Singleton since the TPM is too
var tpm *TPM
-// We're serializing all TPM operations since it has a limited number of handles and recovering
-// if it runs out is difficult to implement correctly. Might also be marginally more secure.
+// We're serializing all TPM operations since it has a limited number of
+// handles and recovering if it runs out is difficult to implement correctly.
+// Might also be marginally more secure.
var lock sync.Mutex
// TPM represents a high-level interface to a connected TPM 2.0
@@ -118,13 +128,14 @@
logger logtree.LeveledLogger
device io.ReadWriteCloser
- // We keep the AK loaded since it's used fairly often and deriving it is expensive
+ // We keep the AK loaded since it's used fairly often and deriving it is
+ // expensive
akHandleCache tpmutil.Handle
akPublicKey crypto.PublicKey
}
-// Initialize finds and opens the TPM (if any). If there is no TPM available it returns
-// ErrNotExists
+// Initialize finds and opens the TPM (if any). If there is no TPM available,
+// it returns ErrNotExists.
func Initialize(logger logtree.LeveledLogger) error {
lock.Lock()
defer lock.Unlock()
@@ -170,7 +181,8 @@
return nil
}
-// GenerateSafeKey uses two sources of randomness (Kernel & TPM) to generate the key
+// GenerateSafeKey uses two sources of randomness (kernel & TPM) to generate
+// the key.
func GenerateSafeKey(size uint16) ([]byte, error) {
lock.Lock()
defer lock.Unlock()
@@ -204,8 +216,8 @@
return encryptionKey, nil
}
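The body of GenerateSafeKey is mostly elided in this hunk. One plausible shape for combining the two sources - a sketch only, under the assumption that XOR-mixing kernel and TPM randomness is what's intended, and assuming the crypto/rand, errors and io imports plus go-tpm's tpm2.GetRandom:

    func generateSafeKeySketch(device io.ReadWriter, size uint16) ([]byte, error) {
        kernelRand := make([]byte, size)
        if _, err := io.ReadFull(rand.Reader, kernelRand); err != nil {
            return nil, err
        }
        // The TPM may return fewer bytes than requested, so read in a loop.
        tpmRand := make([]byte, 0, size)
        for len(tpmRand) < int(size) {
            chunk, err := tpm2.GetRandom(device, size-uint16(len(tpmRand)))
            if err != nil {
                return nil, err
            }
            if len(chunk) == 0 {
                return nil, errors.New("TPM returned no random bytes")
            }
            tpmRand = append(tpmRand, chunk...)
        }
        // XOR the two streams: the result stays unpredictable as long as at
        // least one of the sources is.
        key := make([]byte, size)
        for i := range key {
            key[i] = kernelRand[i] ^ tpmRand[i]
        }
        return key, nil
    }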
-// Seal seals sensitive data and only allows access if the current platform configuration in
-// matches the one the data was sealed on.
+// Seal seals sensitive data and only allows access if the current platform
+// configuration matches the one the data was sealed on.
func Seal(data []byte, pcrs []int) ([]byte, error) {
lock.Lock()
defer lock.Unlock()
@@ -225,8 +237,8 @@
return sealedKeyRaw, nil
}
-// Unseal unseals sensitive data if the current platform configuration allows and sealing constraints
-// allow it.
+// Unseal unseals sensitive data if the current platform configuration and the
+// sealing constraints allow it.
func Unseal(data []byte) ([]byte, error) {
lock.Lock()
defer lock.Unlock()
@@ -256,7 +268,8 @@
return unsealedData, nil
}
-// Standard AK template for RSA2048 non-duplicatable restricted signing for attestation
+// Standard AK template for RSA2048 non-duplicatable restricted signing for
+// attestation
var akTemplate = tpm2.Public{
Type: tpm2.AlgRSA,
NameAlg: tpm2.AlgSHA256,
@@ -272,11 +285,12 @@
func loadAK() error {
var err error
- // Rationale: The AK is an EK-equivalent key and used only for attestation. Using a non-primary
- // key here would require us to store the wrapped version somewhere, which is inconvenient.
- // This being a primary key in the Endorsement hierarchy means that it can always be recreated
- // and can never be "destroyed". Under our security model this is of no concern since we identify
- // a node by its IK (Identity Key) which we can destroy.
+ // Rationale: The AK is an EK-equivalent key and used only for attestation.
+ // Using a non-primary key here would require us to store the wrapped
+ // version somewhere, which is inconvenient. This being a primary key in
+ // the Endorsement hierarchy means that it can always be recreated and can
+ // never be "destroyed". Under our security model this is of no concern
+ // since we identify a node by its IK (Identity Key) which we can destroy.
tpm.akHandleCache, tpm.akPublicKey, err = tpm2.CreatePrimary(tpm.device, tpm2.HandleEndorsement,
tpm2.PCRSelection{}, "", "", akTemplate)
return err
@@ -284,12 +298,14 @@
// Process documented in TCG EK Credential Profile 2.2.1
func loadEK() (tpmutil.Handle, crypto.PublicKey, error) {
- // The EK is a primary key which is supposed to be certified by the manufacturer of the TPM.
- // Its public attributes are standardized in TCG EK Credential Profile 2.0 Table 1. These need
- // to match exactly or we aren't getting the key the manufacturere signed. tpm2tools contains
- // such a template already, so we're using that instead of redoing it ourselves.
- // This ignores the more complicated ways EKs can be specified, the additional stuff you can do
- // is just absolutely crazy (see 2.2.1.2 onward)
+ // The EK is a primary key which is supposed to be certified by the
+ // manufacturer of the TPM. Its public attributes are standardized in TCG
+ // EK Credential Profile 2.0 Table 1. These need to match exactly or we
+ // aren't getting the key the manufacturer signed. tpm2tools contains such
+ // a template already, so we're using that instead of redoing it ourselves.
+ // This ignores the more complicated ways EKs can be specified; the
+ // additional stuff you can do is just absolutely crazy (see 2.2.1.2
+ // onward).
return tpm2.CreatePrimary(tpm.device, tpm2.HandleEndorsement,
tpm2.PCRSelection{}, "", "", tpm2tools.DefaultEKTemplateRSA())
}
@@ -313,10 +329,11 @@
return public.Encode()
}
-// TCG TPM v2.0 Provisioning Guidance v1.0 7.8 Table 2 and
-// TCG EK Credential Profile v2.1 2.2.1.4 de-facto Standard for Windows
-// These are both non-normative and reference Windows 10 documentation that's no longer available :(
-// But in practice this is what people are using, so if it's normative or not doesn't really matter
+// TCG TPM v2.0 Provisioning Guidance v1.0 7.8 Table 2 and TCG EK Credential
+// Profile v2.1 2.2.1.4 (de-facto standard for Windows). These are both
+// non-normative and reference Windows 10 documentation that's no longer
+// available :( But in practice this is what people are using, so whether it's
+// normative or not doesn't really matter.
const ekCertHandle = 0x01c00002
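For illustration, the certificate at this NV index can be read with go-tpm's NVRead and parsed as X.509. A minimal, hypothetical sketch (real EK certificates sometimes carry trailing padding that needs trimming before parsing, which is omitted here):

    func readEKCertSketch(device io.ReadWriter) (*x509.Certificate, error) {
        raw, err := tpm2.NVRead(device, ekCertHandle)
        if err != nil {
            return nil, fmt.Errorf("reading EK certificate NV index: %w", err)
        }
        return x509.ParseCertificate(raw)
    }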
// GetEKPublic gets the public key and (if available) Certificate of the EK
@@ -345,7 +362,8 @@
return publicKey, ekCertRaw, nil
}
-// MakeAKChallenge generates a challenge for TPM residency and attributes of the AK
+// MakeAKChallenge generates a challenge for TPM residency and attributes of
+// the AK
func MakeAKChallenge(ekPubKey, akPub []byte, nonce []byte) ([]byte, []byte, error) {
ekPubKeyData, err := x509.ParsePKIXPublicKey(ekPubKey)
if err != nil {
@@ -385,12 +403,14 @@
}
defer tpm2.FlushContext(tpm.device, ekHandle)
- // This is necessary since the EK requires an endorsement handle policy in its session
- // For us this is stupid because we keep all hierarchies open anyways since a) we cannot safely
- // store secrets on the OS side pre-global unlock and b) it makes no sense in this security model
- // since an uncompromised host OS will not let an untrusted entity attest as itself and a
- // compromised OS can either not pass PCR policy checks or the game's already over (you
- // successfully runtime-exploited a production Metropolis node)
+ // This is necessary since the EK requires an endorsement handle policy in
+ // its session. For us this is stupid because we keep all hierarchies open
+ // anyways since a) we cannot safely store secrets on the OS side
+ // pre-global unlock and b) it makes no sense in this security model since
+ // an uncompromised host OS will not let an untrusted entity attest as
+ // itself and a compromised OS can either not pass PCR policy checks or the
+ // game's already over (you successfully runtime-exploited a production
+ // Metropolis node).
endorsementSession, _, err := tpm2.StartAuthSession(
tpm.device,
tpm2.HandleNull,
@@ -412,8 +432,10 @@
for {
solution, err := tpm2.ActivateCredentialUsingAuth(tpm.device, []tpm2.AuthCommand{
- {Session: tpm2.HandlePasswordSession, Attributes: tpm2.AttrContinueSession}, // Use standard no-password authentication
- {Session: endorsementSession, Attributes: tpm2.AttrContinueSession}, // Use a full policy session for the EK
+ // Use standard no-password authentication
+ {Session: tpm2.HandlePasswordSession, Attributes: tpm2.AttrContinueSession},
+ // Use a full policy session for the EK
+ {Session: endorsementSession, Attributes: tpm2.AttrContinueSession},
}, tpm.akHandleCache, ekHandle, credBlob, secretChallenge)
if warn, ok := err.(tpm2.Warning); ok && warn.Code == tpm2.RCRetry {
time.Sleep(100 * time.Millisecond)
@@ -445,7 +467,8 @@
return nil
}
-// AttestPlatform performs a PCR quote using the AK and returns the quote and its signature
+// AttestPlatform performs a PCR quote using the AK and returns the quote and
+// its signature
func AttestPlatform(nonce []byte) ([]byte, []byte, error) {
lock.Lock()
defer lock.Unlock()
@@ -457,9 +480,9 @@
return []byte{}, []byte{}, fmt.Errorf("failed to load AK primary key: %w", err)
}
}
- // We only care about SHA256 since SHA1 is weak. This is supported on at least GCE and
- // Intel / AMD fTPM, which is good enough for now. Alg is null because that would just hash the
- // nonce, which is dumb.
+ // We only care about SHA256 since SHA1 is weak. This is supported on at
+ // least GCE and Intel / AMD fTPM, which is good enough for now. Alg is
+ // null because that would just hash the nonce, which is dumb.
quote, signature, err := tpm2.Quote(tpm.device, tpm.akHandleCache, "", "", nonce, srtmPCRs,
tpm2.AlgNull)
if err != nil {
@@ -468,8 +491,8 @@
return quote, signature.RSA.Signature, err
}
-// VerifyAttestPlatform verifies a given attestation. You can rely on all data coming back as being
-// from the TPM on which the AK is bound to.
+// VerifyAttestPlatform verifies a given attestation. You can rely on all data
+// coming back as being from the TPM to which the AK is bound.
func VerifyAttestPlatform(nonce, akPub, quote, signature []byte) (*tpm2.AttestationData, error) {
hash := crypto.SHA256.New()
hash.Write(quote)
@@ -495,12 +518,14 @@
if err != nil {
return nil, err
}
- // quoteData.Magic works together with the TPM's Restricted key attribute. If this attribute is set
- // (which it needs to be for the AK to be considered valid) the TPM will not sign external data
- // having this prefix with such a key. Only data that originates inside the TPM like quotes and
- // key certifications can have this prefix and sill be signed by a restricted key. This check
- // is thus vital, otherwise somebody can just feed the TPM an arbitrary attestation to sign with
- // its AK and this function will happily accept the forged attestation.
+ // quoteData.Magic works together with the TPM's Restricted key attribute.
+ // If this attribute is set (which it needs to be for the AK to be
+ // considered valid) the TPM will not sign external data having this prefix
+ // with such a key. Only data that originates inside the TPM like quotes
+ // and key certifications can have this prefix and still be signed by a
+ // restricted key. This check is thus vital; otherwise somebody can just
+ // feed the TPM an arbitrary attestation to sign with its AK and this
+ // function will happily accept the forged attestation.
if quoteData.Magic != tpmGeneratedValue {
return nil, errors.New("invalid TPM quote: data marker for internal data not set - forged attestation")
}
@@ -523,8 +548,9 @@
}
pcrs := make([][]byte, numSRTMPCRs)
- // The TPM can (and most do) return partial results. Let's just retry as many times as we have
- // PCRs since each read should return at least one PCR.
+ // TPMs can (and most do) return partial results. Let's just retry as many
+ // times as we have PCRs, since each read should return at least one PCR.
readLoop:
for i := 0; i < numSRTMPCRs; i++ {
sel := tpm2.PCRSelection{Hash: tpm2.AlgSHA256}
@@ -554,8 +580,9 @@
return pcrs, nil
}
-// GetMeasurmentLog returns the binary log of all data hashed into PCRs. The result can be parsed by eventlog.
-// As this library currently doesn't support extending PCRs it just returns the log as supplied by the EFI interface.
+// GetMeasurementLog returns the binary log of all data hashed into PCRs. The
+// result can be parsed by eventlog. As this library currently doesn't support
+// extending PCRs, it just returns the log as supplied by the EFI interface.
func GetMeasurementLog() ([]byte, error) {
return ioutil.ReadFile("/sys/kernel/security/tpm0/binary_bios_measurements")
}
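A typical consumer pairs this with the eventlog package touched earlier in this diff. A hedged usage sketch, assuming eventlog.ParseEventLog as described by its doc comment above:

    func replayLogSketch() error {
        raw, err := GetMeasurementLog()
        if err != nil {
            return err
        }
        // Parse the unverified log; verification against quoted PCR values
        // is a separate step.
        if _, err := eventlog.ParseEventLog(raw); err != nil {
            return fmt.Errorf("measurement log does not parse: %w", err)
        }
        return nil
    }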