metropolis: unify utility packages
One last sweeping rename / reshuffle.
We get rid of //metropolis/node/common and //golibs, unifying them into
a single //metropolis/pkg meta-package.
This is to be documented somewhere properly, but here's the new logic
behind selecting where to place a new library package:
- if it's specific to k8s-on-metropolis, put it in
//metropolis/node/kubernetes/*. This is a self-contained tree that
other paths cannot import from.
- if it's a big new subsystem of the metropolis core, put it in
//metropolis/node/core. This can be imported by anything in
//m/n (eg. the Kubernetes code at //m/n/kubernetes).
- otherwise, treat it as a generic library that's part of the metropolis
project, and put it in //metropolis/pkg. This can be imported by
anything within //metropolis.
This will be followed up by a diff that updates visibility rules.
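As an illustration (hypothetical importer file; the concrete moves are all
visible in the diff below), code that previously imported the supervisor and
port-allocation helpers from the old locations now does so via
//metropolis/pkg:

    import (
        // Previously //golibs/common:
        "git.monogon.dev/source/nexantic.git/metropolis/pkg/freeport"
        // Previously //metropolis/node/common/supervisor:
        "git.monogon.dev/source/nexantic.git/metropolis/pkg/supervisor"
    )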
Test Plan: Pure refactor, CI only.
X-Origin-Diff: phab/D683
GitOrigin-RevId: 883e7f09a7d22d64e966d07bbe839454ed081c79
diff --git a/metropolis/node/core/BUILD.bazel b/metropolis/node/core/BUILD.bazel
index 2398205..004bbc8 100644
--- a/metropolis/node/core/BUILD.bazel
+++ b/metropolis/node/core/BUILD.bazel
@@ -15,18 +15,18 @@
visibility = ["//visibility:private"],
deps = [
"//metropolis/node:go_default_library",
- "//metropolis/node/common/supervisor:go_default_library",
"//metropolis/node/core/cluster:go_default_library",
"//metropolis/node/core/consensus/ca:go_default_library",
"//metropolis/node/core/localstorage:go_default_library",
"//metropolis/node/core/localstorage/declarative:go_default_library",
- "//metropolis/node/core/logtree:go_default_library",
"//metropolis/node/core/network:go_default_library",
"//metropolis/node/core/network/dns:go_default_library",
- "//metropolis/node/core/tpm:go_default_library",
"//metropolis/node/kubernetes:go_default_library",
"//metropolis/node/kubernetes/containerd:go_default_library",
"//metropolis/node/kubernetes/pki:go_default_library",
+ "//metropolis/pkg/logtree:go_default_library",
+ "//metropolis/pkg/supervisor:go_default_library",
+ "//metropolis/pkg/tpm:go_default_library",
"//metropolis/proto/api:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
diff --git a/metropolis/node/core/cluster/BUILD.bazel b/metropolis/node/core/cluster/BUILD.bazel
index 70daba2..0e7a7b1 100644
--- a/metropolis/node/core/cluster/BUILD.bazel
+++ b/metropolis/node/core/cluster/BUILD.bazel
@@ -10,11 +10,11 @@
visibility = ["//metropolis/node/core:__subpackages__"],
deps = [
"//metropolis/node:go_default_library",
- "//metropolis/node/common/supervisor:go_default_library",
"//metropolis/node/core/consensus:go_default_library",
"//metropolis/node/core/localstorage:go_default_library",
"//metropolis/node/core/localstorage/declarative:go_default_library",
"//metropolis/node/core/network:go_default_library",
+ "//metropolis/pkg/supervisor:go_default_library",
"//metropolis/proto/api:go_default_library",
"//metropolis/proto/internal:go_default_library",
"@com_github_cenkalti_backoff_v4//:go_default_library",
diff --git a/metropolis/node/core/cluster/manager.go b/metropolis/node/core/cluster/manager.go
index 5f072b8..4cb7ea9 100644
--- a/metropolis/node/core/cluster/manager.go
+++ b/metropolis/node/core/cluster/manager.go
@@ -32,11 +32,11 @@
"go.etcd.io/etcd/clientv3"
common "git.monogon.dev/source/nexantic.git/metropolis/node"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/supervisor"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/consensus"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage/declarative"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/network"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/supervisor"
apb "git.monogon.dev/source/nexantic.git/metropolis/proto/api"
)
diff --git a/metropolis/node/core/consensus/BUILD.bazel b/metropolis/node/core/consensus/BUILD.bazel
index cab2c0a..464dc79 100644
--- a/metropolis/node/core/consensus/BUILD.bazel
+++ b/metropolis/node/core/consensus/BUILD.bazel
@@ -7,9 +7,9 @@
visibility = ["//:__subpackages__"],
deps = [
"//metropolis/node:go_default_library",
- "//metropolis/node/common/supervisor:go_default_library",
"//metropolis/node/core/consensus/ca:go_default_library",
"//metropolis/node/core/localstorage:go_default_library",
+ "//metropolis/pkg/supervisor:go_default_library",
"@io_etcd_go_etcd//clientv3:go_default_library",
"@io_etcd_go_etcd//clientv3/namespace:go_default_library",
"@io_etcd_go_etcd//embed:go_default_library",
@@ -22,9 +22,9 @@
srcs = ["consensus_test.go"],
embed = [":go_default_library"],
deps = [
- "//golibs/common:go_default_library",
- "//metropolis/node/common/supervisor:go_default_library",
"//metropolis/node/core/localstorage:go_default_library",
"//metropolis/node/core/localstorage/declarative:go_default_library",
+ "//metropolis/pkg/freeport:go_default_library",
+ "//metropolis/pkg/supervisor:go_default_library",
],
)
diff --git a/metropolis/node/core/consensus/consensus.go b/metropolis/node/core/consensus/consensus.go
index b707a27..241ce03 100644
--- a/metropolis/node/core/consensus/consensus.go
+++ b/metropolis/node/core/consensus/consensus.go
@@ -44,10 +44,10 @@
"go.etcd.io/etcd/embed"
"go.uber.org/atomic"
- common "git.monogon.dev/source/nexantic.git/metropolis/node"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/supervisor"
+ node "git.monogon.dev/source/nexantic.git/metropolis/node"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/consensus/ca"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/supervisor"
)
const (
@@ -118,7 +118,7 @@
port := s.config.Port
if port == 0 {
- port = common.ConsensusPort
+ port = node.ConsensusPort
}
cfg := embed.NewConfig()
diff --git a/metropolis/node/core/consensus/consensus_test.go b/metropolis/node/core/consensus/consensus_test.go
index 22bcf20..a25ebef 100644
--- a/metropolis/node/core/consensus/consensus_test.go
+++ b/metropolis/node/core/consensus/consensus_test.go
@@ -26,10 +26,10 @@
"testing"
"time"
- "git.monogon.dev/source/nexantic.git/golibs/common"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/supervisor"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage/declarative"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/freeport"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/supervisor"
)
type boilerplate struct {
@@ -90,7 +90,7 @@
InitialCluster: "127.0.0.1",
ExternalHost: "127.0.0.1",
ListenHost: "127.0.0.1",
- Port: common.MustConsume(common.AllocateTCPPort()),
+ Port: freeport.MustConsume(freeport.AllocateTCPPort()),
})
supervisor.New(b.ctx, etcd.Run)
@@ -116,7 +116,7 @@
InitialCluster: "127.0.0.1",
ExternalHost: "127.0.0.1",
ListenHost: "127.0.0.1",
- Port: common.MustConsume(common.AllocateTCPPort()),
+ Port: freeport.MustConsume(freeport.AllocateTCPPort()),
})
supervisor.New(b.ctx, etcd.Run)
waitEtcd(t, etcd)
@@ -161,7 +161,7 @@
InitialCluster: "127.0.0.1",
ExternalHost: "127.0.0.1",
ListenHost: "127.0.0.1",
- Port: common.MustConsume(common.AllocateTCPPort()),
+ Port: freeport.MustConsume(freeport.AllocateTCPPort()),
})
ctx, ctxC := context.WithCancel(b.ctx)
supervisor.New(ctx, etcd.Run)
@@ -207,7 +207,7 @@
InitialCluster: "127.0.0.1",
ExternalHost: "127.0.0.1",
ListenHost: "127.0.0.1",
- Port: common.MustConsume(common.AllocateTCPPort()),
+ Port: freeport.MustConsume(freeport.AllocateTCPPort()),
})
supervisor.New(b.ctx, etcd.Run)
waitEtcd(t, etcd)
diff --git a/metropolis/node/core/debug_service.go b/metropolis/node/core/debug_service.go
index 8e81d2d..6866479 100644
--- a/metropolis/node/core/debug_service.go
+++ b/metropolis/node/core/debug_service.go
@@ -28,8 +28,8 @@
common "git.monogon.dev/source/nexantic.git/metropolis/node"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/cluster"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/consensus/ca"
- "git.monogon.dev/source/nexantic.git/metropolis/node/core/logtree"
"git.monogon.dev/source/nexantic.git/metropolis/node/kubernetes"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/logtree"
apb "git.monogon.dev/source/nexantic.git/metropolis/proto/api"
)
diff --git a/metropolis/node/core/localstorage/BUILD.bazel b/metropolis/node/core/localstorage/BUILD.bazel
index 099a380..a38ae9f 100644
--- a/metropolis/node/core/localstorage/BUILD.bazel
+++ b/metropolis/node/core/localstorage/BUILD.bazel
@@ -13,7 +13,7 @@
deps = [
"//metropolis/node/core/localstorage/crypt:go_default_library",
"//metropolis/node/core/localstorage/declarative:go_default_library",
- "//metropolis/node/core/tpm:go_default_library",
+ "//metropolis/pkg/tpm:go_default_library",
"@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/metropolis/node/core/localstorage/crypt/BUILD.bazel b/metropolis/node/core/localstorage/crypt/BUILD.bazel
index 38e27d6..41cb78e 100644
--- a/metropolis/node/core/localstorage/crypt/BUILD.bazel
+++ b/metropolis/node/core/localstorage/crypt/BUILD.bazel
@@ -9,8 +9,8 @@
importpath = "git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage/crypt",
visibility = ["//metropolis/node/core/localstorage:__subpackages__"],
deps = [
- "//metropolis/node/common/devicemapper:go_default_library",
- "//metropolis/node/common/sysfs:go_default_library",
+ "//metropolis/pkg/devicemapper:go_default_library",
+ "//metropolis/pkg/sysfs:go_default_library",
"@com_github_rekby_gpt//:go_default_library",
"@org_golang_x_sys//unix:go_default_library",
],
diff --git a/metropolis/node/core/localstorage/crypt/blockdev.go b/metropolis/node/core/localstorage/crypt/blockdev.go
index 5abe60b..1a7c3d4 100644
--- a/metropolis/node/core/localstorage/crypt/blockdev.go
+++ b/metropolis/node/core/localstorage/crypt/blockdev.go
@@ -27,7 +27,7 @@
"github.com/rekby/gpt"
"golang.org/x/sys/unix"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/sysfs"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/sysfs"
)
var (
diff --git a/metropolis/node/core/localstorage/crypt/crypt.go b/metropolis/node/core/localstorage/crypt/crypt.go
index e0a8321..450f729 100644
--- a/metropolis/node/core/localstorage/crypt/crypt.go
+++ b/metropolis/node/core/localstorage/crypt/crypt.go
@@ -25,7 +25,7 @@
"golang.org/x/sys/unix"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/devicemapper"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/devicemapper"
)
func readDataSectors(path string) (uint64, error) {
diff --git a/metropolis/node/core/localstorage/directory_data.go b/metropolis/node/core/localstorage/directory_data.go
index 52abbea..3f41738 100644
--- a/metropolis/node/core/localstorage/directory_data.go
+++ b/metropolis/node/core/localstorage/directory_data.go
@@ -25,7 +25,7 @@
"git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage/crypt"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage/declarative"
- "git.monogon.dev/source/nexantic.git/metropolis/node/core/tpm"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/tpm"
)
var keySize uint16 = 256 / 8
diff --git a/metropolis/node/core/logtree/BUILD.bazel b/metropolis/node/core/logtree/BUILD.bazel
deleted file mode 100644
index 120bf9f..0000000
--- a/metropolis/node/core/logtree/BUILD.bazel
+++ /dev/null
@@ -1,32 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "doc.go",
- "journal.go",
- "journal_entry.go",
- "journal_subscriber.go",
- "leveled.go",
- "leveled_payload.go",
- "logtree.go",
- "logtree_access.go",
- "logtree_entry.go",
- "logtree_publisher.go",
- ],
- importpath = "git.monogon.dev/source/nexantic.git/metropolis/node/core/logtree",
- visibility = ["//visibility:public"],
- deps = [
- "//metropolis/node/common/logbuffer:go_default_library",
- "//metropolis/proto/api:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- srcs = [
- "journal_test.go",
- "logtree_test.go",
- ],
- embed = [":go_default_library"],
-)
diff --git a/metropolis/node/core/logtree/doc.go b/metropolis/node/core/logtree/doc.go
deleted file mode 100644
index ab3c537..0000000
--- a/metropolis/node/core/logtree/doc.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package logtree implements a tree-shaped logger for debug events. It provides log publishers (ie. Go code) with a
-glog-like API and io.Writer API, with loggers placed in a hierarchical structure defined by a dot-delimited path
-(called a DN, short for Distinguished Name).
-
- tree.MustLeveledFor("foo.bar.baz").Warningf("Houston, we have a problem: %v", err)
- fmt.Fprintf(tree.MustRawFor("foo.bar.baz"), "some\nunstructured\ndata\n")
-
-Logs in this context are unstructured, operational and developer-centric human readable text messages presented as lines
-of text to consumers, with some attached metadata. Logtree does not deal with 'structured' logs as some parts of the
-industry do, and instead defers any machine-readable logs to either be handled by metrics systems like Prometheus or
-event sourcing systems like Kafka.
-
-Tree Structure
-
-As an example, consider an application that produces logs with the following DNs:
-
- listener.http
- listener.grpc
- svc
- svc.cache
- svc.cache.gc
-
-This would correspond to a tree as follows:
-
- .------.
- | "" |
- | (root) |
- '------'
- .----------------' '------.
- .--------------. .---------------.
- | svc | | listener |
- '--------------' '---------------'
- | .----' '----.
- .--------------. .---------------. .---------------.
- | svc.cache | | listener.http | | listener.grpc |
- '--------------' '---------------' '---------------'
- |
- .--------------.
- | svc.cache.gc |
- '--------------'
-
-In this setup, every DN acts as a separate logging target, each with its own retention policy and quota. Logging to a DN
-under foo.bar does NOT automatically log to foo - all tree mechanisms are applied on log access by consumers. Loggers
-are automatically created on first use, and importantly, can be created at any time, and will automatically be created
-if a sub-DN is created that requires a parent DN to exist first. Note, for instance, that a `listener` logging node was
-created even though the example application only logged to `listener.http` and `listener.grpc`.
-
-An implicit root node is always present in the tree, accessed by DN "" (an empty string). All other logger nodes are
-children (or transitive children) of the root node.
-
-Log consumers (application code that reads the logs and passes them on to operators, or ships them off for aggregation in
-other systems) can select subtrees of logs for readout. In the example tree, a consumer could select to either read all
-logs of the entire tree, just a single DN (like svc), or a subtree (like everything under listener, ie. messages emitted
-to listener.http and listener.grpc).
-
-Leveled Log Producer API
-
-As part of the glog-like logging API available to producers, the following metadata is attached to emitted logs in
-addition to the DN of the logger to which the log entry was emitted:
-
- - timestamp at which the entry was emitted
- - a severity level (one of FATAL, ERROR, WARN or INFO)
- - a source of the message (file name and line number)
-
-In addition, the logger mechanism supports a variable verbosity level (so-called 'V-logging') that can be set at every
-node of the tree. For more information about the producer-facing logging API, see the documentation of the LeveledLogger
-interface, which is the main interface exposed to log producers.
-
-If the submitted message contains newlines, it will be split accordingly into a single log entry that contains multiple
-string lines. This allows for log producers to submit long, multi-line messages that are guaranteed to be non-interleaved
-with other entries, and allows for access API consumers to maintain semantic linking between multiple lines being emitted
-as a single atomic entry.
-
-Raw Log Producer API
-
-In addition to leveled, glog-like logging, LogTree supports 'raw logging'. This is implemented as an io.Writer that will
-split incoming bytes into newline-delimited lines, and log them into that logtree's DN. This mechanism is primarily
-intended to support storage of unstructured log data from external processes - for example binaries running with redirected
-stdout/stderr.
-
-Log Access API
-
-The Log Access API is mostly exposed via a single function on the LogTree struct: Read. It allows access to log entries
-that have been already buffered inside LogTree and to subscribe to receive future entries over a channel. As outlined
-earlier, any access can specify whether it is just interested in a single logger (addressed by DN), or a subtree of
-loggers.
-
-Due to the current implementation of the logtree, subtree accesses of backlogged data are significantly slower than
-accessing data of just one DN, or the whole tree (as every subtree backlog access performs a scan on all logged data).
-Thus, log consumers should be aware that it is much better to stream and buffer logs specific to some long-standing
-logging request on their own, rather than repeatedly perform reads of a subtree backlog.
-
-The data returned from the log access API is a LogEntry, which itself can contain either a raw logging entry, or a leveled
-logging entry. Helper functions are available on LogEntry that allow canonical string representations to be returned, for
-easy use in consuming tools/interfaces. Alternatively, the consumer can itself access the internal raw/leveled entries and
-print them according to their own preferred format.
-
-*/
-package logtree
diff --git a/metropolis/node/core/logtree/journal.go b/metropolis/node/core/logtree/journal.go
deleted file mode 100644
index 78c55a1..0000000
--- a/metropolis/node/core/logtree/journal.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "errors"
- "strings"
- "sync"
-)
-
-// DN is the Distinguished Name, a dot-delimited path used to address loggers within a LogTree. For example, "foo.bar"
-// designates the 'bar' logger node under the 'foo' logger node under the root node of the logger. An empty string is
-// the root node of the tree.
-type DN string
-
-var (
- ErrInvalidDN = errors.New("invalid DN")
-)
-
-// Path returns the parts of a DN, ie. all the elements of the dot-delimited DN path. For the root node, an empty list
-// will be returned. An error will be returned if the DN is invalid (contains empty parts, eg. `foo..bar`, `.foo` or
-// `foo.`).
-func (d DN) Path() ([]string, error) {
- if d == "" {
- return nil, nil
- }
- parts := strings.Split(string(d), ".")
- for _, p := range parts {
- if p == "" {
- return nil, ErrInvalidDN
- }
- }
- return parts, nil
-}
-
-// journal is the main log recording structure of logtree. It manages linked lists containing the actual log entries,
-// and implements scans across them. It does not understand the hierarchical nature of logtree, and instead sees all
-// entries as part of a global linked list and a local linked list for a given DN.
-//
-// The global linked list is represented by the head/tail pointers in journal and nextGlobal/prevGlobal pointers in
-// entries. The local linked lists are represented by heads[DN]/tails[DN] pointers in journal and nextLocal/prevLocal
-// pointers in entries:
-//
-// .------------. .------------. .------------.
-// | dn: A.B | | dn: Z | | dn: A.B |
-// | time: 1 | | time: 2 | | time: 3 |
-// |------------| |------------| |------------|
-// | nextGlobal :------->| nextGlobal :------->| nextGlobal :--> nil
-// nil <-: prevGlobal |<-------: prevGlobal |<-------| prevGlobal |
-// |------------| |------------| n |------------|
-// | nextLocal :---. n | nextLocal :->i .-->| nextLocal :--> nil
-// nil <-: prevLocal |<--: i<-: prevLocal | l :---| prevLocal |
-// '------------' | l '------------' | '------------'
-// ^ '----------------------' ^
-// | ^ |
-// | | |
-// ( head ) ( tails[Z] ) ( tail )
-// ( heads[A.B] ) ( heads[Z] ) ( tails[A.B] )
-//
-type journal struct {
- // mu locks the rest of the structure. It must be taken during any operation on the journal.
- mu sync.RWMutex
-
- // tail is the side of the global linked list that contains the newest log entry, ie. the one that has been pushed
- // the most recently. It can be nil when no log entry has yet been pushed. The global linked list contains all log
- // entries pushed to the journal.
- tail *entry
- // head is the side of the global linked list that contains the oldest log entry. It can be nil when no log entry
- // has yet been pushed.
- head *entry
-
- // tails are the tail sides of a local linked list for a given DN, ie. the sides that contain the newest entry. They
- // are nil if there are no log entries for that DN.
- tails map[DN]*entry
- // heads are the head sides of a local linked list for a given DN, ie. the sides that contain the oldest entry. They
- // are nil if there are no log entries for that DN.
- heads map[DN]*entry
-
- // quota is a map from DN to quota structure, representing the quota policy of a particular DN-designated logger.
- quota map[DN]*quota
-
-	// subscribers are observers of logs. New log entries get emitted to channels present in the subscriber structure,
- // after filtering them through subscriber-provided filters (eg. to limit events to subtrees that interest that
- // particular subscriber).
- subscribers []*subscriber
-}
-
-// newJournal creates a new empty journal. All journals are independent from each other, and as such, all LogTrees are
-// also independent.
-func newJournal() *journal {
- return &journal{
- tails: make(map[DN]*entry),
- heads: make(map[DN]*entry),
-
- quota: make(map[DN]*quota),
- }
-}
-
-// filter is a predicate that returns true if a log subscriber or reader is interested in a given log entry.
-type filter func(*entry) bool
-
-// filterAll returns a filter that accepts all log entries.
-func filterAll() filter {
- return func(*entry) bool { return true }
-}
-
-// filterExact returns a filter that accepts only log entries at a given exact DN. This filter should not be used in
-// conjunction with journal.scanEntries - instead, journal.getEntries should be used, as it is much faster.
-func filterExact(dn DN) filter {
- return func(e *entry) bool {
- return e.origin == dn
- }
-}
-
-// filterSubtree returns a filter that accepts all log entries at a given DN and sub-DNs. For example, filterSubtree at
-// "foo.bar" would allow entries at "foo.bar", "foo.bar.baz", but not "foo" or "foo.barr".
-func filterSubtree(root DN) filter {
- if root == "" {
- return filterAll()
- }
-
- rootParts := strings.Split(string(root), ".")
- return func(e *entry) bool {
- parts := strings.Split(string(e.origin), ".")
- if len(parts) < len(rootParts) {
- return false
- }
-
- for i, p := range rootParts {
- if parts[i] != p {
- return false
- }
- }
-
- return true
- }
-}
-
-// filterSeverity returns a filter that accepts log entries at a given severity level or above. See the Severity type
-// for more information about severity levels.
-func filterSeverity(atLeast Severity) filter {
- return func(e *entry) bool {
- return e.leveled != nil && e.leveled.severity.AtLeast(atLeast)
- }
-}
-
-func filterOnlyRaw(e *entry) bool {
- return e.raw != nil
-}
-
-func filterOnlyLeveled(e *entry) bool {
- return e.leveled != nil
-}
-
-// scanEntries does a linear scan through the global entry list and returns all entries that match the given filters. If
-// retrieving entries for an exact DN, getEntries should be used instead, as it will leverage DN-local linked lists
-// to retrieve them faster.
-// journal.mu must be taken at R or RW level when calling this function.
-func (j *journal) scanEntries(filters ...filter) (res []*entry) {
- cur := j.tail
- for {
- if cur == nil {
- return
- }
-
- passed := true
- for _, filter := range filters {
- if !filter(cur) {
- passed = false
- break
- }
- }
- if passed {
- res = append(res, cur)
- }
- cur = cur.nextGlobal
- }
-}
-
-// getEntries returns all entries at a given DN. This is faster than a scanEntries(filterExact), as it uses the special
-// local linked list pointers to traverse the journal. Additional filters can be passed to further limit the entries
-// returned, but a scan through this DN's local linked list will be performed regardless.
-// journal.mu must be taken at R or RW level when calling this function.
-func (j *journal) getEntries(exact DN, filters ...filter) (res []*entry) {
- cur := j.tails[exact]
- for {
- if cur == nil {
- return
- }
-
- passed := true
- for _, filter := range filters {
- if !filter(cur) {
- passed = false
- break
- }
- }
- if passed {
- res = append(res, cur)
- }
- cur = cur.nextLocal
- }
-
-}
diff --git a/metropolis/node/core/logtree/journal_entry.go b/metropolis/node/core/logtree/journal_entry.go
deleted file mode 100644
index 61619b3..0000000
--- a/metropolis/node/core/logtree/journal_entry.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import "git.monogon.dev/source/nexantic.git/metropolis/node/common/logbuffer"
-
-// entry is a journal entry, representing a single log event (encompassed in a Payload) at a given DN.
-// See the journal struct for more information about the global/local linked lists.
-type entry struct {
-	// origin is the DN at which the log entry was recorded, or conversely, the DN at which it will be available.
- origin DN
- // journal is the parent journal of this entry. An entry can belong only to a single journal. This pointer is used
- // to mutate the journal's head/tail pointers when unlinking an entry.
- journal *journal
- // leveled is the leveled log entry for this entry, if this log entry was emitted by leveled logging. Otherwise it
- // is nil.
- leveled *LeveledPayload
- // raw is the raw log entry for this entry, if this log entry was emitted by raw logging. Otherwise it is nil.
- raw *logbuffer.Line
-
- // prevGlobal is the previous entry in the global linked list, or nil if this entry is the oldest entry in the
- // global linked list.
- prevGlobal *entry
- // nextGlobal is the next entry in the global linked list, or nil if this entry is the newest entry in the global
- // linked list.
- nextGlobal *entry
-
- // prevLocal is the previous entry in this entry DN's local linked list, or nil if this entry is the oldest entry in
- // this local linked list.
- prevLocal *entry
-	// nextLocal is the next entry in this entry DN's local linked list, or nil if this entry is the newest entry in
- // this local linked list.
- nextLocal *entry
-
- // seqLocal is a counter within a local linked list that increases by one each time a new log entry is added. It is
- // used to quickly establish local linked list sizes (by subtracting seqLocal from both ends). This setup allows for
- // O(1) length calculation for local linked lists as long as entries are only unlinked from the head or tail (which
- // is the case in the current implementation).
- seqLocal uint64
-}
-
-// external returns a LogEntry object for this entry, ie. the public version of this object, without fields relating to
-// the parent journal, linked lists, sequences, etc. These objects are visible to library consumers.
-func (e *entry) external() *LogEntry {
- return &LogEntry{
- DN: e.origin,
- Leveled: e.leveled,
- Raw: e.raw,
- }
-}
-
-// unlink removes this entry from both global and local linked lists, updating the journal's head/tail pointers if
-// needed.
-// journal.mu must be taken as RW
-func (e *entry) unlink() {
- // Unlink from the global linked list.
- if e.prevGlobal != nil {
- e.prevGlobal.nextGlobal = e.nextGlobal
- }
- if e.nextGlobal != nil {
- e.nextGlobal.prevGlobal = e.prevGlobal
- }
- // Update journal head/tail pointers.
- if e.journal.head == e {
- e.journal.head = e.prevGlobal
- }
- if e.journal.tail == e {
- e.journal.tail = e.nextGlobal
- }
-
- // Unlink from the local linked list.
- if e.prevLocal != nil {
- e.prevLocal.nextLocal = e.nextLocal
- }
- if e.nextLocal != nil {
- e.nextLocal.prevLocal = e.prevLocal
- }
- // Update journal head/tail pointers.
- if e.journal.heads[e.origin] == e {
- e.journal.heads[e.origin] = e.prevLocal
- }
- if e.journal.tails[e.origin] == e {
- e.journal.tails[e.origin] = e.nextLocal
- }
-}
-
-// quota describes the quota policy for logging at a given DN.
-type quota struct {
- // origin is the exact DN that this quota applies to.
- origin DN
- // max is the maximum count of log entries permitted for this DN - ie, the maximum size of the local linked list.
- max uint64
-}
-
-// append adds an entry at the head of the global and local linked lists.
-func (j *journal) append(e *entry) {
- j.mu.Lock()
- defer j.mu.Unlock()
-
- e.journal = j
-
- // Insert at head in global linked list, set pointers.
- e.nextGlobal = nil
- e.prevGlobal = j.head
- if j.head != nil {
- j.head.nextGlobal = e
- }
- j.head = e
- if j.tail == nil {
- j.tail = e
- }
-
- // Create quota if necessary.
- if _, ok := j.quota[e.origin]; !ok {
-		j.quota[e.origin] = &quota{origin: e.origin, max: 8192}
- }
-
- // Insert at head in local linked list, calculate seqLocal, set pointers.
- e.nextLocal = nil
- e.prevLocal = j.heads[e.origin]
- if j.heads[e.origin] != nil {
- j.heads[e.origin].nextLocal = e
- e.seqLocal = e.prevLocal.seqLocal + 1
- } else {
- e.seqLocal = 0
- }
- j.heads[e.origin] = e
- if j.tails[e.origin] == nil {
- j.tails[e.origin] = e
- }
-
- // Apply quota to the local linked list that this entry got inserted to, ie. remove elements in excess of the
- // quota.max count.
- quota := j.quota[e.origin]
- count := (j.heads[e.origin].seqLocal - j.tails[e.origin].seqLocal) + 1
- if count > quota.max {
- // Keep popping elements off the tail of the local linked list until quota is not violated.
- left := count - quota.max
- cur := j.tails[e.origin]
- for {
- // This shouldn't happen if quota.max >= 1.
- if cur == nil {
- break
- }
- if left == 0 {
- break
- }
- el := cur
- cur = el.nextLocal
- // Unlinking the entry unlinks it from both the global and local linked lists.
- el.unlink()
- left -= 1
- }
- }
-}
diff --git a/metropolis/node/core/logtree/journal_subscriber.go b/metropolis/node/core/logtree/journal_subscriber.go
deleted file mode 100644
index e6c7c62..0000000
--- a/metropolis/node/core/logtree/journal_subscriber.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "sync/atomic"
-)
-
-// subscriber is an observer for new entries that are appended to the journal.
-type subscriber struct {
- // filters that entries need to pass through in order to be sent to the subscriber.
- filters []filter
- // dataC is the channel to which entries that pass filters will be sent. The channel must be drained regularly in
- // order to prevent accumulation of goroutines and possible reordering of messages.
- dataC chan *LogEntry
- // doneC is a channel that is closed once the subscriber wishes to stop receiving notifications.
- doneC chan struct{}
-	// missed is the number of messages missed by the subscriber by not receiving from dataC fast enough.
- missed uint64
-}
-
-// subscribe attaches a subscriber to the journal.
-// mu must be taken in W mode
-func (j *journal) subscribe(sub *subscriber) {
- j.subscribers = append(j.subscribers, sub)
-}
-
-// notify sends an entry to all subscribers that wish to receive it.
-func (j *journal) notify(e *entry) {
- j.mu.Lock()
- defer j.mu.Unlock()
-
- newSub := make([]*subscriber, 0, len(j.subscribers))
- for _, sub := range j.subscribers {
- select {
- case <-sub.doneC:
- close(sub.dataC)
- continue
- default:
- newSub = append(newSub, sub)
- }
-
-		passed := true
-		for _, filter := range sub.filters {
-			if !filter(e) {
-				passed = false
-				break
-			}
-		}
-		if !passed {
-			continue
-		}
- select {
- case sub.dataC <- e.external():
- default:
- atomic.AddUint64(&sub.missed, 1)
- }
- }
- j.subscribers = newSub
-}
diff --git a/metropolis/node/core/logtree/journal_test.go b/metropolis/node/core/logtree/journal_test.go
deleted file mode 100644
index 474748a..0000000
--- a/metropolis/node/core/logtree/journal_test.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "fmt"
- "strings"
- "testing"
- "time"
-)
-
-func testPayload(msg string) *LeveledPayload {
- return &LeveledPayload{
- messages: []string{msg},
- timestamp: time.Now(),
- severity: INFO,
- file: "main.go",
- line: 1337,
- }
-}
-
-func TestJournalRetention(t *testing.T) {
- j := newJournal()
-
- for i := 0; i < 9000; i += 1 {
- e := &entry{
- origin: "main",
- leveled: testPayload(fmt.Sprintf("test %d", i)),
- }
- j.append(e)
- }
-
- entries := j.getEntries("main")
- if want, got := 8192, len(entries); want != got {
- t.Fatalf("wanted %d entries, got %d", want, got)
- }
- for i, entry := range entries {
- want := fmt.Sprintf("test %d", (9000-8192)+i)
- got := strings.Join(entry.leveled.messages, "\n")
- if want != got {
- t.Fatalf("wanted entry %q, got %q", want, got)
- }
- }
-}
-
-func TestJournalQuota(t *testing.T) {
- j := newJournal()
-
- for i := 0; i < 9000; i += 1 {
- j.append(&entry{
- origin: "chatty",
- leveled: testPayload(fmt.Sprintf("chatty %d", i)),
- })
- if i%10 == 0 {
- j.append(&entry{
- origin: "solemn",
- leveled: testPayload(fmt.Sprintf("solemn %d", i)),
- })
- }
- }
-
- entries := j.getEntries("chatty")
- if want, got := 8192, len(entries); want != got {
- t.Fatalf("wanted %d chatty entries, got %d", want, got)
- }
- entries = j.getEntries("solemn")
- if want, got := 900, len(entries); want != got {
- t.Fatalf("wanted %d solemn entries, got %d", want, got)
- }
- entries = j.getEntries("absent")
- if want, got := 0, len(entries); want != got {
- t.Fatalf("wanted %d absent entries, got %d", want, got)
- }
-
- entries = j.scanEntries(filterAll())
- if want, got := 8192+900, len(entries); want != got {
- t.Fatalf("wanted %d total entries, got %d", want, got)
- }
- setMessages := make(map[string]bool)
- for _, entry := range entries {
- setMessages[strings.Join(entry.leveled.messages, "\n")] = true
- }
-
- for i := 0; i < 900; i += 1 {
- want := fmt.Sprintf("solemn %d", i*10)
- if !setMessages[want] {
- t.Fatalf("could not find entry %q in journal", want)
- }
- }
- for i := 0; i < 8192; i += 1 {
- want := fmt.Sprintf("chatty %d", i+(9000-8192))
- if !setMessages[want] {
- t.Fatalf("could not find entry %q in journal", want)
- }
- }
-}
-
-func TestJournalSubtree(t *testing.T) {
- j := newJournal()
- j.append(&entry{origin: "a", leveled: testPayload("a")})
- j.append(&entry{origin: "a.b", leveled: testPayload("a.b")})
- j.append(&entry{origin: "a.b.c", leveled: testPayload("a.b.c")})
- j.append(&entry{origin: "a.b.d", leveled: testPayload("a.b.d")})
- j.append(&entry{origin: "e.f", leveled: testPayload("e.f")})
- j.append(&entry{origin: "e.g", leveled: testPayload("e.g")})
-
- expect := func(f filter, msgs ...string) string {
- res := j.scanEntries(f)
- set := make(map[string]bool)
- for _, entry := range res {
- set[strings.Join(entry.leveled.messages, "\n")] = true
- }
-
- for _, want := range msgs {
- if !set[want] {
- return fmt.Sprintf("missing entry %q", want)
- }
- }
- return ""
- }
-
- if res := expect(filterAll(), "a", "a.b", "a.b.c", "a.b.d", "e.f", "e.g"); res != "" {
- t.Fatalf("All: %s", res)
- }
- if res := expect(filterSubtree("a"), "a", "a.b", "a.b.c", "a.b.d"); res != "" {
- t.Fatalf("Subtree(a): %s", res)
- }
- if res := expect(filterSubtree("a.b"), "a.b", "a.b.c", "a.b.d"); res != "" {
- t.Fatalf("Subtree(a.b): %s", res)
- }
- if res := expect(filterSubtree("e"), "e.f", "e.g"); res != "" {
- t.Fatalf("Subtree(a.b): %s", res)
- }
-}
diff --git a/metropolis/node/core/logtree/leveled.go b/metropolis/node/core/logtree/leveled.go
deleted file mode 100644
index c24357e..0000000
--- a/metropolis/node/core/logtree/leveled.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "fmt"
-
- apb "git.monogon.dev/source/nexantic.git/metropolis/proto/api"
-)
-
-// LeveledLogger is a generic interface for glog-style logging. There are four hardcoded log severities, in increasing
-// order: INFO, WARNING, ERROR, FATAL. Logging at a certain severity level logs not only to consumers expecting data
-// at that severity level, but also to consumers at all lower severity levels. For example, an ERROR log will also be passed to
-// consumers looking at INFO or WARNING logs.
-type LeveledLogger interface {
- // Info logs at the INFO severity. Arguments are handled in the manner of fmt.Print, a terminating newline is added
- // if missing.
- Info(args ...interface{})
- // Infof logs at the INFO severity. Arguments are handled in the manner of fmt.Printf, a terminating newline is
- // added if missing.
- Infof(format string, args ...interface{})
-
- // Warning logs at the WARNING severity. Arguments are handled in the manner of fmt.Print, a terminating newline is
- // added if missing.
- Warning(args ...interface{})
- // Warningf logs at the WARNING severity. Arguments are handled in the manner of fmt.Printf, a terminating newline
- // is added if missing.
- Warningf(format string, args ...interface{})
-
- // Error logs at the ERROR severity. Arguments are handled in the manner of fmt.Print, a terminating newline is
- // added if missing.
- Error(args ...interface{})
- // Errorf logs at the ERROR severity. Arguments are handled in the manner of fmt.Printf, a terminating newline is
- // added if missing.
- Errorf(format string, args ...interface{})
-
- // Fatal logs at the FATAL severity and aborts the current program. Arguments are handled in the manner of
- // fmt.Print, a terminating newline is added if missing.
- Fatal(args ...interface{})
- // Fatalf logs at the FATAL severity and aborts the current program. Arguments are handled in the manner of
- // fmt.Printf, a terminating newline is added if missing.
- Fatalf(format string, args ...interface{})
-
- // V returns a VerboseLeveledLogger at a given verbosity level. These verbosity levels can be dynamically set and
- // unset on a package-granular level by consumers of the LeveledLogger logs. The returned value represents whether
- // logging at the given verbosity level was active at that time, and as such should not be a long-lived object
- // in programs.
-	// This construct is further referred to as 'V-logs'.
- V(level VerbosityLevel) VerboseLeveledLogger
-}
-
-// VerbosityLevel is a verbosity level defined for V-logs. This can be changed programmatically per Go package. When
-// logging at a given VerbosityLevel V, the current level must be equal to or higher than V for the logs to be recorded.
-// Conversely, enabling a V-logging at a VerbosityLevel V also enables all logging at lower levels [Int32Min .. (V-1)].
-type VerbosityLevel int32
-
-type VerboseLeveledLogger interface {
- // Enabled returns if this level was enabled. If not enabled, all logging into this logger will be discarded
- // immediately.
- // Thus, Enabled() can be used to check the verbosity level before performing any logging:
- // if l.V(3).Enabled() { l.Info("V3 is enabled") }
- // or, in simple cases, the convenience function .Info can be used:
- // l.V(3).Info("V3 is enabled")
- // The second form is shorter and more convenient, but more expensive, as its arguments are always evaluated.
- Enabled() bool
- // Info is the equivalent of a LeveledLogger's Info call, guarded by whether this VerboseLeveledLogger is enabled.
- Info(args ...interface{})
- // Infof is the equivalent of a LeveledLogger's Infof call, guarded by whether this VerboseLeveledLogger is enabled.
- Infof(format string, args ...interface{})
-}
-
-// Severity is one of the severities as described in LeveledLogger.
-type Severity string
-
-const (
- INFO Severity = "I"
- WARNING Severity = "W"
- ERROR Severity = "E"
- FATAL Severity = "F"
-)
-
-var (
-	// SeverityAtLeast maps a given severity to a list of severities that are at that severity or higher. In other words,
- // SeverityAtLeast[X] returns a list of severities that might be seen in a log at severity X.
- SeverityAtLeast = map[Severity][]Severity{
- INFO: {INFO, WARNING, ERROR, FATAL},
- WARNING: {WARNING, ERROR, FATAL},
- ERROR: {ERROR, FATAL},
- FATAL: {FATAL},
- }
-)
-
-func (s Severity) AtLeast(other Severity) bool {
- for _, el := range SeverityAtLeast[other] {
- if el == s {
- return true
- }
- }
- return false
-}
-
-func SeverityFromProto(s apb.LeveledLogSeverity) (Severity, error) {
- switch s {
- case apb.LeveledLogSeverity_INFO:
- return INFO, nil
- case apb.LeveledLogSeverity_WARNING:
- return WARNING, nil
- case apb.LeveledLogSeverity_ERROR:
- return ERROR, nil
- case apb.LeveledLogSeverity_FATAL:
- return FATAL, nil
- default:
- return "", fmt.Errorf("unknown severity value %d", s)
- }
-}
-
-func (s Severity) ToProto() apb.LeveledLogSeverity {
- switch s {
- case INFO:
- return apb.LeveledLogSeverity_INFO
- case WARNING:
- return apb.LeveledLogSeverity_WARNING
- case ERROR:
- return apb.LeveledLogSeverity_ERROR
- case FATAL:
- return apb.LeveledLogSeverity_FATAL
- default:
- return apb.LeveledLogSeverity_INVALID
- }
-}
diff --git a/metropolis/node/core/logtree/leveled_payload.go b/metropolis/node/core/logtree/leveled_payload.go
deleted file mode 100644
index fad42e3..0000000
--- a/metropolis/node/core/logtree/leveled_payload.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
-
- apb "git.monogon.dev/source/nexantic.git/metropolis/proto/api"
-)
-
-// LeveledPayload is a log entry for leveled logs (as per leveled.go). It contains the input to these calls (severity and
-// message split into newline-delimited messages) and additional metadata that would be usually seen in a text
-// representation of a leveled log entry.
-type LeveledPayload struct {
- // messages is the list of messages contained in this payload. This list is built from splitting up the given message
- // from the user by newline.
- messages []string
- // timestamp is the time at which this message was emitted.
- timestamp time.Time
- // severity is the leveled Severity at which this message was emitted.
- severity Severity
- // file is the filename of the caller that emitted this message.
- file string
- // line is the line number within the file of the caller that emitted this message.
- line int
-}
-
-// String returns a canonical representation of this payload as a single string prefixed with metadata. If the original
-// message was logged with newlines, this representation will also contain newlines, with each original message part
-// prefixed by the metadata.
-// For an alternative call that will instead return a canonical prefix and a list of lines in the message, see Strings().
-func (p *LeveledPayload) String() string {
- prefix, lines := p.Strings()
-	res := make([]string, len(lines))
- for i, line := range lines {
- res[i] = fmt.Sprintf("%s%s", prefix, line)
- }
- return strings.Join(res, "\n")
-}
-
-// Strings returns the canonical representation of this payload split into a prefix and all lines that were contained in
-// the original message. This is meant to be displayed to the user by showing the prefix before each line, concatenated
-// together - possibly in a table form with the prefixes all unified with a rowspan-like mechanism.
-//
-// For example, this function can return:
-// prefix = "I1102 17:20:06.921395 foo.go:42] "
-// lines = []string{"current tags:", " - one", " - two"}
-//
-// With this data, the result should be presented to users this way in text form:
-// I1102 17:20:06.921395 foo.go:42] current tags:
-// I1102 17:20:06.921395 foo.go:42] - one
-// I1102 17:20:06.921395 foo.go:42] - two
-//
-// Or, in a table layout:
-// .-----------------------------------------------------------.
-// | I1102 17:20:06.921395 0 foo.go:42] : current tags: |
-// | :------------------|
-// | : - one |
-// | :------------------|
-// | : - two |
-// '-----------------------------------------------------------'
-//
-func (p *LeveledPayload) Strings() (prefix string, lines []string) {
- _, month, day := p.timestamp.Date()
- hour, minute, second := p.timestamp.Clock()
- nsec := p.timestamp.Nanosecond() / 1000
-
-	// Same format as in glog, but without threadid.
- // Lmmdd hh:mm:ss.uuuuuu file:line]
- // TODO(q3k): rewrite this to printf-less code.
- prefix = fmt.Sprintf("%s%02d%02d %02d:%02d:%02d.%06d %s:%d] ", p.severity, month, day, hour, minute, second, nsec, p.file, p.line)
-
- lines = p.messages
- return
-}
-
-// Messages returns the inner message lines of this entry, ie. what was passed to the actual logging method, but split by
-// newlines.
-func (p *LeveledPayload) Messages() []string { return p.messages }
-
-func (p *LeveledPayload) MessagesJoined() string { return strings.Join(p.messages, "\n") }
-
-// Timestamp returns the time at which this entry was logged.
-func (p *LeveledPayload) Timestamp() time.Time { return p.timestamp }
-
-// Location returns a string in the form of file_name:line_number that shows the origin of the log entry in the
-// program source.
-func (p *LeveledPayload) Location() string { return fmt.Sprintf("%s:%d", p.file, p.line) }
-
-// Severity returns the Severity with which this entry was logged.
-func (p *LeveledPayload) Severity() Severity { return p.severity }
-
-// Proto converts a LeveledPayload to protobuf format.
-func (p *LeveledPayload) Proto() *apb.LogEntry_Leveled {
- return &apb.LogEntry_Leveled{
- Lines: p.Messages(),
- Timestamp: p.Timestamp().UnixNano(),
- Severity: p.Severity().ToProto(),
- Location: p.Location(),
- }
-}
-
-// LeveledPayloadFromProto parses a protobuf message into the internal format.
-func LeveledPayloadFromProto(p *apb.LogEntry_Leveled) (*LeveledPayload, error) {
- severity, err := SeverityFromProto(p.Severity)
- if err != nil {
- return nil, fmt.Errorf("could not convert severity: %w", err)
- }
- parts := strings.Split(p.Location, ":")
- if len(parts) != 2 {
- return nil, fmt.Errorf("invalid location, must be two :-delimited parts, is %d parts", len(parts))
- }
- file := parts[0]
- line, err := strconv.Atoi(parts[1])
- if err != nil {
- return nil, fmt.Errorf("invalid location line number: %w", err)
- }
- return &LeveledPayload{
- messages: p.Lines,
- timestamp: time.Unix(0, p.Timestamp),
- severity: severity,
- file: file,
- line: line,
- }, nil
-}
diff --git a/metropolis/node/core/logtree/logtree.go b/metropolis/node/core/logtree/logtree.go
deleted file mode 100644
index fab72ba..0000000
--- a/metropolis/node/core/logtree/logtree.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "fmt"
- "strings"
- "sync"
-
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/logbuffer"
-)
-
-// LogTree is a tree-shaped logging system. For more information, see the package-level documentation.
-type LogTree struct {
- // journal is the tree's journal, storing all log data and managing subscribers.
- journal *journal
-	// root is the root node of the actual tree of loggers. The nodes contain per-DN configuration options, notably
- // the current verbosity level of that DN.
- root *node
-}
-
-func New() *LogTree {
- lt := &LogTree{
- journal: newJournal(),
- }
- lt.root = newNode(lt, "")
- return lt
-}
-
-// node represents a given DN as a discrete 'logger'. It implements the LeveledLogger interface for log publishing,
-// entries from which it passes over to the logtree's journal.
-type node struct {
- // dn is the DN which this node represents (or "" if this is the root node).
- dn DN
- // tree is the LogTree to which this node belongs.
- tree *LogTree
- // verbosity is the current verbosity level of this DN/node, affecting .V(n) LeveledLogger calls
- verbosity VerbosityLevel
- rawLineBuffer *logbuffer.LineBuffer
-
- // mu guards children.
- mu sync.Mutex
- // children is a map of DN-part to a children node in the logtree. A DN-part is a string representing a part of the
-	// DN between the delimiting dots, as returned by DN.Path.
- children map[string]*node
-}
-
-// newNode returns a node at a given DN in the LogTree - but doesn't set up the LogTree to insert it accordingly.
-func newNode(tree *LogTree, dn DN) *node {
- n := &node{
- dn: dn,
- tree: tree,
- children: make(map[string]*node),
- }
-	// TODO(q3k): make this limit configurable. If this happens, or the default (1024) gets changed, max chunk size
- // calculations when serving the logs (eg. in NodeDebugService) must reflect this.
- n.rawLineBuffer = logbuffer.NewLineBuffer(1024, n.logRaw)
- return n
-}
-
-// nodeByDN returns the LogTree node corresponding to a given DN. If either the node or some of its parents do not
-// exist they will be created as needed.
-func (l *LogTree) nodeByDN(dn DN) (*node, error) {
- traversal, err := newTraversal(dn)
- if err != nil {
- return nil, fmt.Errorf("traversal failed: %w", err)
- }
- return traversal.execute(l.root), nil
-}
-
-// nodeTraversal represents a request to traverse the LogTree in search of a given node by DN.
-type nodeTraversal struct {
-	// want is the DN of the node that is requested to be found.
- want DN
-	// current is the path already taken to find the node, in the form of DN parts. It starts out empty and
-	// progresses to become want.Path() as the traversal continues.
-	current []string
-	// left is the path that still needs to be taken in order to find the node, in the form of DN parts. It starts
-	// out as want.Path() and progresses to become empty as the traversal continues.
-	left []string
-}
-
-// next adjusts the traversal's current/left slices to the next element of the traversal, returns the part that's now
-// being looked for (or "" if the traversal is done) and the full DN of the element that's being looked for.
-//
-// For example, a traversal of foo.bar.baz will cause .next() to return the following on each invocation:
-// - part: foo, full: foo
-// - part: bar, full: foo.bar
-// - part: baz, full: foo.bar.baz
-// - part: "", full: foo.bar.baz
-func (t *nodeTraversal) next() (part string, full DN) {
- if len(t.left) == 0 {
- return "", t.want
- }
- part = t.left[0]
- t.current = append(t.current, part)
- t.left = t.left[1:]
- full = DN(strings.Join(t.current, "."))
- return
-}
-
-// newTraversal returns a nodeTraversal for a given wanted DN.
-func newTraversal(dn DN) (*nodeTraversal, error) {
- parts, err := dn.Path()
- if err != nil {
- return nil, err
- }
- return &nodeTraversal{
- want: dn,
- left: parts,
- }, nil
-}
-
-// execute the traversal in order to find the node. This can only be called once per traversal.
-// Nodes will be created within the tree until the target node is reached. Existing nodes will be reused.
-// This is effectively an idempotent way of accessing a node in the tree based on a traversal.
-func (t *nodeTraversal) execute(n *node) *node {
- cur := n
- for {
- part, full := t.next()
- if part == "" {
- return cur
- }
-
- mu := &cur.mu
- mu.Lock()
- if _, ok := cur.children[part]; !ok {
- cur.children[part] = newNode(n.tree, DN(full))
- }
- cur = cur.children[part]
- mu.Unlock()
- }
-}
diff --git a/metropolis/node/core/logtree/logtree_access.go b/metropolis/node/core/logtree/logtree_access.go
deleted file mode 100644
index fed202e..0000000
--- a/metropolis/node/core/logtree/logtree_access.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "errors"
- "sync/atomic"
-)
-
-// LogReadOption describes options for the LogTree.Read call.
-type LogReadOption struct {
- withChildren bool
- withStream bool
- withBacklog int
- onlyLeveled bool
- onlyRaw bool
- leveledWithMinimumSeverity Severity
-}
-
-// WithChildren makes Read return/stream data for both a given DN and all its children.
-func WithChildren() LogReadOption { return LogReadOption{withChildren: true} }
-
-// WithStream makes Read return a stream of data. This works alongside WithBacklog to create a read-and-stream
-// construct.
-func WithStream() LogReadOption { return LogReadOption{withStream: true} }
-
-// WithBacklog makes Read return already recorded log entries, up to count elements.
-func WithBacklog(count int) LogReadOption { return LogReadOption{withBacklog: count} }
-
-// BacklogAllAvailable makes WithBacklog return all backlogged log data that logtree possesses.
-const BacklogAllAvailable int = -1
-
-// OnlyRaw makes Read return only raw logging entries.
-func OnlyRaw() LogReadOption { return LogReadOption{onlyRaw: true} }
-
-// OnlyLeveled makes Read return only leveled logging entries.
-func OnlyLeveled() LogReadOption { return LogReadOption{onlyLeveled: true} }
-
-// LeveledWithMinimumSeverity makes Read return only log entries that are at least at a given Severity. If only leveled
-// entries are needed, OnlyLeveled must be used. This is a no-op when OnlyRaw is used.
-func LeveledWithMinimumSeverity(s Severity) LogReadOption {
- return LogReadOption{leveledWithMinimumSeverity: s}
-}
-
-// LogReader permits reading an already existing backlog of log entries and streaming further ones.
-type LogReader struct {
- // Backlog are the log entries already logged by LogTree. This will only be set if WithBacklog has been passed to
- // Read.
- Backlog []*LogEntry
- // Stream is a channel of new entries as received live by LogTree. This will only be set if WithStream has been
- // passed to Read. In this case, entries from this channel must be read as fast as possible by the consumer in order
- // to prevent missing entries.
- Stream <-chan *LogEntry
-	// done is a channel used to signal (by closing) that the log consumer is not interested in more Stream data.
- done chan<- struct{}
- // missed is an atomic integer pointer that tells the subscriber how many messages in Stream they missed. This
- // pointer is nil if no streaming has been requested.
- missed *uint64
-}
-
-// Missed returns the number of entries that were missed from Stream (as the channel was not drained fast enough).
-func (l *LogReader) Missed() uint64 {
- // No Stream.
- if l.missed == nil {
- return 0
- }
- return atomic.LoadUint64(l.missed)
-}
-
-// Close closes the LogReader's Stream. This must be called once the Reader does not wish to receive streaming messages
-// anymore.
-func (l *LogReader) Close() {
- if l.done != nil {
- close(l.done)
- }
-}
-
-var (
- ErrRawAndLeveled = errors.New("cannot return logs that are simultaneously OnlyRaw and OnlyLeveled")
-)
-
-// Read and/or stream entries from a LogTree. The returned LogReader is shaped by the LogReadOptions passed, which
-// determine whether the Read will return existing entries, a stream, or both. In addition, the options dictate
-// whether only entries for that particular DN are returned, or entries for all sub-DNs as well.
-func (l *LogTree) Read(dn DN, opts ...LogReadOption) (*LogReader, error) {
- l.journal.mu.RLock()
- defer l.journal.mu.RUnlock()
-
- var backlog int
- var stream bool
- var recursive bool
- var leveledSeverity Severity
- var onlyRaw, onlyLeveled bool
-
- for _, opt := range opts {
- if opt.withBacklog > 0 || opt.withBacklog == BacklogAllAvailable {
- backlog = opt.withBacklog
- }
- if opt.withStream {
- stream = true
- }
- if opt.withChildren {
- recursive = true
- }
- if opt.leveledWithMinimumSeverity != "" {
- leveledSeverity = opt.leveledWithMinimumSeverity
- }
- if opt.onlyLeveled {
- onlyLeveled = true
- }
- if opt.onlyRaw {
- onlyRaw = true
- }
- }
-
- if onlyLeveled && onlyRaw {
- return nil, ErrRawAndLeveled
- }
-
- var filters []filter
- if onlyLeveled {
- filters = append(filters, filterOnlyLeveled)
- }
- if onlyRaw {
- filters = append(filters, filterOnlyRaw)
- }
- if recursive {
- filters = append(filters, filterSubtree(dn))
- } else {
- filters = append(filters, filterExact(dn))
- }
- if leveledSeverity != "" {
- filters = append(filters, filterSeverity(leveledSeverity))
- }
-
- var entries []*entry
- if backlog > 0 || backlog == BacklogAllAvailable {
- // TODO(q3k): pass over the backlog count to scanEntries/getEntries, instead of discarding them here.
- if recursive {
- entries = l.journal.scanEntries(filters...)
- } else {
- entries = l.journal.getEntries(dn, filters...)
- }
- if backlog != BacklogAllAvailable && len(entries) > backlog {
- entries = entries[:backlog]
- }
- }
-
- var sub *subscriber
- if stream {
- sub = &subscriber{
- // TODO(q3k): make buffer size configurable
- dataC: make(chan *LogEntry, 128),
- doneC: make(chan struct{}),
- filters: filters,
- }
- l.journal.subscribe(sub)
- }
-
- lr := &LogReader{}
- lr.Backlog = make([]*LogEntry, len(entries))
- for i, entry := range entries {
- lr.Backlog[i] = entry.external()
- }
- if stream {
- lr.Stream = sub.dataC
- lr.done = sub.doneC
- lr.missed = &sub.missed
- }
- return lr, nil
-}
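
For orientation, a minimal consumer of the Read API above might look as follows. This is a hedged sketch: it assumes the post-move import path from this diff (//metropolis/pkg/logtree) and the New constructor used by the tests further down.

package main

import (
	"fmt"

	"git.monogon.dev/source/nexantic.git/metropolis/pkg/logtree"
)

func main() {
	tree := logtree.New()
	tree.MustLeveledFor("main").Info("hello, backlog")

	// Fetch the existing backlog for "main" and its children, and subscribe
	// to entries logged from now on.
	reader, err := tree.Read("main",
		logtree.WithChildren(),
		logtree.WithBacklog(logtree.BacklogAllAvailable),
		logtree.WithStream())
	if err != nil {
		panic(err)
	}
	// Close signals that no more Stream data is wanted.
	defer reader.Close()

	for _, e := range reader.Backlog {
		fmt.Println(e.String())
	}

	// Stream must be drained quickly; Missed reports dropped entries.
	tree.MustLeveledFor("main").Info("hello, stream")
	fmt.Println((<-reader.Stream).String())
	fmt.Println("missed:", reader.Missed())
}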
diff --git a/metropolis/node/core/logtree/logtree_entry.go b/metropolis/node/core/logtree/logtree_entry.go
deleted file mode 100644
index 635e5a8..0000000
--- a/metropolis/node/core/logtree/logtree_entry.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "fmt"
- "strings"
-
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/logbuffer"
- apb "git.monogon.dev/source/nexantic.git/metropolis/proto/api"
-)
-
-// LogEntry contains a log entry, combining both leveled and raw logging into a single stream of events. A LogEntry
-// will contain exactly one of either LeveledPayload or RawPayload.
-type LogEntry struct {
- // If non-nil, this is a leveled logging entry.
- Leveled *LeveledPayload
- // If non-nil, this is a raw logging entry line.
- Raw *logbuffer.Line
- // DN from which this entry was logged.
- DN DN
-}
-
-// String returns a canonical representation of this payload as a single string prefixed with metadata. If the entry is
-// a leveled log entry that originally was logged with newlines this representation will also contain newlines, with
-// each original message part prefixed by the metadata.
-// For an alternative call that will instead return a canonical prefix and a list of lines in the message, see Strings().
-func (l *LogEntry) String() string {
- if l.Leveled != nil {
- prefix, messages := l.Leveled.Strings()
- res := make([]string, len(messages))
- for i, m := range messages {
- res[i] = fmt.Sprintf("%-32s %s%s", l.DN, prefix, m)
- }
- return strings.Join(res, "\n")
- }
- if l.Raw != nil {
- return fmt.Sprintf("%-32s R %s", l.DN, l.Raw)
- }
- return "INVALID"
-}
-
-// Strings returns the canonical representation of this payload split into a prefix and all lines that were contained in
-// the original message. This is meant to be displayed to the user by showing the prefix before each line, concatenated
-// together - possibly in a table form with the prefixes all unified with a rowspan-like mechanism.
-//
-// For example, this function can return:
-// prefix = "root.foo.bar I1102 17:20:06.921395 0 foo.go:42] "
-// lines = []string{"current tags:", " - one", " - two"}
-//
-// With this data, the result should be presented to users this way in text form:
-// root.foo.bar I1102 17:20:06.921395 foo.go:42] current tags:
-// root.foo.bar I1102 17:20:06.921395 foo.go:42] - one
-// root.foo.bar I1102 17:20:06.921395 foo.go:42] - two
-//
-// Or, in a table layout:
-// .-------------------------------------------------------------------------------------.
-// | root.foo.bar I1102 17:20:06.921395 foo.go:42] : current tags: |
-// | :------------------|
-// | : - one |
-// | :------------------|
-// | : - two |
-// '-------------------------------------------------------------------------------------'
-//
-func (l *LogEntry) Strings() (prefix string, lines []string) {
- if l.Leveled != nil {
- prefix, messages := l.Leveled.Strings()
- prefix = fmt.Sprintf("%-32s %s", l.DN, prefix)
- return prefix, messages
- }
- if l.Raw != nil {
- return fmt.Sprintf("%-32s R ", l.DN), []string{l.Raw.Data}
- }
- return "INVALID ", []string{"INVALID"}
-}
-
-// Proto converts this LogEntry to its proto representation. The returned value may be nil if the given LogEntry is
-// invalid, eg. contains neither a Raw nor a Leveled entry.
-func (l *LogEntry) Proto() *apb.LogEntry {
- p := &apb.LogEntry{
- Dn: string(l.DN),
- }
- switch {
- case l.Leveled != nil:
- leveled := l.Leveled
- p.Kind = &apb.LogEntry_Leveled_{
- Leveled: leveled.Proto(),
- }
- case l.Raw != nil:
- raw := l.Raw
- p.Kind = &apb.LogEntry_Raw_{
- Raw: raw.ProtoLog(),
- }
- default:
- return nil
- }
- return p
-}
-
-// LogEntryFromProto parses a proto LogEntry back into the internal structure. This can be used in log proto API
-// consumers to easily print received log entries.
-func LogEntryFromProto(l *apb.LogEntry) (*LogEntry, error) {
- dn := DN(l.Dn)
- if _, err := dn.Path(); err != nil {
- return nil, fmt.Errorf("could not convert DN: %w", err)
- }
- res := &LogEntry{
- DN: dn,
- }
- switch inner := l.Kind.(type) {
- case *apb.LogEntry_Leveled_:
- leveled, err := LeveledPayloadFromProto(inner.Leveled)
- if err != nil {
- return nil, fmt.Errorf("could not convert leveled entry: %w", err)
- }
- res.Leveled = leveled
- case *apb.LogEntry_Raw_:
- line, err := logbuffer.LineFromLogProto(inner.Raw)
- if err != nil {
- return nil, fmt.Errorf("could not convert raw entry: %w", err)
- }
- res.Raw = line
- default:
- return nil, fmt.Errorf("proto has neither Leveled nor Raw set")
- }
- return res, nil
-}
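
Proto and LogEntryFromProto are designed to round-trip. A hedged sketch of a consumer doing so, under the same import-path assumption as above:

package main

import (
	"fmt"

	"git.monogon.dev/source/nexantic.git/metropolis/pkg/logtree"
)

func main() {
	tree := logtree.New()
	tree.MustLeveledFor("main").Info("ping")

	reader, err := tree.Read("main", logtree.WithBacklog(logtree.BacklogAllAvailable))
	if err != nil {
		panic(err)
	}
	entry := reader.Backlog[0]

	// To the wire: Proto returns nil only for invalid entries.
	p := entry.Proto()

	// And back on the consumer side.
	back, err := logtree.LogEntryFromProto(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.String())
}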
diff --git a/metropolis/node/core/logtree/logtree_publisher.go b/metropolis/node/core/logtree/logtree_publisher.go
deleted file mode 100644
index c4880bc..0000000
--- a/metropolis/node/core/logtree/logtree_publisher.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "fmt"
- "io"
- "runtime"
- "strings"
- "time"
-
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/logbuffer"
-)
-
-// LeveledFor returns a LeveledLogger publishing interface for a given DN. An error may be returned if the DN is
-// malformed.
-func (l *LogTree) LeveledFor(dn DN) (LeveledLogger, error) {
- return l.nodeByDN(dn)
-}
-
-func (l *LogTree) RawFor(dn DN) (io.Writer, error) {
- node, err := l.nodeByDN(dn)
- if err != nil {
- return nil, fmt.Errorf("could not retrieve raw logger: %w", err)
- }
- return node.rawLineBuffer, nil
-}
-
-// MustLeveledFor returns a LeveledLogger publishing interface for a given DN, or panics if the given DN is invalid.
-func (l *LogTree) MustLeveledFor(dn DN) LeveledLogger {
- leveled, err := l.LeveledFor(dn)
- if err != nil {
- panic(fmt.Errorf("LeveledFor returned: %w", err))
- }
- return leveled
-}
-
-func (l *LogTree) MustRawFor(dn DN) io.Writer {
- raw, err := l.RawFor(dn)
- if err != nil {
- panic(fmt.Errorf("RawFor returned: %w", err))
- }
- return raw
-}
-
-// SetVerbosity sets the verbosity for a given DN (non-recursively, ie. for that DN only, not its children).
-func (l *LogTree) SetVerbosity(dn DN, level VerbosityLevel) error {
- node, err := l.nodeByDN(dn)
- if err != nil {
- return err
- }
- node.verbosity = level
- return nil
-}
-
-// logRaw is called by this node's LineBuffer any time a raw log line is completed. It will create a new entry, append
-// it to the journal, and notify all pertinent subscribers.
-func (n *node) logRaw(line *logbuffer.Line) {
- e := &entry{
- origin: n.dn,
- raw: line,
- }
- n.tree.journal.append(e)
- n.tree.journal.notify(e)
-}
-
-// logLeveled builds a LeveledPayload and entry for a given message, including all related metadata. It will create a
-// new entry, append it to the journal, and notify all pertinent subscribers.
-func (n *node) logLeveled(depth int, severity Severity, msg string) {
- _, file, line, ok := runtime.Caller(2 + depth)
- if !ok {
- file = "???"
- line = 1
- } else {
- slash := strings.LastIndex(file, "/")
- if slash >= 0 {
- file = file[slash+1:]
- }
- }
-
- // Remove leading/trailing newlines and split.
- messages := strings.Split(strings.Trim(msg, "\n"), "\n")
-
- p := &LeveledPayload{
- timestamp: time.Now(),
- severity: severity,
- messages: messages,
- file: file,
- line: line,
- }
- e := &entry{
- origin: n.dn,
- leveled: p,
- }
- n.tree.journal.append(e)
- n.tree.journal.notify(e)
-}
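
The 2 + depth passed to runtime.Caller accounts for logLeveled itself plus the exported wrapper (eg. Info) that called it, so the reported file:line points at the user's code. A standalone sketch of the same attribution trick, not taken from this tree:

package main

import (
	"fmt"
	"runtime"
)

// logLeveled-style attribution: skip this frame and the wrapper frame so the
// reported file:line points at the original caller of info.
func logLeveled(depth int, msg string) {
	_, file, line, ok := runtime.Caller(2 + depth)
	if !ok {
		file, line = "???", 1
	}
	fmt.Printf("%s:%d] %s\n", file, line, msg)
}

func info(msg string) { logLeveled(0, msg) }

func main() {
	info("attributed to this line") // skips info and logLeveled frames
}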
-
-// Info implements the LeveledLogger interface.
-func (n *node) Info(args ...interface{}) {
- n.logLeveled(0, INFO, fmt.Sprint(args...))
-}
-
-// Infof implements the LeveledLogger interface.
-func (n *node) Infof(format string, args ...interface{}) {
- n.logLeveled(0, INFO, fmt.Sprintf(format, args...))
-}
-
-// Warning implements the LeveledLogger interface.
-func (n *node) Warning(args ...interface{}) {
- n.logLeveled(0, WARNING, fmt.Sprint(args...))
-}
-
-// Warningf implements the LeveledLogger interface.
-func (n *node) Warningf(format string, args ...interface{}) {
- n.logLeveled(0, WARNING, fmt.Sprintf(format, args...))
-}
-
-// Error implements the LeveledLogger interface.
-func (n *node) Error(args ...interface{}) {
- n.logLeveled(0, ERROR, fmt.Sprint(args...))
-}
-
-// Errorf implements the LeveledLogger interface.
-func (n *node) Errorf(format string, args ...interface{}) {
- n.logLeveled(0, ERROR, fmt.Sprintf(format, args...))
-}
-
-// Fatal implements the LeveledLogger interface.
-func (n *node) Fatal(args ...interface{}) {
- n.logLeveled(0, FATAL, fmt.Sprint(args...))
-}
-
-// Fatalf implements the LeveledLogger interface.
-func (n *node) Fatalf(format string, args ...interface{}) {
- n.logLeveled(0, FATAL, fmt.Sprintf(format, args...))
-}
-
-// V implements the LeveledLogger interface.
-func (n *node) V(v VerbosityLevel) VerboseLeveledLogger {
- return &verbose{
- node: n,
- enabled: n.verbosity >= v,
- }
-}
-
-// verbose implements the VerboseLeveledLogger interface. It is a thin wrapper around node, with an 'enabled' bool. This
-// means that V(n)-returned VerboseLeveledLoggers must be short-lived, as a change in verbosity will not affect
-// already existing VerboseLeveledLoggers.
-type verbose struct {
- node *node
- enabled bool
-}
-
-func (v *verbose) Enabled() bool {
- return v.enabled
-}
-
-func (v *verbose) Info(args ...interface{}) {
- if !v.enabled {
- return
- }
- v.node.logLeveled(0, INFO, fmt.Sprint(args...))
-}
-
-func (v *verbose) Infof(format string, args ...interface{}) {
- if !v.enabled {
- return
- }
- v.node.logLeveled(0, INFO, fmt.Sprintf(format, args...))
-}
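
Tying the publisher side together, a hedged usage sketch (same import-path assumption as above; the calls mirror what the tests below exercise):

package main

import (
	"fmt"

	"git.monogon.dev/source/nexantic.git/metropolis/pkg/logtree"
)

func main() {
	tree := logtree.New()

	// Leveled logging: nodes are created on first use, per DN.
	l := tree.MustLeveledFor("root.worker")
	l.Infof("starting worker %d", 1)
	l.Warning("disk is getting full")

	// V-style logging is gated by the per-DN verbosity, which defaults to 0.
	tree.SetVerbosity("root.worker", 3)
	l.V(3).Info("verbose detail, now visible")

	// Raw logging: an io.Writer that buffers input into lines.
	fmt.Fprintf(tree.MustRawFor("root.worker.proc"), "raw line\n")
}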
diff --git a/metropolis/node/core/logtree/logtree_test.go b/metropolis/node/core/logtree/logtree_test.go
deleted file mode 100644
index b900201..0000000
--- a/metropolis/node/core/logtree/logtree_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logtree
-
-import (
- "fmt"
- "strings"
- "testing"
- "time"
-)
-
-func expect(tree *LogTree, t *testing.T, dn DN, entries ...string) string {
- t.Helper()
- res, err := tree.Read(dn, WithChildren(), WithBacklog(BacklogAllAvailable))
- if err != nil {
- t.Fatalf("Read: %v", err)
- }
- if want, got := len(entries), len(res.Backlog); want != got {
- t.Fatalf("wanted %v backlog entries, got %v", want, got)
- }
- got := make(map[string]bool)
- for _, entry := range res.Backlog {
- if entry.Leveled != nil {
- got[entry.Leveled.MessagesJoined()] = true
- }
- if entry.Raw != nil {
- got[entry.Raw.Data] = true
- }
- }
- for _, entry := range entries {
- if !got[entry] {
- return fmt.Sprintf("missing entry %q", entry)
- }
- }
- return ""
-}
-
-func TestMultiline(t *testing.T) {
- tree := New()
- // Two lines in a single message.
- tree.MustLeveledFor("main").Info("foo\nbar")
- // Two lines in a single message with a hanging newline that should get stripped.
- tree.MustLeveledFor("main").Info("one\ntwo\n")
-
- if res := expect(tree, t, "main", "foo\nbar", "one\ntwo"); res != "" {
- t.Errorf("retrieval at main failed: %s", res)
- }
-}
-
-func TestBacklog(t *testing.T) {
- tree := New()
- tree.MustLeveledFor("main").Info("hello, main!")
- tree.MustLeveledFor("main.foo").Info("hello, main.foo!")
- tree.MustLeveledFor("main.bar").Info("hello, main.bar!")
- tree.MustLeveledFor("aux").Info("hello, aux!")
- // No newline at the last entry - shouldn't get propagated to the backlog.
- fmt.Fprintf(tree.MustRawFor("aux.process"), "processing foo\nprocessing bar\nbaz")
-
- if res := expect(tree, t, "main", "hello, main!", "hello, main.foo!", "hello, main.bar!"); res != "" {
- t.Errorf("retrieval at main failed: %s", res)
- }
- if res := expect(tree, t, "", "hello, main!", "hello, main.foo!", "hello, main.bar!", "hello, aux!", "processing foo", "processing bar"); res != "" {
- t.Errorf("retrieval at root failed: %s", res)
- }
- if res := expect(tree, t, "aux", "hello, aux!", "processing foo", "processing bar"); res != "" {
- t.Errorf("retrieval at aux failed: %s", res)
- }
-}
-
-func TestStream(t *testing.T) {
- tree := New()
- tree.MustLeveledFor("main").Info("hello, backlog")
- fmt.Fprintf(tree.MustRawFor("main.process"), "hello, raw backlog\n")
-
- res, err := tree.Read("", WithBacklog(BacklogAllAvailable), WithChildren(), WithStream())
- if err != nil {
- t.Fatalf("Read: %v", err)
- }
- defer res.Close()
- if want, got := 2, len(res.Backlog); want != got {
-		t.Errorf("wanted %d backlog items, got %d", want, got)
- }
-
- tree.MustLeveledFor("main").Info("hello, stream")
- fmt.Fprintf(tree.MustRawFor("main.raw"), "hello, raw stream\n")
-
- entries := make(map[string]bool)
- timeout := time.After(time.Second * 1)
- for {
- done := false
- select {
- case <-timeout:
- done = true
- case p := <-res.Stream:
- if p.Leveled != nil {
- entries[p.Leveled.MessagesJoined()] = true
- }
- if p.Raw != nil {
- entries[p.Raw.Data] = true
- }
- }
- if done {
- break
- }
- }
- if entry := "hello, stream"; !entries[entry] {
- t.Errorf("Missing entry %q", entry)
- }
- if entry := "hello, raw stream"; !entries[entry] {
- t.Errorf("Missing entry %q", entry)
- }
-}
-
-func TestVerbose(t *testing.T) {
- tree := New()
-
- tree.MustLeveledFor("main").V(10).Info("this shouldn't get logged")
-
- reader, err := tree.Read("", WithBacklog(BacklogAllAvailable), WithChildren())
- if err != nil {
- t.Fatalf("Read: %v", err)
- }
- if want, got := 0, len(reader.Backlog); want != got {
- t.Fatalf("expected nothing to be logged, got %+v", reader.Backlog)
- }
-
- tree.SetVerbosity("main", 10)
- tree.MustLeveledFor("main").V(10).Info("this should get logged")
-
- reader, err = tree.Read("", WithBacklog(BacklogAllAvailable), WithChildren())
- if err != nil {
- t.Fatalf("Read: %v", err)
- }
- if want, got := 1, len(reader.Backlog); want != got {
- t.Fatalf("expected %d entries to get logged, got %d", want, got)
- }
-}
-
-func TestMetadata(t *testing.T) {
- tree := New()
- tree.MustLeveledFor("main").Error("i am an error")
- tree.MustLeveledFor("main").Warning("i am a warning")
- tree.MustLeveledFor("main").Info("i am informative")
- tree.MustLeveledFor("main").V(0).Info("i am a zero-level debug")
-
- reader, err := tree.Read("", WithChildren(), WithBacklog(BacklogAllAvailable))
- if err != nil {
- t.Fatalf("Read: %v", err)
- }
- if want, got := 4, len(reader.Backlog); want != got {
- t.Fatalf("expected %d entries, got %d", want, got)
- }
-
- for _, te := range []struct {
- ix int
- severity Severity
- message string
- }{
- {0, ERROR, "i am an error"},
- {1, WARNING, "i am a warning"},
- {2, INFO, "i am informative"},
- {3, INFO, "i am a zero-level debug"},
- } {
- p := reader.Backlog[te.ix]
- if want, got := te.severity, p.Leveled.Severity(); want != got {
- t.Errorf("wanted element %d to have severity %s, got %s", te.ix, want, got)
- }
- if want, got := te.message, p.Leveled.MessagesJoined(); want != got {
- t.Errorf("wanted element %d to have message %q, got %q", te.ix, want, got)
- }
- if want, got := "logtree_test.go", strings.Split(p.Leveled.Location(), ":")[0]; want != got {
- t.Errorf("wanted element %d to have file %q, got %q", te.ix, want, got)
- }
- }
-}
-
-func TestSeverity(t *testing.T) {
- tree := New()
- tree.MustLeveledFor("main").Error("i am an error")
- tree.MustLeveledFor("main").Warning("i am a warning")
- tree.MustLeveledFor("main").Info("i am informative")
- tree.MustLeveledFor("main").V(0).Info("i am a zero-level debug")
-
- reader, err := tree.Read("main", WithBacklog(BacklogAllAvailable), LeveledWithMinimumSeverity(WARNING))
- if err != nil {
- t.Fatalf("Read: %v", err)
- }
- if want, got := 2, len(reader.Backlog); want != got {
- t.Fatalf("wanted %d entries, got %d", want, got)
- }
- if want, got := "i am an error", reader.Backlog[0].Leveled.MessagesJoined(); want != got {
- t.Fatalf("wanted entry %q, got %q", want, got)
- }
- if want, got := "i am a warning", reader.Backlog[1].Leveled.MessagesJoined(); want != got {
- t.Fatalf("wanted entry %q, got %q", want, got)
- }
-}
diff --git a/metropolis/node/core/main.go b/metropolis/node/core/main.go
index 806cfef..d277fcd 100644
--- a/metropolis/node/core/main.go
+++ b/metropolis/node/core/main.go
@@ -33,17 +33,17 @@
"google.golang.org/grpc"
common "git.monogon.dev/source/nexantic.git/metropolis/node"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/supervisor"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/cluster"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/localstorage/declarative"
- "git.monogon.dev/source/nexantic.git/metropolis/node/core/logtree"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/network"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/network/dns"
- "git.monogon.dev/source/nexantic.git/metropolis/node/core/tpm"
"git.monogon.dev/source/nexantic.git/metropolis/node/kubernetes"
"git.monogon.dev/source/nexantic.git/metropolis/node/kubernetes/containerd"
"git.monogon.dev/source/nexantic.git/metropolis/node/kubernetes/pki"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/logtree"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/supervisor"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/tpm"
apb "git.monogon.dev/source/nexantic.git/metropolis/proto/api"
)
diff --git a/metropolis/node/core/network/BUILD.bazel b/metropolis/node/core/network/BUILD.bazel
index 9ba56a9..eb3423a 100644
--- a/metropolis/node/core/network/BUILD.bazel
+++ b/metropolis/node/core/network/BUILD.bazel
@@ -6,11 +6,11 @@
importpath = "git.monogon.dev/source/nexantic.git/metropolis/node/core/network",
visibility = ["//:__subpackages__"],
deps = [
- "//metropolis/node/common/supervisor:go_default_library",
- "//metropolis/node/core/logtree:go_default_library",
"//metropolis/node/core/network/dhcp4c:go_default_library",
"//metropolis/node/core/network/dhcp4c/callback:go_default_library",
"//metropolis/node/core/network/dns:go_default_library",
+ "//metropolis/pkg/logtree:go_default_library",
+ "//metropolis/pkg/supervisor:go_default_library",
"@com_github_google_nftables//:go_default_library",
"@com_github_google_nftables//expr:go_default_library",
"@com_github_insomniacslk_dhcp//dhcpv4:go_default_library",
diff --git a/metropolis/node/core/network/dhcp4c/BUILD.bazel b/metropolis/node/core/network/dhcp4c/BUILD.bazel
index 19b4c70..c84bd05 100644
--- a/metropolis/node/core/network/dhcp4c/BUILD.bazel
+++ b/metropolis/node/core/network/dhcp4c/BUILD.bazel
@@ -10,8 +10,8 @@
importpath = "git.monogon.dev/source/nexantic.git/metropolis/node/core/network/dhcp4c",
visibility = ["//visibility:public"],
deps = [
- "//metropolis/node/common/supervisor:go_default_library",
"//metropolis/node/core/network/dhcp4c/transport:go_default_library",
+ "//metropolis/pkg/supervisor:go_default_library",
"@com_github_cenkalti_backoff_v4//:go_default_library",
"@com_github_insomniacslk_dhcp//dhcpv4:go_default_library",
"@com_github_insomniacslk_dhcp//iana:go_default_library",
diff --git a/metropolis/node/core/network/dhcp4c/dhcpc.go b/metropolis/node/core/network/dhcp4c/dhcpc.go
index 4352506..2ec2be0 100644
--- a/metropolis/node/core/network/dhcp4c/dhcpc.go
+++ b/metropolis/node/core/network/dhcp4c/dhcpc.go
@@ -34,8 +34,8 @@
"github.com/insomniacslk/dhcp/dhcpv4"
"github.com/insomniacslk/dhcp/iana"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/supervisor"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/network/dhcp4c/transport"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/supervisor"
)
type state int
diff --git a/metropolis/node/core/network/dns/BUILD.bazel b/metropolis/node/core/network/dns/BUILD.bazel
index 862d4cf..ac517cc 100644
--- a/metropolis/node/core/network/dns/BUILD.bazel
+++ b/metropolis/node/core/network/dns/BUILD.bazel
@@ -9,7 +9,7 @@
importpath = "git.monogon.dev/source/nexantic.git/metropolis/node/core/network/dns",
visibility = ["//metropolis/node:__subpackages__"],
deps = [
- "//metropolis/node/common/fileargs:go_default_library",
- "//metropolis/node/common/supervisor:go_default_library",
+ "//metropolis/pkg/fileargs:go_default_library",
+ "//metropolis/pkg/supervisor:go_default_library",
],
)
diff --git a/metropolis/node/core/network/dns/coredns.go b/metropolis/node/core/network/dns/coredns.go
index b6400f7..8403cf1 100644
--- a/metropolis/node/core/network/dns/coredns.go
+++ b/metropolis/node/core/network/dns/coredns.go
@@ -26,8 +26,8 @@
"sync"
"syscall"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/fileargs"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/supervisor"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/fileargs"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/supervisor"
)
const corefileBase = `
diff --git a/metropolis/node/core/network/main.go b/metropolis/node/core/network/main.go
index 94bb4d4..63f600d 100644
--- a/metropolis/node/core/network/main.go
+++ b/metropolis/node/core/network/main.go
@@ -32,11 +32,11 @@
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/supervisor"
- "git.monogon.dev/source/nexantic.git/metropolis/node/core/logtree"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/network/dhcp4c"
dhcpcb "git.monogon.dev/source/nexantic.git/metropolis/node/core/network/dhcp4c/callback"
"git.monogon.dev/source/nexantic.git/metropolis/node/core/network/dns"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/logtree"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/supervisor"
)
const (
diff --git a/metropolis/node/core/switchroot.go b/metropolis/node/core/switchroot.go
index c980a3a..d897ec7 100644
--- a/metropolis/node/core/switchroot.go
+++ b/metropolis/node/core/switchroot.go
@@ -27,7 +27,7 @@
"golang.org/x/sys/unix"
- "git.monogon.dev/source/nexantic.git/metropolis/node/core/logtree"
+ "git.monogon.dev/source/nexantic.git/metropolis/pkg/logtree"
)
// switchRoot moves the root from initramfs into a tmpfs
diff --git a/metropolis/node/core/tpm/BUILD.bazel b/metropolis/node/core/tpm/BUILD.bazel
deleted file mode 100644
index fd42681..0000000
--- a/metropolis/node/core/tpm/BUILD.bazel
+++ /dev/null
@@ -1,22 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "credactivation_compat.go",
- "tpm.go",
- ],
- importpath = "git.monogon.dev/source/nexantic.git/metropolis/node/core/tpm",
- visibility = ["//visibility:public"],
- deps = [
- "//metropolis/node/common/sysfs:go_default_library",
- "//metropolis/node/core/logtree:go_default_library",
- "@com_github_gogo_protobuf//proto:go_default_library",
- "@com_github_google_go_tpm//tpm2:go_default_library",
- "@com_github_google_go_tpm//tpmutil:go_default_library",
- "@com_github_google_go_tpm_tools//proto:go_default_library",
- "@com_github_google_go_tpm_tools//tpm2tools:go_default_library",
- "@com_github_pkg_errors//:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/metropolis/node/core/tpm/credactivation_compat.go b/metropolis/node/core/tpm/credactivation_compat.go
deleted file mode 100644
index 039f8d5..0000000
--- a/metropolis/node/core/tpm/credactivation_compat.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tpm
-
-// This file is adapted from github.com/google/go-tpm/tpm2/credactivation, which outputs broken
-// challenges for unknown reasons: it uses u16 length-delimited outputs for the challenge blobs,
-// which is incorrect. Rather than rewriting the routine, we only applied minimal fixes to it
-// and skip the ECC part of the issue (because we would rather trust the proprietary RSA implementation).
-//
-// TODO(lorenz): I'll eventually deal with this upstream, but for now just fix it here; it's not that
-// much code after all (https://github.com/google/go-tpm/issues/121).
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "crypto/hmac"
- "crypto/rsa"
- "fmt"
- "io"
-
- "github.com/google/go-tpm/tpm2"
- "github.com/google/go-tpm/tpmutil"
-)
-
-const (
- labelIdentity = "IDENTITY"
- labelStorage = "STORAGE"
- labelIntegrity = "INTEGRITY"
-)
-
-func generateRSA(aik *tpm2.HashValue, pub *rsa.PublicKey, symBlockSize int, secret []byte, rnd io.Reader) ([]byte, []byte, error) {
- newAIKHash, err := aik.Alg.HashConstructor()
- if err != nil {
- return nil, nil, err
- }
-
-	// The seed length should match the key size used by the EK's symmetric cipher.
- // For typical RSA EKs, this will be 128 bits (16 bytes).
- // Spec: TCG 2.0 EK Credential Profile revision 14, section 2.1.5.1.
- seed := make([]byte, symBlockSize)
- if _, err := io.ReadFull(rnd, seed); err != nil {
- return nil, nil, fmt.Errorf("generating seed: %v", err)
- }
-
- // Encrypt the seed value using the provided public key.
- // See annex B, section 10.4 of the TPM specification revision 2 part 1.
- label := append([]byte(labelIdentity), 0)
- encSecret, err := rsa.EncryptOAEP(newAIKHash(), rnd, pub, seed, label)
- if err != nil {
- return nil, nil, fmt.Errorf("generating encrypted seed: %v", err)
- }
-
- // Generate the encrypted credential by convolving the seed with the digest of
- // the AIK, and using the result as the key to encrypt the secret.
- // See section 24.4 of TPM 2.0 specification, part 1.
- aikNameEncoded, err := aik.Encode()
- if err != nil {
- return nil, nil, fmt.Errorf("encoding aikName: %v", err)
- }
- symmetricKey, err := tpm2.KDFa(aik.Alg, seed, labelStorage, aikNameEncoded, nil, len(seed)*8)
- if err != nil {
- return nil, nil, fmt.Errorf("generating symmetric key: %v", err)
- }
- c, err := aes.NewCipher(symmetricKey)
- if err != nil {
- return nil, nil, fmt.Errorf("symmetric cipher setup: %v", err)
- }
- cv, err := tpmutil.Pack(tpmutil.U16Bytes(secret))
- if err != nil {
- return nil, nil, fmt.Errorf("generating cv (TPM2B_Digest): %v", err)
- }
-
- // IV is all null bytes. encIdentity represents the encrypted credential.
- encIdentity := make([]byte, len(cv))
- cipher.NewCFBEncrypter(c, make([]byte, len(symmetricKey))).XORKeyStream(encIdentity, cv)
-
- // Generate the integrity HMAC, which is used to protect the integrity of the
- // encrypted structure.
- // See section 24.5 of the TPM specification revision 2 part 1.
- macKey, err := tpm2.KDFa(aik.Alg, seed, labelIntegrity, nil, nil, newAIKHash().Size()*8)
- if err != nil {
- return nil, nil, fmt.Errorf("generating HMAC key: %v", err)
- }
-
- mac := hmac.New(newAIKHash, macKey)
- mac.Write(encIdentity)
- mac.Write(aikNameEncoded)
- integrityHMAC := mac.Sum(nil)
-
- idObject := &tpm2.IDObject{
- IntegrityHMAC: integrityHMAC,
- EncIdentity: encIdentity,
- }
- id, err := tpmutil.Pack(idObject)
- if err != nil {
- return nil, nil, fmt.Errorf("encoding IDObject: %v", err)
- }
-
- packedID, err := tpmutil.Pack(id)
- if err != nil {
- return nil, nil, fmt.Errorf("packing id: %v", err)
- }
- packedEncSecret, err := tpmutil.Pack(encSecret)
- if err != nil {
- return nil, nil, fmt.Errorf("packing encSecret: %v", err)
- }
-
- return packedID, packedEncSecret, nil
-}
diff --git a/metropolis/node/core/tpm/eventlog/BUILD.bazel b/metropolis/node/core/tpm/eventlog/BUILD.bazel
deleted file mode 100644
index 64fa1ff..0000000
--- a/metropolis/node/core/tpm/eventlog/BUILD.bazel
+++ /dev/null
@@ -1,17 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "compat.go",
- "eventlog.go",
- "secureboot.go",
- ],
- importpath = "git.monogon.dev/source/nexantic.git/metropolis/node/core/tpm/eventlog",
- visibility = ["//visibility:public"],
- deps = [
- "//metropolis/node/core/tpm/eventlog/internal:go_default_library",
- "@com_github_google_certificate_transparency_go//x509:go_default_library",
- "@com_github_google_go_tpm//tpm2:go_default_library",
- ],
-)
diff --git a/metropolis/node/core/tpm/eventlog/LICENSE-3RD-PARTY.txt b/metropolis/node/core/tpm/eventlog/LICENSE-3RD-PARTY.txt
deleted file mode 100644
index 2d3298c..0000000
--- a/metropolis/node/core/tpm/eventlog/LICENSE-3RD-PARTY.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Copyright 2020 Google Inc.
-Licensed under the Apache License, Version 2.0 (the "License"); you may not
-use this file except in compliance with the License. You may obtain a copy of
-the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations under
-the License.
\ No newline at end of file
diff --git a/metropolis/node/core/tpm/eventlog/compat.go b/metropolis/node/core/tpm/eventlog/compat.go
deleted file mode 100644
index f83972b..0000000
--- a/metropolis/node/core/tpm/eventlog/compat.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package eventlog
-
-// This file contains compatibility functions for our TPM library
-
-import (
- "crypto"
-)
-
-// ConvertRawPCRs converts from raw PCRs to eventlog PCR structures
-func ConvertRawPCRs(pcrs [][]byte) []PCR {
- var evPCRs []PCR
- for i, digest := range pcrs {
- evPCRs = append(evPCRs, PCR{DigestAlg: crypto.SHA256, Index: i, Digest: digest})
- }
- return evPCRs
-}
diff --git a/metropolis/node/core/tpm/eventlog/eventlog.go b/metropolis/node/core/tpm/eventlog/eventlog.go
deleted file mode 100644
index 49a8a26..0000000
--- a/metropolis/node/core/tpm/eventlog/eventlog.go
+++ /dev/null
@@ -1,646 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Taken and pruned from go-attestation revision 2453c8f39a4ff46009f6a9db6fb7c6cca789d9a1 under Apache 2.0
-
-package eventlog
-
-import (
- "bytes"
- "crypto"
- "crypto/sha1"
- "crypto/sha256"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "sort"
-
- // Ensure hashes are available.
- _ "crypto/sha256"
-
- "github.com/google/go-tpm/tpm2"
-)
-
-// HashAlg identifies a hashing algorithm.
-type HashAlg uint8
-
-// Valid hash algorithms.
-var (
- HashSHA1 = HashAlg(tpm2.AlgSHA1)
- HashSHA256 = HashAlg(tpm2.AlgSHA256)
-)
-
-func (a HashAlg) cryptoHash() crypto.Hash {
- switch a {
- case HashSHA1:
- return crypto.SHA1
- case HashSHA256:
- return crypto.SHA256
- }
- return 0
-}
-
-func (a HashAlg) goTPMAlg() tpm2.Algorithm {
- switch a {
- case HashSHA1:
- return tpm2.AlgSHA1
- case HashSHA256:
- return tpm2.AlgSHA256
- }
- return 0
-}
-
-// String returns a human-friendly representation of the hash algorithm.
-func (a HashAlg) String() string {
- switch a {
- case HashSHA1:
- return "SHA1"
- case HashSHA256:
- return "SHA256"
- }
- return fmt.Sprintf("HashAlg<%d>", int(a))
-}
-
-// ReplayError describes the parsed events that failed to verify against
-// a particular PCR.
-type ReplayError struct {
- Events []Event
- invalidPCRs []int
-}
-
-func (e ReplayError) affected(pcr int) bool {
- for _, p := range e.invalidPCRs {
- if p == pcr {
- return true
- }
- }
- return false
-}
-
-// Error returns a human-friendly description of replay failures.
-func (e ReplayError) Error() string {
- return fmt.Sprintf("event log failed to verify: the following registers failed to replay: %v", e.invalidPCRs)
-}
-
-// TPM algorithms. See the TPM 2.0 specification section 6.3.
-//
-// https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-2-Structures-01.38.pdf#page=42
-const (
- algSHA1 uint16 = 0x0004
- algSHA256 uint16 = 0x000B
-)
-
-// EventType indicates what kind of data an event is reporting.
-type EventType uint32
-
-// Event is a single event from a TCG event log. This reports discrete items such
-// as BIOS measurements or EFI states.
-type Event struct {
- // order of the event in the event log.
- sequence int
-
- // PCR index of the event.
- Index int
- // Type of the event.
- Type EventType
-
- // Data of the event. For certain kinds of events, this must match the event
- // digest to be valid.
- Data []byte
-	// Digest is the verified digest of the event data. While an event can have
-	// multiple digests for different hash values, this is the one that was matched to the
-	// PCR value.
- Digest []byte
-
- // TODO(ericchiang): Provide examples or links for which event types must
- // match their data to their digest.
-}
-
-func (e *Event) digestEquals(b []byte) error {
- if len(e.Digest) == 0 {
- return errors.New("no digests present")
- }
-
- switch len(e.Digest) {
- case crypto.SHA256.Size():
- s := sha256.Sum256(b)
- if bytes.Equal(s[:], e.Digest) {
- return nil
- }
- case crypto.SHA1.Size():
- s := sha1.Sum(b)
- if bytes.Equal(s[:], e.Digest) {
- return nil
- }
- default:
- return fmt.Errorf("cannot compare hash of length %d", len(e.Digest))
- }
-
- return fmt.Errorf("digest (len %d) does not match", len(e.Digest))
-}
-
-// EventLog is a parsed measurement log. This contains unverified data representing
-// boot events that must be replayed against PCR values to determine authenticity.
-type EventLog struct {
- // Algs holds the set of algorithms that the event log uses.
- Algs []HashAlg
-
- rawEvents []rawEvent
-}
-
-func (e *EventLog) clone() *EventLog {
- out := EventLog{
- Algs: make([]HashAlg, len(e.Algs)),
- rawEvents: make([]rawEvent, len(e.rawEvents)),
- }
- copy(out.Algs, e.Algs)
- copy(out.rawEvents, e.rawEvents)
- return &out
-}
-
-type elWorkaround struct {
- id string
- affectedPCR int
- apply func(e *EventLog) error
-}
-
-// inject3 appends two new events into the event log.
-func inject3(e *EventLog, pcr int, data1, data2, data3 string) error {
- if err := inject(e, pcr, data1); err != nil {
- return err
- }
- if err := inject(e, pcr, data2); err != nil {
- return err
- }
- return inject(e, pcr, data3)
-}
-
-// inject2 appends two new events into the event log.
-func inject2(e *EventLog, pcr int, data1, data2 string) error {
- if err := inject(e, pcr, data1); err != nil {
- return err
- }
- return inject(e, pcr, data2)
-}
-
-// inject appends a new event into the event log.
-func inject(e *EventLog, pcr int, data string) error {
- evt := rawEvent{
- data: []byte(data),
- index: pcr,
- sequence: e.rawEvents[len(e.rawEvents)-1].sequence + 1,
- }
- for _, alg := range e.Algs {
- h := alg.cryptoHash().New()
- h.Write([]byte(data))
- evt.digests = append(evt.digests, digest{hash: alg.cryptoHash(), data: h.Sum(nil)})
- }
- e.rawEvents = append(e.rawEvents, evt)
- return nil
-}
-
-const (
- ebsInvocation = "Exit Boot Services Invocation"
- ebsSuccess = "Exit Boot Services Returned with Success"
- ebsFailure = "Exit Boot Services Returned with Failure"
-)
-
-var eventlogWorkarounds = []elWorkaround{
- {
- id: "EBS Invocation + Success",
- affectedPCR: 5,
- apply: func(e *EventLog) error {
- return inject2(e, 5, ebsInvocation, ebsSuccess)
- },
- },
- {
- id: "EBS Invocation + Failure",
- affectedPCR: 5,
- apply: func(e *EventLog) error {
- return inject2(e, 5, ebsInvocation, ebsFailure)
- },
- },
- {
- id: "EBS Invocation + Failure + Success",
- affectedPCR: 5,
- apply: func(e *EventLog) error {
- return inject3(e, 5, ebsInvocation, ebsFailure, ebsSuccess)
- },
- },
-}
-
-// Verify replays the event log against a TPM's PCR values, returning the
-// events which could be matched to a provided PCR value.
-// An error is returned if the replayed digest for events with a given PCR
-// index does not match any provided value for that PCR index.
-func (e *EventLog) Verify(pcrs []PCR) ([]Event, error) {
- events, err := e.verify(pcrs)
- // If there were any issues replaying the PCRs, try each of the workarounds
- // in turn.
- // TODO(jsonp): Allow workarounds to be combined.
- if rErr, isReplayErr := err.(ReplayError); isReplayErr {
- for _, wkrd := range eventlogWorkarounds {
- if !rErr.affected(wkrd.affectedPCR) {
- continue
- }
- el := e.clone()
- if err := wkrd.apply(el); err != nil {
- return nil, fmt.Errorf("failed applying workaround %q: %v", wkrd.id, err)
- }
- if events, err := el.verify(pcrs); err == nil {
- return events, nil
- }
- }
- }
-
- return events, err
-}
-
-// PCR encapsulates the value of a PCR at a point in time.
-type PCR struct {
- Index int
- Digest []byte
- DigestAlg crypto.Hash
-}
-
-func (e *EventLog) verify(pcrs []PCR) ([]Event, error) {
- events, err := replayEvents(e.rawEvents, pcrs)
- if err != nil {
- if _, isReplayErr := err.(ReplayError); isReplayErr {
- return nil, err
- }
- return nil, fmt.Errorf("pcrs failed to replay: %v", err)
- }
- return events, nil
-}
-
-func extend(pcr PCR, replay []byte, e rawEvent) (pcrDigest []byte, eventDigest []byte, err error) {
- h := pcr.DigestAlg
-
- for _, digest := range e.digests {
- if digest.hash != pcr.DigestAlg {
- continue
- }
- if len(digest.data) != len(pcr.Digest) {
- return nil, nil, fmt.Errorf("digest data length (%d) doesn't match PCR digest length (%d)", len(digest.data), len(pcr.Digest))
- }
- hash := h.New()
- if len(replay) != 0 {
- hash.Write(replay)
- } else {
- b := make([]byte, h.Size())
- hash.Write(b)
- }
- hash.Write(digest.data)
- return hash.Sum(nil), digest.data, nil
- }
- return nil, nil, fmt.Errorf("no event digest matches pcr algorithm: %v", pcr.DigestAlg)
-}
-
-// replayPCR replays the event log for a specific PCR, using pcr and
-// event digests with the algorithm in pcr. The replay fails if the
-// replayed values do not match the final PCR digest, or any event tagged
-// with that PCR does not possess an event digest with the specified algorithm.
-func replayPCR(rawEvents []rawEvent, pcr PCR) ([]Event, bool) {
- var (
- replay []byte
- outEvents []Event
- )
-
- for _, e := range rawEvents {
- if e.index != pcr.Index {
- continue
- }
-
- replayValue, digest, err := extend(pcr, replay, e)
- if err != nil {
- return nil, false
- }
- replay = replayValue
- outEvents = append(outEvents, Event{sequence: e.sequence, Data: e.data, Digest: digest, Index: pcr.Index, Type: e.typ})
- }
-
- if len(outEvents) > 0 && !bytes.Equal(replay, pcr.Digest) {
- return nil, false
- }
- return outEvents, true
-}
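
The replay performed by extend and replayPCR is the standard TPM PCR extend operation, newPCR = H(oldPCR || eventDigest), folded over all events of one PCR index, starting from an all-zero buffer. A standalone SHA-256 sketch, not taken from this tree:

package main

import (
	"crypto/sha256"
	"fmt"
)

// extend computes newPCR = H(oldPCR || eventDigest), the PCR extend operation
// that replayPCR above folds over all events of one PCR index.
func extend(pcr, eventDigest []byte) []byte {
	h := sha256.New()
	h.Write(pcr)
	h.Write(eventDigest)
	return h.Sum(nil)
}

func sha256Sum(s string) []byte {
	sum := sha256.Sum256([]byte(s))
	return sum[:]
}

func main() {
	// PCRs start out as all-zero buffers of the digest size.
	pcr := make([]byte, sha256.Size)
	for _, d := range [][]byte{
		sha256Sum("event one"),
		sha256Sum("event two"),
	} {
		pcr = extend(pcr, d)
	}
	fmt.Printf("replayed PCR value: %x\n", pcr)
}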
-
-type pcrReplayResult struct {
- events []Event
- successful bool
-}
-
-func replayEvents(rawEvents []rawEvent, pcrs []PCR) ([]Event, error) {
- var (
- invalidReplays []int
- verifiedEvents []Event
- allPCRReplays = map[int][]pcrReplayResult{}
- )
-
- // Replay the event log for every PCR and digest algorithm combination.
- for _, pcr := range pcrs {
- events, ok := replayPCR(rawEvents, pcr)
- allPCRReplays[pcr.Index] = append(allPCRReplays[pcr.Index], pcrReplayResult{events, ok})
- }
-
- // Record PCR indices which do not have any successful replay. Record the
- // events for a successful replay.
-pcrLoop:
- for i, replaysForPCR := range allPCRReplays {
- for _, replay := range replaysForPCR {
- if replay.successful {
- // We consider the PCR verified at this stage: The replay of values with
- // one digest algorithm matched a provided value.
- // As such, we save the PCR's events, and proceed to the next PCR.
- verifiedEvents = append(verifiedEvents, replay.events...)
- continue pcrLoop
- }
- }
- invalidReplays = append(invalidReplays, i)
- }
-
- if len(invalidReplays) > 0 {
- events := make([]Event, 0, len(rawEvents))
- for _, e := range rawEvents {
- events = append(events, Event{e.sequence, e.index, e.typ, e.data, nil})
- }
- return nil, ReplayError{
- Events: events,
- invalidPCRs: invalidReplays,
- }
- }
-
- sort.Slice(verifiedEvents, func(i int, j int) bool {
- return verifiedEvents[i].sequence < verifiedEvents[j].sequence
- })
- return verifiedEvents, nil
-}
-
-// EV_NO_ACTION is a special event type that indicates information to the parser
-// instead of holding a measurement. For TPM 2.0, this event type is used to signal
-// switching from SHA1 format to a variable length digest.
-//
-// https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClientSpecPlat_TPM_2p0_1p04_pub.pdf#page=110
-const eventTypeNoAction = 0x03
-
-// ParseEventLog parses an unverified measurement log.
-func ParseEventLog(measurementLog []byte) (*EventLog, error) {
- var specID *specIDEvent
- r := bytes.NewBuffer(measurementLog)
- parseFn := parseRawEvent
- var el EventLog
- e, err := parseFn(r, specID)
- if err != nil {
- return nil, fmt.Errorf("parse first event: %v", err)
- }
- if e.typ == eventTypeNoAction {
- specID, err = parseSpecIDEvent(e.data)
- if err != nil {
- return nil, fmt.Errorf("failed to parse spec ID event: %v", err)
- }
- for _, alg := range specID.algs {
- switch tpm2.Algorithm(alg.ID) {
- case tpm2.AlgSHA1:
- el.Algs = append(el.Algs, HashSHA1)
- case tpm2.AlgSHA256:
- el.Algs = append(el.Algs, HashSHA256)
- }
- }
- if len(el.Algs) == 0 {
- return nil, fmt.Errorf("measurement log didn't use sha1 or sha256 digests")
- }
- // Switch to parsing crypto agile events. Don't include this in the
- // replayed events since it intentionally doesn't extend the PCRs.
- //
-		// Note that this doesn't actually guarantee that events have SHA256
- // digests.
- parseFn = parseRawEvent2
- } else {
- el.Algs = []HashAlg{HashSHA1}
- el.rawEvents = append(el.rawEvents, e)
- }
- sequence := 1
- for r.Len() != 0 {
- e, err := parseFn(r, specID)
- if err != nil {
- return nil, err
- }
- e.sequence = sequence
- sequence++
- el.rawEvents = append(el.rawEvents, e)
- }
- return &el, nil
-}
-
-type specIDEvent struct {
- algs []specAlgSize
-}
-
-type specAlgSize struct {
- ID uint16
- Size uint16
-}
-
-// Expected values for various Spec ID Event fields.
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=19
-var wantSignature = [16]byte{0x53, 0x70,
- 0x65, 0x63, 0x20, 0x49,
- 0x44, 0x20, 0x45, 0x76,
- 0x65, 0x6e, 0x74, 0x30,
- 0x33, 0x00} // "Spec ID Event03\0"
-
-const (
- wantMajor = 2
- wantMinor = 0
- wantErrata = 0
-)
-
-// parseSpecIDEvent parses a TCG_EfiSpecIDEventStruct structure from the reader.
-//
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=18
-func parseSpecIDEvent(b []byte) (*specIDEvent, error) {
- r := bytes.NewReader(b)
- var header struct {
- Signature [16]byte
- PlatformClass uint32
- VersionMinor uint8
- VersionMajor uint8
- Errata uint8
- UintnSize uint8
- NumAlgs uint32
- }
- if err := binary.Read(r, binary.LittleEndian, &header); err != nil {
- return nil, fmt.Errorf("reading event header: %v", err)
- }
- if header.Signature != wantSignature {
- return nil, fmt.Errorf("invalid spec id signature: %x", header.Signature)
- }
- if header.VersionMajor != wantMajor {
- return nil, fmt.Errorf("invalid spec major version, got %02x, wanted %02x",
- header.VersionMajor, wantMajor)
- }
- if header.VersionMinor != wantMinor {
- return nil, fmt.Errorf("invalid spec minor version, got %02x, wanted %02x",
-			header.VersionMinor, wantMinor)
- }
-
- // TODO(ericchiang): Check errata? Or do we expect that to change in ways
- // we're okay with?
-
- specAlg := specAlgSize{}
- e := specIDEvent{}
- for i := 0; i < int(header.NumAlgs); i++ {
- if err := binary.Read(r, binary.LittleEndian, &specAlg); err != nil {
- return nil, fmt.Errorf("reading algorithm: %v", err)
- }
- e.algs = append(e.algs, specAlg)
- }
-
- var vendorInfoSize uint8
- if err := binary.Read(r, binary.LittleEndian, &vendorInfoSize); err != nil {
-		return nil, fmt.Errorf("reading vendor info size: %v", err)
- }
- if r.Len() != int(vendorInfoSize) {
- return nil, fmt.Errorf("reading vendor info, expected %d remaining bytes, got %d", vendorInfoSize, r.Len())
- }
- return &e, nil
-}
-
-type digest struct {
- hash crypto.Hash
- data []byte
-}
-
-type rawEvent struct {
- sequence int
- index int
- typ EventType
- data []byte
- digests []digest
-}
-
-// TPM 1.2 event log format. See "5.1 SHA1 Event Log Entry Format"
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
-type rawEventHeader struct {
- PCRIndex uint32
- Type uint32
- Digest [20]byte
- EventSize uint32
-}
-
-type eventSizeErr struct {
- eventSize uint32
- logSize int
-}
-
-func (e *eventSizeErr) Error() string {
- return fmt.Sprintf("event data size (%d bytes) is greater than remaining measurement log (%d bytes)", e.eventSize, e.logSize)
-}
-
-func parseRawEvent(r *bytes.Buffer, specID *specIDEvent) (event rawEvent, err error) {
- var h rawEventHeader
- if err = binary.Read(r, binary.LittleEndian, &h); err != nil {
- return event, err
- }
- if h.EventSize == 0 {
- return event, errors.New("event data size is 0")
- }
- if h.EventSize > uint32(r.Len()) {
- return event, &eventSizeErr{h.EventSize, r.Len()}
- }
-
- data := make([]byte, int(h.EventSize))
- if _, err := io.ReadFull(r, data); err != nil {
- return event, err
- }
-
- digests := []digest{{hash: crypto.SHA1, data: h.Digest[:]}}
-
- return rawEvent{
- typ: EventType(h.Type),
- data: data,
- index: int(h.PCRIndex),
- digests: digests,
- }, nil
-}
-
-// TPM 2.0 event log format. See "5.2 Crypto Agile Log Entry Format"
-// https://trustedcomputinggroup.org/wp-content/uploads/EFI-Protocol-Specification-rev13-160330final.pdf#page=15
-type rawEvent2Header struct {
- PCRIndex uint32
- Type uint32
-}
-
-func parseRawEvent2(r *bytes.Buffer, specID *specIDEvent) (event rawEvent, err error) {
- var h rawEvent2Header
-
- if err = binary.Read(r, binary.LittleEndian, &h); err != nil {
- return event, err
- }
- event.typ = EventType(h.Type)
- event.index = int(h.PCRIndex)
-
- // parse the event digests
- var numDigests uint32
- if err := binary.Read(r, binary.LittleEndian, &numDigests); err != nil {
- return event, err
- }
-
- for i := 0; i < int(numDigests); i++ {
- var algID uint16
- if err := binary.Read(r, binary.LittleEndian, &algID); err != nil {
- return event, err
- }
- var digest digest
-
- for _, alg := range specID.algs {
- if alg.ID != algID {
- continue
- }
- if uint16(r.Len()) < alg.Size {
- return event, fmt.Errorf("reading digest: %v", io.ErrUnexpectedEOF)
- }
- digest.data = make([]byte, alg.Size)
- digest.hash = HashAlg(alg.ID).cryptoHash()
- }
- if len(digest.data) == 0 {
- return event, fmt.Errorf("unknown algorithm ID %x", algID)
- }
- if _, err := io.ReadFull(r, digest.data); err != nil {
- return event, err
- }
- event.digests = append(event.digests, digest)
- }
-
- // parse event data
- var eventSize uint32
- if err = binary.Read(r, binary.LittleEndian, &eventSize); err != nil {
- return event, err
- }
- if eventSize == 0 {
- return event, errors.New("event data size is 0")
- }
- if eventSize > uint32(r.Len()) {
- return event, &eventSizeErr{eventSize, r.Len()}
- }
- event.data = make([]byte, int(eventSize))
- if _, err := io.ReadFull(r, event.data); err != nil {
- return event, err
- }
- return event, err
-}
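
End to end, the parser and verifier above combine with ConvertRawPCRs from compat.go roughly as follows. This is a hedged sketch: the package name attest is hypothetical, the eventlog import path is assumed to follow the //metropolis/pkg/tpm move from this diff, and reading the raw log and PCR values from the TPM is elided.

package attest

import (
	"fmt"

	"git.monogon.dev/source/nexantic.git/metropolis/pkg/tpm/eventlog"
)

// verifyBootLog replays a raw TCG measurement log against raw SHA-256 PCR
// values read from the TPM and returns the events that verified.
func verifyBootLog(rawLog []byte, rawPCRs [][]byte) ([]eventlog.Event, error) {
	el, err := eventlog.ParseEventLog(rawLog)
	if err != nil {
		return nil, fmt.Errorf("parsing event log: %w", err)
	}
	events, err := el.Verify(eventlog.ConvertRawPCRs(rawPCRs))
	if err != nil {
		return nil, fmt.Errorf("replaying event log: %w", err)
	}
	return events, nil
}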
diff --git a/metropolis/node/core/tpm/eventlog/internal/BUILD.bazel b/metropolis/node/core/tpm/eventlog/internal/BUILD.bazel
deleted file mode 100644
index 48e1e81..0000000
--- a/metropolis/node/core/tpm/eventlog/internal/BUILD.bazel
+++ /dev/null
@@ -1,12 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = ["events.go"],
- importpath = "git.monogon.dev/source/nexantic.git/metropolis/node/core/tpm/eventlog/internal",
- visibility = ["//metropolis/node/core/tpm/eventlog:__subpackages__"],
- deps = [
- "@com_github_google_certificate_transparency_go//asn1:go_default_library",
- "@com_github_google_certificate_transparency_go//x509:go_default_library",
- ],
-)
diff --git a/metropolis/node/core/tpm/eventlog/internal/events.go b/metropolis/node/core/tpm/eventlog/internal/events.go
deleted file mode 100644
index d9b933b..0000000
--- a/metropolis/node/core/tpm/eventlog/internal/events.go
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Taken from go-attestation under Apache 2.0
-package internal
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "unicode/utf16"
-
- "github.com/google/certificate-transparency-go/asn1"
- "github.com/google/certificate-transparency-go/x509"
-)
-
-const (
- // maxNameLen is the maximum accepted byte length for a name field.
- // This value should be larger than any reasonable value.
- maxNameLen = 2048
- // maxDataLen is the maximum size in bytes of a variable data field.
- // This value should be larger than any reasonable value.
- maxDataLen = 1024 * 1024 // 1 Megabyte.
-)
-
-// GUIDs representing the contents of a UEFI_SIGNATURE_LIST.
-var (
- hashSHA256SigGUID = efiGUID{0xc1c41626, 0x504c, 0x4092, [8]byte{0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28}}
- hashSHA1SigGUID = efiGUID{0x826ca512, 0xcf10, 0x4ac9, [8]byte{0xb1, 0x87, 0xbe, 0x01, 0x49, 0x66, 0x31, 0xbd}}
- hashSHA224SigGUID = efiGUID{0x0b6e5233, 0xa65c, 0x44c9, [8]byte{0x94, 0x07, 0xd9, 0xab, 0x83, 0xbf, 0xc8, 0xbd}}
- hashSHA384SigGUID = efiGUID{0xff3e5307, 0x9fd0, 0x48c9, [8]byte{0x85, 0xf1, 0x8a, 0xd5, 0x6c, 0x70, 0x1e, 0x01}}
- hashSHA512SigGUID = efiGUID{0x093e0fae, 0xa6c4, 0x4f50, [8]byte{0x9f, 0x1b, 0xd4, 0x1e, 0x2b, 0x89, 0xc1, 0x9a}}
- keyRSA2048SigGUID = efiGUID{0x3c5766e8, 0x269c, 0x4e34, [8]byte{0xaa, 0x14, 0xed, 0x77, 0x6e, 0x85, 0xb3, 0xb6}}
- certRSA2048SHA256SigGUID = efiGUID{0xe2b36190, 0x879b, 0x4a3d, [8]byte{0xad, 0x8d, 0xf2, 0xe7, 0xbb, 0xa3, 0x27, 0x84}}
- certRSA2048SHA1SigGUID = efiGUID{0x67f8444f, 0x8743, 0x48f1, [8]byte{0xa3, 0x28, 0x1e, 0xaa, 0xb8, 0x73, 0x60, 0x80}}
- certX509SigGUID = efiGUID{0xa5c059a1, 0x94e4, 0x4aa7, [8]byte{0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72}}
- certHashSHA256SigGUID = efiGUID{0x3bd2a492, 0x96c0, 0x4079, [8]byte{0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed}}
- certHashSHA384SigGUID = efiGUID{0x7076876e, 0x80c2, 0x4ee6, [8]byte{0xaa, 0xd2, 0x28, 0xb3, 0x49, 0xa6, 0x86, 0x5b}}
- certHashSHA512SigGUID = efiGUID{0x446dbf63, 0x2502, 0x4cda, [8]byte{0xbc, 0xfa, 0x24, 0x65, 0xd2, 0xb0, 0xfe, 0x9d}}
-)
-
-// EventType describes the type of event signalled in the event log.
-type EventType uint32
-
-// BIOS Events (TCG PC Client Specific Implementation Specification for Conventional BIOS 1.21)
-const (
- PrebootCert EventType = 0x00000000
- PostCode EventType = 0x00000001
- unused EventType = 0x00000002
- NoAction EventType = 0x00000003
- Separator EventType = 0x00000004
- Action EventType = 0x00000005
- EventTag EventType = 0x00000006
- SCRTMContents EventType = 0x00000007
- SCRTMVersion EventType = 0x00000008
- CpuMicrocode EventType = 0x00000009
- PlatformConfigFlags EventType = 0x0000000A
- TableOfDevices EventType = 0x0000000B
- CompactHash EventType = 0x0000000C
- Ipl EventType = 0x0000000D
- IplPartitionData EventType = 0x0000000E
- NonhostCode EventType = 0x0000000F
- NonhostConfig EventType = 0x00000010
- NonhostInfo EventType = 0x00000011
- OmitBootDeviceEvents EventType = 0x00000012
-)
-
-// EFI Events (TCG EFI Platform Specification Version 1.22)
-const (
- EFIEventBase EventType = 0x80000000
- EFIVariableDriverConfig EventType = 0x80000001
- EFIVariableBoot EventType = 0x80000002
- EFIBootServicesApplication EventType = 0x80000003
- EFIBootServicesDriver EventType = 0x80000004
- EFIRuntimeServicesDriver EventType = 0x80000005
- EFIGPTEvent EventType = 0x80000006
- EFIAction EventType = 0x80000007
- EFIPlatformFirmwareBlob EventType = 0x80000008
- EFIHandoffTables EventType = 0x80000009
- EFIHCRTMEvent EventType = 0x80000010
- EFIVariableAuthority EventType = 0x800000e0
-)
-
-// ErrSigMissingGUID is returned if an EFI_SIGNATURE_DATA structure was parsed
-// successfully but was missing the SignatureOwner GUID. This case is
-// handled specially as a workaround for a bug relating to authority events.
-var ErrSigMissingGUID = errors.New("signature data was missing owner GUID")
-
-var eventTypeNames = map[EventType]string{
- PrebootCert: "Preboot Cert",
- PostCode: "POST Code",
- unused: "Unused",
- NoAction: "No Action",
- Separator: "Separator",
- Action: "Action",
- EventTag: "Event Tag",
- SCRTMContents: "S-CRTM Contents",
- SCRTMVersion: "S-CRTM Version",
- CpuMicrocode: "CPU Microcode",
- PlatformConfigFlags: "Platform Config Flags",
- TableOfDevices: "Table of Devices",
- CompactHash: "Compact Hash",
- Ipl: "IPL",
- IplPartitionData: "IPL Partition Data",
- NonhostCode: "Non-Host Code",
-	NonhostConfig:        "Non-Host Config",
- NonhostInfo: "Non-Host Info",
- OmitBootDeviceEvents: "Omit Boot Device Events",
-
- EFIEventBase: "EFI Event Base",
- EFIVariableDriverConfig: "EFI Variable Driver Config",
- EFIVariableBoot: "EFI Variable Boot",
- EFIBootServicesApplication: "EFI Boot Services Application",
- EFIBootServicesDriver: "EFI Boot Services Driver",
- EFIRuntimeServicesDriver: "EFI Runtime Services Driver",
- EFIGPTEvent: "EFI GPT Event",
- EFIAction: "EFI Action",
- EFIPlatformFirmwareBlob: "EFI Platform Firmware Blob",
- EFIVariableAuthority: "EFI Variable Authority",
- EFIHandoffTables: "EFI Handoff Tables",
- EFIHCRTMEvent: "EFI H-CRTM Event",
-}
-
-func (e EventType) String() string {
- if s, ok := eventTypeNames[e]; ok {
- return s
- }
- return fmt.Sprintf("EventType(0x%x)", uint32(e))
-}
-
-// UntrustedParseEventType returns the event type indicated by
-// the provided value.
-func UntrustedParseEventType(et uint32) (EventType, error) {
- // "The value associated with a UEFI specific platform event type MUST be in
- // the range between 0x80000000 and 0x800000FF, inclusive."
-	if et > 0x12 && (et < 0x80000000 || et > 0x800000FF) {
- return EventType(0), fmt.Errorf("event type not between [0x0, 0x12] or [0x80000000, 0x800000FF]: got %#x", et)
- }
- if _, ok := eventTypeNames[EventType(et)]; !ok {
- return EventType(0), fmt.Errorf("unknown event type %#x", et)
- }
- return EventType(et), nil
-}
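-
-// Illustrative only (not part of the original file): values from either
-// half of the valid range parse, anything else is rejected.
-//
-//	UntrustedParseEventType(0x04)       // Separator, nil
-//	UntrustedParseEventType(0x80000001) // EFIVariableDriverConfig, nil
-//	UntrustedParseEventType(0x13)       // 0, error (outside both ranges)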
-
-// efiGUID represents the EFI_GUID type.
-// See section "2.3.1 Data Types" in the specification for more information.
-// type efiGUID [16]byte
-type efiGUID struct {
- Data1 uint32
- Data2 uint16
- Data3 uint16
- Data4 [8]byte
-}
-
-func (d efiGUID) String() string {
- var u [8]byte
- binary.BigEndian.PutUint32(u[:4], d.Data1)
- binary.BigEndian.PutUint16(u[4:6], d.Data2)
- binary.BigEndian.PutUint16(u[6:8], d.Data3)
- return fmt.Sprintf("%x-%x-%x-%x-%x", u[:4], u[4:6], u[6:8], d.Data4[:2], d.Data4[2:])
-}
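-
-// exampleGUIDString is an illustrative sketch (not part of the original
-// file): String renders the mixed-endianness EFI_GUID in its canonical
-// text form, e.g. certX509SigGUID prints as the well-known
-// EFI_CERT_X509_GUID.
-func exampleGUIDString() {
-	fmt.Println(certX509SigGUID.String()) // a5c059a1-94e4-4aa7-87b5-ab155c2bf072
-}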
-
-// UEFIVariableDataHeader represents the leading fixed-size fields
-// within UEFI_VARIABLE_DATA.
-type UEFIVariableDataHeader struct {
- VariableName efiGUID
- UnicodeNameLength uint64 // uintN
- VariableDataLength uint64 // uintN
-}
-
-// UEFIVariableData represents the UEFI_VARIABLE_DATA structure.
-type UEFIVariableData struct {
- Header UEFIVariableDataHeader
- UnicodeName []uint16
- VariableData []byte // []int8
-}
-
-// ParseUEFIVariableData parses the data section of an event structured as
-// a UEFI variable.
-//
-// https://trustedcomputinggroup.org/wp-content/uploads/TCG_PCClient_Specific_Platform_Profile_for_TPM_2p0_1p04_PUBLIC.pdf#page=100
-func ParseUEFIVariableData(r io.Reader) (ret UEFIVariableData, err error) {
- err = binary.Read(r, binary.LittleEndian, &ret.Header)
- if err != nil {
- return
- }
- if ret.Header.UnicodeNameLength > maxNameLen {
- return UEFIVariableData{}, fmt.Errorf("unicode name too long: %d > %d", ret.Header.UnicodeNameLength, maxNameLen)
- }
- ret.UnicodeName = make([]uint16, ret.Header.UnicodeNameLength)
- for i := 0; uint64(i) < ret.Header.UnicodeNameLength; i++ {
- err = binary.Read(r, binary.LittleEndian, &ret.UnicodeName[i])
- if err != nil {
- return
- }
- }
- if ret.Header.VariableDataLength > maxDataLen {
- return UEFIVariableData{}, fmt.Errorf("variable data too long: %d > %d", ret.Header.VariableDataLength, maxDataLen)
- }
- ret.VariableData = make([]byte, ret.Header.VariableDataLength)
- _, err = io.ReadFull(r, ret.VariableData)
- return
-}
-
-func (v *UEFIVariableData) VarName() string {
- return string(utf16.Decode(v.UnicodeName))
-}
-
-func (v *UEFIVariableData) SignatureData() (certs []x509.Certificate, hashes [][]byte, err error) {
- return parseEfiSignatureList(v.VariableData)
-}
-
-// UEFIVariableAuthority describes the contents of a UEFI variable authority
-// event.
-type UEFIVariableAuthority struct {
- Certs []x509.Certificate
-}
-
-// ParseUEFIVariableAuthority parses the data section of an event structured as
-// a UEFI variable authority.
-//
-// https://uefi.org/sites/default/files/resources/UEFI_Spec_2_8_final.pdf#page=1789
-func ParseUEFIVariableAuthority(r io.Reader) (UEFIVariableAuthority, error) {
- v, err := ParseUEFIVariableData(r)
- if err != nil {
- return UEFIVariableAuthority{}, err
- }
- certs, err := parseEfiSignature(v.VariableData)
- return UEFIVariableAuthority{Certs: certs}, err
-}
-
-// efiSignatureData represents the EFI_SIGNATURE_DATA type.
-// See section "31.4.1 Signature Database" in the specification for more information.
-type efiSignatureData struct {
- SignatureOwner efiGUID
- SignatureData []byte // []int8
-}
-
-// efiSignatureListHeader represents the fixed-size header of the EFI_SIGNATURE_LIST type.
-// See section "31.4.1 Signature Database" in the specification for more information.
-type efiSignatureListHeader struct {
- SignatureType efiGUID
- SignatureListSize uint32
- SignatureHeaderSize uint32
- SignatureSize uint32
-}
-
-type efiSignatureList struct {
- Header efiSignatureListHeader
- SignatureData []byte
- Signatures []byte
-}
-
-// parseEfiSignatureList parses an EFI_SIGNATURE_LIST structure.
-// The structure and related GUIDs are defined at:
-// https://uefi.org/sites/default/files/resources/UEFI_Spec_2_8_final.pdf#page=1790
-func parseEfiSignatureList(b []byte) ([]x509.Certificate, [][]byte, error) {
- if len(b) < 28 {
- // Being passed an empty signature list here appears to be valid
- return nil, nil, nil
- }
- signatures := efiSignatureList{}
- buf := bytes.NewReader(b)
- certificates := []x509.Certificate{}
- hashes := [][]byte{}
-
- for buf.Len() > 0 {
- err := binary.Read(buf, binary.LittleEndian, &signatures.Header)
- if err != nil {
- return nil, nil, err
- }
-
- if signatures.Header.SignatureHeaderSize > maxDataLen {
- return nil, nil, fmt.Errorf("signature header too large: %d > %d", signatures.Header.SignatureHeaderSize, maxDataLen)
- }
- if signatures.Header.SignatureListSize > maxDataLen {
- return nil, nil, fmt.Errorf("signature list too large: %d > %d", signatures.Header.SignatureListSize, maxDataLen)
- }
-
- signatureType := signatures.Header.SignatureType
- switch signatureType {
- case certX509SigGUID: // X509 certificate
- for sigOffset := 0; uint32(sigOffset) < signatures.Header.SignatureListSize-28; {
- signature := efiSignatureData{}
- signature.SignatureData = make([]byte, signatures.Header.SignatureSize-16)
- err := binary.Read(buf, binary.LittleEndian, &signature.SignatureOwner)
- if err != nil {
- return nil, nil, err
- }
- err = binary.Read(buf, binary.LittleEndian, &signature.SignatureData)
- if err != nil {
- return nil, nil, err
- }
- cert, err := x509.ParseCertificate(signature.SignatureData)
- if err != nil {
- return nil, nil, err
- }
- sigOffset += int(signatures.Header.SignatureSize)
- certificates = append(certificates, *cert)
- }
- case hashSHA256SigGUID: // SHA256
- for sigOffset := 0; uint32(sigOffset) < signatures.Header.SignatureListSize-28; {
- signature := efiSignatureData{}
- signature.SignatureData = make([]byte, signatures.Header.SignatureSize-16)
- err := binary.Read(buf, binary.LittleEndian, &signature.SignatureOwner)
- if err != nil {
- return nil, nil, err
- }
- err = binary.Read(buf, binary.LittleEndian, &signature.SignatureData)
- if err != nil {
- return nil, nil, err
- }
- hashes = append(hashes, signature.SignatureData)
- sigOffset += int(signatures.Header.SignatureSize)
- }
- case keyRSA2048SigGUID:
- err = errors.New("unhandled RSA2048 key")
- case certRSA2048SHA256SigGUID:
- err = errors.New("unhandled RSA2048-SHA256 key")
- case hashSHA1SigGUID:
- err = errors.New("unhandled SHA1 hash")
- case certRSA2048SHA1SigGUID:
- err = errors.New("unhandled RSA2048-SHA1 key")
- case hashSHA224SigGUID:
- err = errors.New("unhandled SHA224 hash")
- case hashSHA384SigGUID:
- err = errors.New("unhandled SHA384 hash")
- case hashSHA512SigGUID:
- err = errors.New("unhandled SHA512 hash")
- case certHashSHA256SigGUID:
- err = errors.New("unhandled X509-SHA256 hash metadata")
- case certHashSHA384SigGUID:
- err = errors.New("unhandled X509-SHA384 hash metadata")
- case certHashSHA512SigGUID:
- err = errors.New("unhandled X509-SHA512 hash metadata")
- default:
- err = fmt.Errorf("unhandled signature type %s", signatureType)
- }
- if err != nil {
- return nil, nil, err
- }
- }
- return certificates, hashes, nil
-}
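-
-// Illustrative note (not part of the original file): the 28 and 16 used
-// above follow directly from the UEFI wire layout.
-const (
-	exampleSigListHeaderSize = 16 + 4 + 4 + 4 // SignatureType GUID + SignatureListSize + SignatureHeaderSize + SignatureSize = 28
-	exampleSigOwnerSize      = 16             // SignatureOwner GUID prefixing each EFI_SIGNATURE_DATA entry
-)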
-
-// EFISignatureData represents the EFI_SIGNATURE_DATA type.
-// See section "31.4.1 Signature Database" in the specification
-// for more information.
-type EFISignatureData struct {
- SignatureOwner efiGUID
- SignatureData []byte // []int8
-}
-
-func parseEfiSignature(b []byte) ([]x509.Certificate, error) {
- certificates := []x509.Certificate{}
-
- if len(b) < 16 {
- return nil, fmt.Errorf("invalid signature: buffer smaller than header (%d < %d)", len(b), 16)
- }
-
- buf := bytes.NewReader(b)
- signature := EFISignatureData{}
- signature.SignatureData = make([]byte, len(b)-16)
-
- if err := binary.Read(buf, binary.LittleEndian, &signature.SignatureOwner); err != nil {
- return certificates, err
- }
- if err := binary.Read(buf, binary.LittleEndian, &signature.SignatureData); err != nil {
- return certificates, err
- }
-
- cert, err := x509.ParseCertificate(signature.SignatureData)
- if err == nil {
- certificates = append(certificates, *cert)
- } else {
- // A bug in shim may cause an event to be missing the SignatureOwner GUID.
- // We handle this, but signal back to the caller using ErrSigMissingGUID.
- if _, isStructuralErr := err.(asn1.StructuralError); isStructuralErr {
- var err2 error
- cert, err2 = x509.ParseCertificate(b)
- if err2 == nil {
- certificates = append(certificates, *cert)
- err = ErrSigMissingGUID
- }
- }
- }
- return certificates, err
-}
diff --git a/metropolis/node/core/tpm/eventlog/secureboot.go b/metropolis/node/core/tpm/eventlog/secureboot.go
deleted file mode 100644
index f117d30..0000000
--- a/metropolis/node/core/tpm/eventlog/secureboot.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Taken and pruned from go-attestation under Apache 2.0
-package eventlog
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- "github.com/google/certificate-transparency-go/x509"
-
- "git.monogon.dev/source/nexantic.git/metropolis/node/core/tpm/eventlog/internal"
-)
-
-// SecurebootState describes the secure boot status of a machine, as determined
-// by processing its event log.
-type SecurebootState struct {
- Enabled bool
-
- // PlatformKeys enumerates keys which can sign a key exchange key.
- PlatformKeys []x509.Certificate
-	// PlatformKeyHashes enumerates key hashes which can sign a key exchange key.
- PlatformKeyHashes [][]byte
-
- // ExchangeKeys enumerates keys which can sign a database of permitted or
- // forbidden keys.
- ExchangeKeys []x509.Certificate
-	// ExchangeKeyHashes enumerates key hashes which can sign a database of
- // permitted or forbidden keys.
- ExchangeKeyHashes [][]byte
-
- // PermittedKeys enumerates keys which may sign binaries to run.
- PermittedKeys []x509.Certificate
- // PermittedHashes enumerates hashes which permit binaries to run.
- PermittedHashes [][]byte
-
- // ForbiddenKeys enumerates keys which must not permit a binary to run.
- ForbiddenKeys []x509.Certificate
-	// ForbiddenHashes enumerates hashes which must not permit a binary to run.
- ForbiddenHashes [][]byte
-
- // PreSeparatorAuthority describes the use of a secure-boot key to authorize
- // the execution of a binary before the separator.
- PreSeparatorAuthority []x509.Certificate
- // PostSeparatorAuthority describes the use of a secure-boot key to authorize
- // the execution of a binary after the separator.
- PostSeparatorAuthority []x509.Certificate
-}
-
-// ParseSecurebootState parses a series of events to determine the
-// configuration of secure boot on a device. An error is returned if
-// the state cannot be determined, or if the event log is structured
-// in such a way that it may have been tampered with post-execution of
-// platform firmware.
-func ParseSecurebootState(events []Event) (*SecurebootState, error) {
- // This algorithm verifies the following:
- // - All events in PCR 7 have event types which are expected in PCR 7.
- // - All events are parsable according to their event type.
-	// - All events have digest values corresponding to their data/event type.
- // - No unverifiable events were present.
- // - All variables are specified before the separator and never duplicated.
- // - The SecureBoot variable has a value of 0 or 1.
- // - If SecureBoot was 1 (enabled), authority events were present indicating
- // keys were used to perform verification.
- // - If SecureBoot was 1 (enabled), platform + exchange + database keys
- // were specified.
- // - No UEFI debugger was attached.
-
- var (
- out SecurebootState
- seenSeparator bool
- seenAuthority bool
- seenVars = map[string]bool{}
- )
-
- for _, e := range events {
- if e.Index != 7 {
- continue
- }
-
- et, err := internal.UntrustedParseEventType(uint32(e.Type))
- if err != nil {
- return nil, fmt.Errorf("unrecognised event type: %v", err)
- }
-
- digestVerify := e.digestEquals(e.Data)
- switch et {
- case internal.Separator:
- if seenSeparator {
- return nil, fmt.Errorf("duplicate separator at event %d", e.sequence)
- }
- seenSeparator = true
- if !bytes.Equal(e.Data, []byte{0, 0, 0, 0}) {
- return nil, fmt.Errorf("invalid separator data at event %d: %v", e.sequence, e.Data)
- }
- if digestVerify != nil {
- return nil, fmt.Errorf("invalid separator digest at event %d: %v", e.sequence, digestVerify)
- }
-
- case internal.EFIAction:
- if string(e.Data) == "UEFI Debug Mode" {
- return nil, errors.New("a UEFI debugger was present during boot")
- }
- return nil, fmt.Errorf("event %d: unexpected EFI action event", e.sequence)
-
- case internal.EFIVariableDriverConfig:
- v, err := internal.ParseUEFIVariableData(bytes.NewReader(e.Data))
- if err != nil {
- return nil, fmt.Errorf("failed parsing EFI variable at event %d: %v", e.sequence, err)
- }
- if _, seenBefore := seenVars[v.VarName()]; seenBefore {
- return nil, fmt.Errorf("duplicate EFI variable %q at event %d", v.VarName(), e.sequence)
- }
- seenVars[v.VarName()] = true
- if seenSeparator {
- return nil, fmt.Errorf("event %d: variable %q specified after separator", e.sequence, v.VarName())
- }
-
- if digestVerify != nil {
- return nil, fmt.Errorf("invalid digest for variable %q on event %d: %v", v.VarName(), e.sequence, digestVerify)
- }
-
- switch v.VarName() {
- case "SecureBoot":
- if len(v.VariableData) != 1 {
- return nil, fmt.Errorf("event %d: SecureBoot data len is %d, expected 1", e.sequence, len(v.VariableData))
- }
- out.Enabled = v.VariableData[0] == 1
- case "PK":
- if out.PlatformKeys, out.PlatformKeyHashes, err = v.SignatureData(); err != nil {
- return nil, fmt.Errorf("event %d: failed parsing platform keys: %v", e.sequence, err)
- }
- case "KEK":
- if out.ExchangeKeys, out.ExchangeKeyHashes, err = v.SignatureData(); err != nil {
- return nil, fmt.Errorf("event %d: failed parsing key exchange keys: %v", e.sequence, err)
- }
- case "db":
- if out.PermittedKeys, out.PermittedHashes, err = v.SignatureData(); err != nil {
- return nil, fmt.Errorf("event %d: failed parsing signature database: %v", e.sequence, err)
- }
- case "dbx":
- if out.ForbiddenKeys, out.ForbiddenHashes, err = v.SignatureData(); err != nil {
- return nil, fmt.Errorf("event %d: failed parsing forbidden signature database: %v", e.sequence, err)
- }
- }
-
- case internal.EFIVariableAuthority:
- a, err := internal.ParseUEFIVariableAuthority(bytes.NewReader(e.Data))
- if err != nil {
- // Workaround for: https://github.com/google/go-attestation/issues/157
- if err == internal.ErrSigMissingGUID {
- // Versions of shim which do not carry
- // https://github.com/rhboot/shim/commit/8a27a4809a6a2b40fb6a4049071bf96d6ad71b50
- // have an erroneous additional byte in the event, which breaks digest
- // verification. If verification failed, we try removing the last byte.
- if digestVerify != nil {
- digestVerify = e.digestEquals(e.Data[:len(e.Data)-1])
- }
- } else {
- return nil, fmt.Errorf("failed parsing EFI variable authority at event %d: %v", e.sequence, err)
- }
- }
- seenAuthority = true
- if digestVerify != nil {
- return nil, fmt.Errorf("invalid digest for authority on event %d: %v", e.sequence, digestVerify)
- }
- if !seenSeparator {
- out.PreSeparatorAuthority = append(out.PreSeparatorAuthority, a.Certs...)
- } else {
- out.PostSeparatorAuthority = append(out.PostSeparatorAuthority, a.Certs...)
- }
-
- default:
- return nil, fmt.Errorf("unexpected event type: %v", et)
- }
- }
-
- if !out.Enabled {
- return &out, nil
- }
-
- if !seenAuthority {
- return nil, errors.New("secure boot was enabled but no key was used")
- }
- if len(out.PlatformKeys) == 0 && len(out.PlatformKeyHashes) == 0 {
- return nil, errors.New("secure boot was enabled but no platform keys were known")
- }
- if len(out.ExchangeKeys) == 0 && len(out.ExchangeKeyHashes) == 0 {
- return nil, errors.New("secure boot was enabled but no key exchange keys were known")
- }
- if len(out.PermittedKeys) == 0 && len(out.PermittedHashes) == 0 {
- return nil, errors.New("secure boot was enabled but no keys or hashes were permitted")
- }
- return &out, nil
-}
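-
-// exampleSecureboot is an illustrative usage sketch (not part of the
-// original file): callers feed in the parsed event log and inspect the
-// returned state.
-func exampleSecureboot(events []Event) (bool, error) {
-	sb, err := ParseSecurebootState(events)
-	if err != nil {
-		return false, err
-	}
-	return sb.Enabled, nil
-}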
diff --git a/metropolis/node/core/tpm/tpm.go b/metropolis/node/core/tpm/tpm.go
deleted file mode 100644
index 4106a66..0000000
--- a/metropolis/node/core/tpm/tpm.go
+++ /dev/null
@@ -1,561 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tpm
-
-import (
- "bytes"
- "crypto"
- "crypto/rand"
- "crypto/rsa"
- "crypto/x509"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/gogo/protobuf/proto"
- tpmpb "github.com/google/go-tpm-tools/proto"
- "github.com/google/go-tpm-tools/tpm2tools"
- "github.com/google/go-tpm/tpm2"
- "github.com/google/go-tpm/tpmutil"
- "github.com/pkg/errors"
- "golang.org/x/sys/unix"
-
- "git.monogon.dev/source/nexantic.git/metropolis/node/common/sysfs"
- "git.monogon.dev/source/nexantic.git/metropolis/node/core/logtree"
-)
-
-var (
- // SecureBootPCRs are all PCRs that measure the current Secure Boot configuration.
- // This is what we want if we rely on secure boot to verify boot integrity. The firmware
- // hashes the secure boot policy and custom keys into the PCR.
- //
- // This requires an extra step that provisions the custom keys.
- //
- // Some background: https://mjg59.dreamwidth.org/48897.html?thread=1847297
- // (the initramfs issue mentioned in the article has been solved by integrating
- // it into the kernel binary, and we don't have a shim bootloader)
- //
- // PCR7 alone is not sufficient - it needs to be combined with firmware measurements.
- SecureBootPCRs = []int{7}
-
-	// FirmwarePCRs are all PCRs that contain the firmware measurements.
- // See https://trustedcomputinggroup.org/wp-content/uploads/TCG_EFI_Platform_1_22_Final_-v15.pdf
- FirmwarePCRs = []int{
- 0, // platform firmware
- 2, // option ROM code
- 3, // option ROM configuration and data
- }
-
- // FullSystemPCRs are all PCRs that contain any measurements up to the currently running EFI payload.
- FullSystemPCRs = []int{
- 0, // platform firmware
- 1, // host platform configuration
- 2, // option ROM code
- 3, // option ROM configuration and data
- 4, // EFI payload
- }
-
-	// Using FullSystemPCRs is the most secure, but also the most brittle option, since updating
-	// the EFI binary, updating the platform firmware or changing platform settings
- // would invalidate the sealed data. It's annoying (but possible) to predict values for PCR4,
- // and even more annoying for the firmware PCR (comparison to known values on similar hardware
- // is the only thing that comes to mind).
- //
- // See also: https://github.com/mxre/sealkey (generates PCR4 from EFI image, BSD license)
- //
- // Using only SecureBootPCRs is the easiest and still reasonably secure, if we assume that the
- // platform knows how to take care of itself (i.e. Intel Boot Guard), and that secure boot
- // is implemented properly. It is, however, a much larger amount of code we need to trust.
- //
- // We do not care about PCR 5 (GPT partition table) since modifying it is harmless. All of
- // the boot options and cmdline are hardcoded in the kernel image, and we use no bootloader,
- // so there's no PCR for bootloader configuration or kernel cmdline.
-)
-
-var (
- numSRTMPCRs = 16
- srtmPCRs = tpm2.PCRSelection{Hash: tpm2.AlgSHA256, PCRs: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}
- // TCG Trusted Platform Module Library Level 00 Revision 0.99 Table 6
- tpmGeneratedValue = uint32(0xff544347)
-)
-
-var (
- // ErrNotExists is returned when no TPMs are available in the system
- ErrNotExists = errors.New("no TPMs found")
- // ErrNotInitialized is returned when this package was not initialized successfully
- ErrNotInitialized = errors.New("no TPM was initialized")
-)
-
-// Singleton since the TPM is too
-var tpm *TPM
-
-// We're serializing all TPM operations since it has a limited number of handles and recovering
-// if it runs out is difficult to implement correctly. Might also be marginally more secure.
-var lock sync.Mutex
-
-// TPM represents a high-level interface to a connected TPM 2.0
-type TPM struct {
- logger logtree.LeveledLogger
- device io.ReadWriteCloser
-
- // We keep the AK loaded since it's used fairly often and deriving it is expensive
- akHandleCache tpmutil.Handle
- akPublicKey crypto.PublicKey
-}
-
-// Initialize finds and opens the TPM (if any). If there is no TPM available it returns
-// ErrNotExists
-func Initialize(logger logtree.LeveledLogger) error {
- lock.Lock()
- defer lock.Unlock()
- tpmDir, err := os.Open("/sys/class/tpm")
- if err != nil {
- return errors.Wrap(err, "failed to open sysfs TPM class")
- }
- defer tpmDir.Close()
-
- tpms, err := tpmDir.Readdirnames(2)
- if err != nil {
- return errors.Wrap(err, "failed to read TPM device class")
- }
-
- if len(tpms) == 0 {
- return ErrNotExists
- }
- if len(tpms) > 1 {
- // If this is changed GetMeasurementLog() needs to be updated too
- logger.Warningf("Found more than one TPM, using the first one")
- }
- tpmName := tpms[0]
-	ueventData, err := sysfs.ReadUevents(filepath.Join("/sys/class/tpm", tpmName, "uevent"))
-	if err != nil {
-		return errors.Wrap(err, "failed to read TPM uevent")
-	}
-	majorDev, err := strconv.Atoi(ueventData["MAJOR"])
- if err != nil {
- return fmt.Errorf("failed to convert uevent: %w", err)
- }
- minorDev, err := strconv.Atoi(ueventData["MINOR"])
- if err != nil {
- return fmt.Errorf("failed to convert uevent: %w", err)
- }
- if err := unix.Mknod("/dev/tpm", 0600|unix.S_IFCHR, int(unix.Mkdev(uint32(majorDev), uint32(minorDev)))); err != nil {
- return errors.Wrap(err, "failed to create TPM device node")
- }
- device, err := tpm2.OpenTPM("/dev/tpm")
- if err != nil {
- return errors.Wrap(err, "failed to open TPM")
- }
- tpm = &TPM{
- device: device,
- logger: logger,
- }
- return nil
-}
-
-// GenerateSafeKey uses two sources of randomness (Kernel & TPM) to generate the key
-func GenerateSafeKey(size uint16) ([]byte, error) {
- lock.Lock()
- defer lock.Unlock()
- if tpm == nil {
- return []byte{}, ErrNotInitialized
- }
- encryptionKeyHost := make([]byte, size)
- if _, err := io.ReadFull(rand.Reader, encryptionKeyHost); err != nil {
- return []byte{}, errors.Wrap(err, "failed to generate host portion of new key")
- }
- var encryptionKeyTPM []byte
- for i := 48; i > 0; i-- {
- tpmKeyPart, err := tpm2.GetRandom(tpm.device, size-uint16(len(encryptionKeyTPM)))
- if err != nil {
- return []byte{}, errors.Wrap(err, "failed to generate TPM portion of new key")
- }
- encryptionKeyTPM = append(encryptionKeyTPM, tpmKeyPart...)
- if len(encryptionKeyTPM) >= int(size) {
- break
- }
- }
-
- if len(encryptionKeyTPM) != int(size) {
-		return []byte{}, fmt.Errorf("got incorrect amount of TPM randomness: %v, requested %v", len(encryptionKeyTPM), size)
- }
-
- encryptionKey := make([]byte, size)
- for i := uint16(0); i < size; i++ {
- encryptionKey[i] = encryptionKeyHost[i] ^ encryptionKeyTPM[i]
- }
- return encryptionKey, nil
-}
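-
-// exampleXORCombine is an illustrative sketch (not part of the original
-// file) of the combination step above: XOR-ing the host and TPM halves
-// (assumed equal length) means an attacker must predict both entropy
-// sources to predict the key.
-func exampleXORCombine(host, tpmPart []byte) []byte {
-	out := make([]byte, len(host))
-	for i := range host {
-		out[i] = host[i] ^ tpmPart[i]
-	}
-	return out
-}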
-
-// Seal seals sensitive data and only allows access if the current platform configuration
-// matches the one the data was sealed on.
-func Seal(data []byte, pcrs []int) ([]byte, error) {
- lock.Lock()
- defer lock.Unlock()
- if tpm == nil {
- return []byte{}, ErrNotInitialized
- }
- srk, err := tpm2tools.StorageRootKeyRSA(tpm.device)
- if err != nil {
- return []byte{}, errors.Wrap(err, "failed to load TPM SRK")
- }
- defer srk.Close()
-	sealedKey, err := srk.Seal(pcrs, data)
-	if err != nil {
-		return []byte{}, errors.Wrap(err, "failed to seal data")
-	}
-	sealedKeyRaw, err := proto.Marshal(sealedKey)
- if err != nil {
- return []byte{}, errors.Wrapf(err, "failed to marshal sealed data")
- }
- return sealedKeyRaw, nil
-}
-
-// Unseal unseals sensitive data if the current platform configuration and the
-// sealing constraints allow it.
-func Unseal(data []byte) ([]byte, error) {
- lock.Lock()
- defer lock.Unlock()
- if tpm == nil {
- return []byte{}, ErrNotInitialized
- }
- srk, err := tpm2tools.StorageRootKeyRSA(tpm.device)
- if err != nil {
- return []byte{}, errors.Wrap(err, "failed to load TPM SRK")
- }
- defer srk.Close()
-
- var sealedKey tpmpb.SealedBytes
- if err := proto.Unmarshal(data, &sealedKey); err != nil {
- return []byte{}, errors.Wrap(err, "failed to decode sealed data")
- }
- // Logging this for auditing purposes
- pcrList := []string{}
- for _, pcr := range sealedKey.Pcrs {
-		pcrList = append(pcrList, strconv.Itoa(int(pcr)))
- }
- tpm.logger.Infof("Attempting to unseal data protected with PCRs %s", strings.Join(pcrList, ","))
- unsealedData, err := srk.Unseal(&sealedKey)
- if err != nil {
- return []byte{}, errors.Wrap(err, "failed to unseal data")
- }
- return unsealedData, nil
-}
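-
-// exampleSealRoundTrip is an illustrative sketch (not part of the original
-// file): data sealed against SecureBootPCRs can only be unsealed while the
-// platform's Secure Boot configuration (PCR 7) is unchanged.
-func exampleSealRoundTrip(secret []byte) ([]byte, error) {
-	sealed, err := Seal(secret, SecureBootPCRs)
-	if err != nil {
-		return nil, err
-	}
-	return Unseal(sealed)
-}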
-
-// Standard AK template for RSA2048 non-duplicatable restricted signing for attestation
-var akTemplate = tpm2.Public{
- Type: tpm2.AlgRSA,
- NameAlg: tpm2.AlgSHA256,
- Attributes: tpm2.FlagSignerDefault,
- RSAParameters: &tpm2.RSAParams{
- Sign: &tpm2.SigScheme{
- Alg: tpm2.AlgRSASSA,
- Hash: tpm2.AlgSHA256,
- },
- KeyBits: 2048,
- },
-}
-
-func loadAK() error {
- var err error
- // Rationale: The AK is an EK-equivalent key and used only for attestation. Using a non-primary
- // key here would require us to store the wrapped version somewhere, which is inconvenient.
- // This being a primary key in the Endorsement hierarchy means that it can always be recreated
- // and can never be "destroyed". Under our security model this is of no concern since we identify
- // a node by its IK (Identity Key) which we can destroy.
- tpm.akHandleCache, tpm.akPublicKey, err = tpm2.CreatePrimary(tpm.device, tpm2.HandleEndorsement,
- tpm2.PCRSelection{}, "", "", akTemplate)
- return err
-}
-
-// Process documented in TCG EK Credential Profile 2.2.1
-func loadEK() (tpmutil.Handle, crypto.PublicKey, error) {
- // The EK is a primary key which is supposed to be certified by the manufacturer of the TPM.
- // Its public attributes are standardized in TCG EK Credential Profile 2.0 Table 1. These need
-	// to match exactly or we aren't getting the key the manufacturer signed. tpm2tools contains
- // such a template already, so we're using that instead of redoing it ourselves.
- // This ignores the more complicated ways EKs can be specified, the additional stuff you can do
- // is just absolutely crazy (see 2.2.1.2 onward)
- return tpm2.CreatePrimary(tpm.device, tpm2.HandleEndorsement,
- tpm2.PCRSelection{}, "", "", tpm2tools.DefaultEKTemplateRSA())
-}
-
-// GetAKPublic gets the TPM2T_PUBLIC of the AK key
-func GetAKPublic() ([]byte, error) {
- lock.Lock()
- defer lock.Unlock()
- if tpm == nil {
- return []byte{}, ErrNotInitialized
- }
- if tpm.akHandleCache == tpmutil.Handle(0) {
- if err := loadAK(); err != nil {
- return []byte{}, fmt.Errorf("failed to load AK primary key: %w", err)
- }
- }
- public, _, _, err := tpm2.ReadPublic(tpm.device, tpm.akHandleCache)
- if err != nil {
- return []byte{}, err
- }
- return public.Encode()
-}
-
-// TCG TPM v2.0 Provisioning Guidance v1.0 7.8 Table 2 and
-// TCG EK Credential Profile v2.1 2.2.1.4 de-facto Standard for Windows
-// These are both non-normative and reference Windows 10 documentation that's no longer available :(
-// But in practice this is what people are using, so whether or not it's normative doesn't really matter.
-const ekCertHandle = 0x01c00002
-
-// GetEKPublic gets the public key and (if available) Certificate of the EK
-func GetEKPublic() ([]byte, []byte, error) {
- lock.Lock()
- defer lock.Unlock()
- if tpm == nil {
- return []byte{}, []byte{}, ErrNotInitialized
- }
- ekHandle, publicRaw, err := loadEK()
- if err != nil {
- return []byte{}, []byte{}, fmt.Errorf("failed to load EK primary key: %w", err)
- }
- defer tpm2.FlushContext(tpm.device, ekHandle)
- // Don't question the use of HandleOwner, that's the Standard™
- ekCertRaw, err := tpm2.NVReadEx(tpm.device, ekCertHandle, tpm2.HandleOwner, "", 0)
- if err != nil {
- return []byte{}, []byte{}, err
- }
-
- publicKey, err := x509.MarshalPKIXPublicKey(publicRaw)
- if err != nil {
- return []byte{}, []byte{}, err
- }
-
- return publicKey, ekCertRaw, nil
-}
-
-// MakeAKChallenge generates a challenge for TPM residency and attributes of the AK
-func MakeAKChallenge(ekPubKey, akPub []byte, nonce []byte) ([]byte, []byte, error) {
- ekPubKeyData, err := x509.ParsePKIXPublicKey(ekPubKey)
- if err != nil {
- return []byte{}, []byte{}, fmt.Errorf("failed to decode EK pubkey: %w", err)
- }
- akPubData, err := tpm2.DecodePublic(akPub)
- if err != nil {
- return []byte{}, []byte{}, fmt.Errorf("failed to decode AK public part: %w", err)
- }
- // Make sure we're attesting the right attributes (in particular Restricted)
- if !akPubData.MatchesTemplate(akTemplate) {
- return []byte{}, []byte{}, errors.New("the key being challenged is not a valid AK")
- }
- akName, err := akPubData.Name()
- if err != nil {
- return []byte{}, []byte{}, fmt.Errorf("failed to derive AK name: %w", err)
- }
- return generateRSA(akName.Digest, ekPubKeyData.(*rsa.PublicKey), 16, nonce, rand.Reader)
-}
-
-// SolveAKChallenge solves a challenge for TPM residency of the AK
-func SolveAKChallenge(credBlob, secretChallenge []byte) ([]byte, error) {
- lock.Lock()
- defer lock.Unlock()
- if tpm == nil {
- return []byte{}, ErrNotInitialized
- }
- if tpm.akHandleCache == tpmutil.Handle(0) {
- if err := loadAK(); err != nil {
- return []byte{}, fmt.Errorf("failed to load AK primary key: %w", err)
- }
- }
-
- ekHandle, _, err := loadEK()
- if err != nil {
- return []byte{}, fmt.Errorf("failed to load EK: %w", err)
- }
- defer tpm2.FlushContext(tpm.device, ekHandle)
-
- // This is necessary since the EK requires an endorsement handle policy in its session
- // For us this is stupid because we keep all hierarchies open anyways since a) we cannot safely
- // store secrets on the OS side pre-global unlock and b) it makes no sense in this security model
- // since an uncompromised host OS will not let an untrusted entity attest as itself and a
- // compromised OS can either not pass PCR policy checks or the game's already over (you
- // successfully runtime-exploited a production Metropolis node)
- endorsementSession, _, err := tpm2.StartAuthSession(
- tpm.device,
- tpm2.HandleNull,
- tpm2.HandleNull,
- make([]byte, 16),
- nil,
- tpm2.SessionPolicy,
- tpm2.AlgNull,
- tpm2.AlgSHA256)
- if err != nil {
-		return []byte{}, fmt.Errorf("failed to start endorsement policy session: %w", err)
- }
- defer tpm2.FlushContext(tpm.device, endorsementSession)
-
- _, err = tpm2.PolicySecret(tpm.device, tpm2.HandleEndorsement, tpm2.AuthCommand{Session: tpm2.HandlePasswordSession, Attributes: tpm2.AttrContinueSession}, endorsementSession, nil, nil, nil, 0)
- if err != nil {
- return []byte{}, fmt.Errorf("failed to make a policy secret session: %w", err)
- }
-
- for {
- solution, err := tpm2.ActivateCredentialUsingAuth(tpm.device, []tpm2.AuthCommand{
- {Session: tpm2.HandlePasswordSession, Attributes: tpm2.AttrContinueSession}, // Use standard no-password authentication
- {Session: endorsementSession, Attributes: tpm2.AttrContinueSession}, // Use a full policy session for the EK
- }, tpm.akHandleCache, ekHandle, credBlob, secretChallenge)
- if warn, ok := err.(tpm2.Warning); ok && warn.Code == tpm2.RCRetry {
- time.Sleep(100 * time.Millisecond)
- continue
- }
- return solution, err
- }
-}
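-
-// exampleAKChallenge is an illustrative sketch (not part of the original
-// file) of the credential-activation round trip. In practice the two halves
-// run on different machines: the verifier encrypts a random secret to the
-// EK, bound to the AK's name, and the node proves AK residency by
-// recovering it.
-func exampleAKChallenge(ekPub, akPub, secret []byte) (bool, error) {
-	credBlob, encSecret, err := MakeAKChallenge(ekPub, akPub, secret)
-	if err != nil {
-		return false, err
-	}
-	solution, err := SolveAKChallenge(credBlob, encSecret)
-	if err != nil {
-		return false, err
-	}
-	return bytes.Equal(solution, secret), nil
-}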
-
-// FlushTransientHandles flushes all sessions and non-persistent handles
-func FlushTransientHandles() error {
- lock.Lock()
- defer lock.Unlock()
- if tpm == nil {
- return ErrNotInitialized
- }
- flushHandleTypes := []tpm2.HandleType{tpm2.HandleTypeTransient, tpm2.HandleTypeLoadedSession, tpm2.HandleTypeSavedSession}
- for _, handleType := range flushHandleTypes {
- handles, err := tpm2tools.Handles(tpm.device, handleType)
- if err != nil {
- return err
- }
- for _, handle := range handles {
- if err := tpm2.FlushContext(tpm.device, handle); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// AttestPlatform performs a PCR quote using the AK and returns the quote and its signature
-func AttestPlatform(nonce []byte) ([]byte, []byte, error) {
- lock.Lock()
- defer lock.Unlock()
- if tpm == nil {
- return []byte{}, []byte{}, ErrNotInitialized
- }
- if tpm.akHandleCache == tpmutil.Handle(0) {
- if err := loadAK(); err != nil {
- return []byte{}, []byte{}, fmt.Errorf("failed to load AK primary key: %w", err)
- }
- }
- // We only care about SHA256 since SHA1 is weak. This is supported on at least GCE and
- // Intel / AMD fTPM, which is good enough for now. Alg is null because that would just hash the
- // nonce, which is dumb.
- quote, signature, err := tpm2.Quote(tpm.device, tpm.akHandleCache, "", "", nonce, srtmPCRs,
- tpm2.AlgNull)
- if err != nil {
- return []byte{}, []byte{}, fmt.Errorf("failed to quote PCRs: %w", err)
- }
- return quote, signature.RSA.Signature, err
-}
-
-// VerifyAttestPlatform verifies a given attestation. You can rely on all data coming back as being
-// from the TPM to which the AK is bound.
-func VerifyAttestPlatform(nonce, akPub, quote, signature []byte) (*tpm2.AttestationData, error) {
- hash := crypto.SHA256.New()
- hash.Write(quote)
-
- akPubData, err := tpm2.DecodePublic(akPub)
- if err != nil {
- return nil, fmt.Errorf("invalid AK: %w", err)
- }
- akPublicKey, err := akPubData.Key()
- if err != nil {
- return nil, fmt.Errorf("invalid AK: %w", err)
- }
- akRSAKey, ok := akPublicKey.(*rsa.PublicKey)
- if !ok {
- return nil, errors.New("invalid AK: invalid key type")
- }
-
- if err := rsa.VerifyPKCS1v15(akRSAKey, crypto.SHA256, hash.Sum(nil), signature); err != nil {
- return nil, err
- }
-
- quoteData, err := tpm2.DecodeAttestationData(quote)
- if err != nil {
- return nil, err
- }
- // quoteData.Magic works together with the TPM's Restricted key attribute. If this attribute is set
- // (which it needs to be for the AK to be considered valid) the TPM will not sign external data
- // having this prefix with such a key. Only data that originates inside the TPM like quotes and
-// key certifications can have this prefix and still be signed by a restricted key. This check
- // is thus vital, otherwise somebody can just feed the TPM an arbitrary attestation to sign with
- // its AK and this function will happily accept the forged attestation.
- if quoteData.Magic != tpmGeneratedValue {
- return nil, errors.New("invalid TPM quote: data marker for internal data not set - forged attestation")
- }
- if quoteData.Type != tpm2.TagAttestQuote {
-		return nil, errors.New("invalid TPM quote: not a TPM quote")
- }
- if !bytes.Equal(quoteData.ExtraData, nonce) {
- return nil, errors.New("invalid TPM quote: wrong nonce")
- }
-
- return quoteData, nil
-}
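-
-// exampleAttestationFlow is an illustrative sketch (not part of the
-// original file): a composition of the primitives above as seen by a
-// verifier. A fresh nonce prevents replay; the checks in
-// VerifyAttestPlatform ensure the quote was produced inside the TPM
-// holding the AK.
-func exampleAttestationFlow(nonce, akPub []byte) (*tpm2.AttestationData, error) {
-	quote, sig, err := AttestPlatform(nonce)
-	if err != nil {
-		return nil, err
-	}
-	return VerifyAttestPlatform(nonce, akPub, quote, sig)
-}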
-
-// GetPCRs returns all SRTM PCRs in-order
-func GetPCRs() ([][]byte, error) {
- lock.Lock()
- defer lock.Unlock()
- if tpm == nil {
- return [][]byte{}, ErrNotInitialized
- }
- pcrs := make([][]byte, numSRTMPCRs)
-
- // The TPM can (and most do) return partial results. Let's just retry as many times as we have
- // PCRs since each read should return at least one PCR.
-readLoop:
- for i := 0; i < numSRTMPCRs; i++ {
- sel := tpm2.PCRSelection{Hash: tpm2.AlgSHA256}
- for pcrN := 0; pcrN < numSRTMPCRs; pcrN++ {
- if len(pcrs[pcrN]) == 0 {
- sel.PCRs = append(sel.PCRs, pcrN)
- }
- }
-
- readPCRs, err := tpm2.ReadPCRs(tpm.device, sel)
- if err != nil {
- return nil, fmt.Errorf("failed to read PCRs: %w", err)
- }
-
- for pcrN, pcr := range readPCRs {
- pcrs[pcrN] = pcr
- }
- for _, pcr := range pcrs {
- // If at least one PCR is still not read, continue
- if len(pcr) == 0 {
- continue readLoop
- }
- }
- break
- }
-
- return pcrs, nil
-}
-
-// GetMeasurementLog returns the binary log of all data hashed into PCRs. The result can be parsed by eventlog.
-// As this library currently doesn't support extending PCRs it just returns the log as supplied by the EFI interface.
-func GetMeasurementLog() ([]byte, error) {
- return ioutil.ReadFile("/sys/kernel/security/tpm0/binary_bios_measurements")
-}