core: replace zap with logtree
Test Plan: Effectively a pure refactor. The only tests that could be affected are the e2e tests, which should continue to pass, because we are still logging to the qemu console — just differently.
X-Origin-Diff: phab/D642
GitOrigin-RevId: 0f12b1bc985af08a3cc269569273184321763e4b
diff --git a/core/internal/cluster/BUILD.bazel b/core/internal/cluster/BUILD.bazel
index 3efdab0..7c9dfd5 100644
--- a/core/internal/cluster/BUILD.bazel
+++ b/core/internal/cluster/BUILD.bazel
@@ -21,6 +21,5 @@
"@com_github_golang_protobuf//proto:go_default_library",
"@io_etcd_go_etcd//clientv3:go_default_library",
"@org_golang_x_sys//unix:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/cluster/manager.go b/core/internal/cluster/manager.go
index 38c2945..2f1355b 100644
--- a/core/internal/cluster/manager.go
+++ b/core/internal/cluster/manager.go
@@ -32,7 +32,6 @@
"github.com/cenkalti/backoff/v4"
"github.com/golang/protobuf/proto"
"go.etcd.io/etcd/clientv3"
- "go.uber.org/zap"
"git.monogon.dev/source/nexantic.git/core/internal/common"
"git.monogon.dev/source/nexantic.git/core/internal/common/supervisor"
@@ -150,14 +149,13 @@
defer m.stateLock.Unlock()
if !m.allowed(m.state, n) {
- supervisor.Logger(ctx).Error("Attempted invalid enrolment state transition, failing enrolment",
- zap.String("from", m.state.String()), zap.String("to", m.state.String()))
+ supervisor.Logger(ctx).Errorf("Attempted invalid enrolment state transition, failing enrolment; from: %s, to: %s",
+ m.state.String(), n.String())
m.state = StateFailed
return
}
- supervisor.Logger(ctx).Info("Enrolment state change",
- zap.String("from", m.state.String()), zap.String("to", n.String()))
+ supervisor.Logger(ctx).Infof("Enrolment state change; from: %s, to: %s", m.state.String(), n.String())
m.state = n
}
@@ -206,7 +204,7 @@
// Run is the runnable of the Manager, to be started using the Supervisor. It is one-shot, and should not be restarted.
func (m *Manager) Run(ctx context.Context) error {
if state := m.State(); state != StateNew {
- supervisor.Logger(ctx).Error("Manager started with non-New state, failing", zap.String("state", state.String()))
+ supervisor.Logger(ctx).Errorf("Manager started with non-New state %s, failing", state.String())
m.stateLock.Lock()
m.state = StateFailed
m.wakeWaiters()
@@ -236,7 +234,7 @@
}
if state == m.State() && !m.allowed(state, m.State()) {
- supervisor.Logger(ctx).Error("Enrolment got stuck, failing", zap.String("state", m.state.String()))
+ supervisor.Logger(ctx).Errorf("Enrolment got stuck at %s, failing", m.state.String())
m.stateLock.Lock()
m.state = StateFailed
m.stateLock.Unlock()
@@ -248,7 +246,7 @@
m.stateLock.Lock()
state := m.state
if state != StateRunning {
- supervisor.Logger(ctx).Error("Enrolment failed", zap.Error(err), zap.String("state", m.state.String()))
+ supervisor.Logger(ctx).Errorf("Enrolment failed at %s: %v", m.state.String(), err)
} else {
supervisor.Logger(ctx).Info("Enrolment successful!")
}
@@ -317,7 +315,7 @@
if err != nil {
return fmt.Errorf("when getting IP address: %w", err)
}
- logger.Info("Creating new cluster: got IP address", zap.String("address", ip.String()))
+ logger.Infof("Creating new cluster: got IP address %s", ip.String())
logger.Info("Creating new cluster: initializing storage...")
cuk, err := m.storageRoot.Data.MountNew(&m.storageRoot.ESP.LocalUnlock)
@@ -411,7 +409,7 @@
if err != nil {
return fmt.Errorf("when getting IP address: %w", err)
}
- supervisor.Logger(ctx).Info("Joining cluster: got IP address", zap.String("address", ip.String()))
+	supervisor.Logger(ctx).Infof("Joining cluster: got IP address %s", ip.String())
supervisor.Logger(ctx).Info("Joining cluster: initializing storage...")
cuk, err := m.storageRoot.Data.MountNew(&m.storageRoot.ESP.LocalUnlock)
@@ -448,8 +446,7 @@
}
initialCluster = append(initialCluster, https(t.This))
- supervisor.Logger(ctx).Info("Joining cluster: starting etcd join...",
- zap.String("initial_cluster", strings.Join(initialCluster, ",")), zap.String("name", node.ID()))
+ supervisor.Logger(ctx).Infof("Joining cluster: starting etcd join, name: %s, initial_cluster: %s", node.ID(), strings.Join(initialCluster, ","))
m.consensus = consensus.New(consensus.Config{
Data: &m.storageRoot.Data.Etcd,
Ephemeral: &m.storageRoot.Ephemeral.Consensus,
diff --git a/core/internal/common/supervisor/BUILD.bazel b/core/internal/common/supervisor/BUILD.bazel
index ca8b513..9f940f0 100644
--- a/core/internal/common/supervisor/BUILD.bazel
+++ b/core/internal/common/supervisor/BUILD.bazel
@@ -12,9 +12,9 @@
importpath = "git.monogon.dev/source/nexantic.git/core/internal/common/supervisor",
visibility = ["//core:__subpackages__"],
deps = [
+ "//core/pkg/logtree:go_default_library",
"@com_github_cenkalti_backoff_v4//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
@@ -22,5 +22,4 @@
name = "go_default_test",
srcs = ["supervisor_test.go"],
embed = [":go_default_library"],
- deps = ["@org_uber_go_zap//:go_default_library"],
)
diff --git a/core/internal/common/supervisor/supervisor.go b/core/internal/common/supervisor/supervisor.go
index db4489e..ef8a9cd 100644
--- a/core/internal/common/supervisor/supervisor.go
+++ b/core/internal/common/supervisor/supervisor.go
@@ -22,9 +22,10 @@
import (
"context"
+ "io"
"sync"
- "go.uber.org/zap"
+ "git.monogon.dev/source/nexantic.git/core/pkg/logtree"
)
// A Runnable is a function that will be run in a goroutine, and supervised throughout its lifetime. It can in turn
@@ -70,14 +71,6 @@
SignalDone
)
-// Logger returns a Zap logger that will be named after the Distinguished Name of a the runnable (ie its place in the
-// supervision tree, dot-separated).
-func Logger(ctx context.Context) *zap.Logger {
- node, unlock := fromContext(ctx)
- defer unlock()
- return node.getLogger()
-}
-
// supervisor represents and instance of the supervision system. It keeps track of a supervision tree and a request
// channel to its internal processor goroutine.
type supervisor struct {
@@ -86,10 +79,10 @@
// root is the root node of the supervision tree, named 'root'. It represents the Runnable started with the
// supervisor.New call.
root *node
- // logger is the Zap logger used to create loggers available to runnables.
- logger *zap.Logger
- // ilogger is the Zap logger used for internal logging by the supervisor.
- ilogger *zap.Logger
+ // logtree is the main logtree exposed to runnables and used internally.
+ logtree *logtree.LogTree
+ // ilogger is the internal logger logging to "supervisor" in the logtree.
+ ilogger logtree.LeveledLogger
// pReq is an interface channel to the lifecycle processor of the supervisor.
pReq chan *processorRequest
@@ -109,12 +102,17 @@
}
)
+func WithExistingLogtree(lt *logtree.LogTree) SupervisorOpt {
+ return func(s *supervisor) {
+ s.logtree = lt
+ }
+}
+
// New creates a new supervisor with its root running the given root runnable.
// The given context can be used to cancel the entire supervision tree.
-func New(ctx context.Context, logger *zap.Logger, rootRunnable Runnable, opts ...SupervisorOpt) *supervisor {
+func New(ctx context.Context, rootRunnable Runnable, opts ...SupervisorOpt) *supervisor {
sup := &supervisor{
- logger: logger,
- ilogger: logger.Named("supervisor"),
+ logtree: logtree.New(),
pReq: make(chan *processorRequest),
}
@@ -122,6 +120,7 @@
o(sup)
}
+ sup.ilogger = sup.logtree.MustLeveledFor("supervisor")
sup.root = newNode("root", rootRunnable, sup, nil)
go sup.processor(ctx)
@@ -132,3 +131,15 @@
return sup
}
+
+func Logger(ctx context.Context) logtree.LeveledLogger {
+ node, unlock := fromContext(ctx)
+ defer unlock()
+ return node.sup.logtree.MustLeveledFor(logtree.DN(node.dn()))
+}
+
+func RawLogger(ctx context.Context) io.Writer {
+ node, unlock := fromContext(ctx)
+ defer unlock()
+ return node.sup.logtree.MustRawFor(logtree.DN(node.dn()))
+}
diff --git a/core/internal/common/supervisor/supervisor_node.go b/core/internal/common/supervisor/supervisor_node.go
index e2af62c..a7caf82 100644
--- a/core/internal/common/supervisor/supervisor_node.go
+++ b/core/internal/common/supervisor/supervisor_node.go
@@ -23,7 +23,6 @@
"strings"
"github.com/cenkalti/backoff/v4"
- "go.uber.org/zap"
)
// node is a supervision tree node. It represents the state of a Runnable within this tree, its relation to other tree
@@ -281,8 +280,3 @@
n.bo.Reset()
}
}
-
-// getLogger creates a new logger for a given supervisor node, to be used by its runnable.
-func (n *node) getLogger() *zap.Logger {
- return n.sup.logger.Named(n.dn())
-}
diff --git a/core/internal/common/supervisor/supervisor_processor.go b/core/internal/common/supervisor/supervisor_processor.go
index c72ef89..965a667 100644
--- a/core/internal/common/supervisor/supervisor_processor.go
+++ b/core/internal/common/supervisor/supervisor_processor.go
@@ -22,8 +22,6 @@
"fmt"
"runtime/debug"
"time"
-
- "go.uber.org/zap"
)
// The processor maintains runnable goroutines - ie., when requested will start one, and then once it exists it will
@@ -76,7 +74,7 @@
for {
select {
case <-ctx.Done():
- s.ilogger.Info("supervisor processor exiting...", zap.Error(ctx.Err()))
+ s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
s.processKill()
s.ilogger.Info("supervisor exited")
return
@@ -213,7 +211,7 @@
err = fmt.Errorf("returned error when %s: %w", n.state, err)
}
- s.ilogger.Error("Runnable died", zap.String("dn", n.dn()), zap.Error(err))
+ s.ilogger.Errorf("Runnable %s died: %v", n.dn(), err)
// Mark as dead.
n.state = nodeStateDead
@@ -393,7 +391,7 @@
// Prepare node for rescheduling - remove its children, reset its state to new.
n.reset()
- s.ilogger.Info("rescheduling supervised node", zap.String("dn", dn), zap.Duration("backoff", bo))
+ s.ilogger.Infof("rescheduling supervised node %s with backoff %s", dn, bo.String())
// Reschedule node runnable to run after backoff.
go func(n *node, bo time.Duration) {
diff --git a/core/internal/common/supervisor/supervisor_test.go b/core/internal/common/supervisor/supervisor_test.go
index 9a190c9..9c7bdb7 100644
--- a/core/internal/common/supervisor/supervisor_test.go
+++ b/core/internal/common/supervisor/supervisor_test.go
@@ -21,8 +21,6 @@
"fmt"
"testing"
"time"
-
- "go.uber.org/zap"
)
func runnableBecomesHealthy(healthy, done chan struct{}) Runnable {
@@ -182,10 +180,9 @@
h2 := make(chan struct{})
d2 := make(chan struct{})
- log, _ := zap.NewDevelopment()
ctx, ctxC := context.WithCancel(context.Background())
defer ctxC()
- s := New(ctx, log, func(ctx context.Context) error {
+ s := New(ctx, func(ctx context.Context) error {
err := RunGroup(ctx, map[string]Runnable{
"one": runnableBecomesHealthy(h1, d1),
"two": runnableBecomesHealthy(h2, d2),
@@ -217,10 +214,9 @@
d1 := make(chan struct{})
two := newRC()
- log, _ := zap.NewDevelopment()
ctx, ctxC := context.WithTimeout(context.Background(), 10*time.Second)
defer ctxC()
- s := New(ctx, log, func(ctx context.Context) error {
+ s := New(ctx, func(ctx context.Context) error {
err := RunGroup(ctx, map[string]Runnable{
"one": runnableBecomesHealthy(h1, d1),
"two": two.runnable(),
@@ -266,11 +262,9 @@
d1 := make(chan struct{})
two := newRC()
- log, _ := zap.NewDevelopment()
-
ctx, ctxC := context.WithTimeout(context.Background(), 10*time.Second)
defer ctxC()
- s := New(ctx, log, func(ctx context.Context) error {
+ s := New(ctx, func(ctx context.Context) error {
err := RunGroup(ctx, map[string]Runnable{
"one": runnableSpawnsMore(h1, d1, 5),
"two": two.runnable(),
@@ -315,10 +309,9 @@
d1 := make(chan struct{})
two := newRC()
- log, _ := zap.NewDevelopment()
ctx, ctxC := context.WithCancel(context.Background())
defer ctxC()
- s := New(ctx, log, func(ctx context.Context) error {
+ s := New(ctx, func(ctx context.Context) error {
err := RunGroup(ctx, map[string]Runnable{
"one": runnableBecomesHealthy(h1, d1),
"two": two.runnable(),
@@ -359,10 +352,9 @@
}
func TestMultipleLevelFailure(t *testing.T) {
- log, _ := zap.NewDevelopment()
ctx, ctxC := context.WithCancel(context.Background())
defer ctxC()
- New(ctx, log, func(ctx context.Context) error {
+ New(ctx, func(ctx context.Context) error {
err := RunGroup(ctx, map[string]Runnable{
"one": runnableSpawnsMore(nil, nil, 4),
"two": runnableSpawnsMore(nil, nil, 4),
@@ -379,11 +371,10 @@
func TestBackoff(t *testing.T) {
one := newRC()
- log, _ := zap.NewDevelopment()
ctx, ctxC := context.WithTimeout(context.Background(), 20*time.Second)
defer ctxC()
- s := New(ctx, log, func(ctx context.Context) error {
+ s := New(ctx, func(ctx context.Context) error {
if err := Run(ctx, "one", one.runnable()); err != nil {
return err
}
@@ -485,10 +476,9 @@
}
}
- log, _ := zap.NewDevelopment()
ctx, ctxC := context.WithCancel(context.Background())
defer ctxC()
- New(ctx, log, func(ctx context.Context) error {
+ New(ctx, func(ctx context.Context) error {
RunGroup(ctx, map[string]Runnable{
"one": one,
"oneSibling": oneSibling.runnable(),
@@ -538,12 +528,10 @@
return nil
}
- log, _ := zap.NewDevelopment()
-
// Start a supervision tree with a root runnable.
ctx, ctxC := context.WithCancel(context.Background())
defer ctxC()
- New(ctx, log, func(ctx context.Context) error {
+ New(ctx, func(ctx context.Context) error {
err := Run(ctx, "child", child)
if err != nil {
return fmt.Errorf("could not run 'child': %w", err)
diff --git a/core/internal/consensus/BUILD.bazel b/core/internal/consensus/BUILD.bazel
index 74b70d9..b8b45a7 100644
--- a/core/internal/consensus/BUILD.bazel
+++ b/core/internal/consensus/BUILD.bazel
@@ -14,8 +14,6 @@
"@io_etcd_go_etcd//clientv3/namespace:go_default_library",
"@io_etcd_go_etcd//embed:go_default_library",
"@org_uber_go_atomic//:go_default_library",
- "@org_uber_go_zap//:go_default_library",
- "@org_uber_go_zap//zapcore:go_default_library",
],
)
@@ -28,6 +26,5 @@
"//core/internal/localstorage:go_default_library",
"//core/internal/localstorage/declarative:go_default_library",
"//golibs/common:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/consensus/consensus.go b/core/internal/consensus/consensus.go
index 94d84b2..1403964 100644
--- a/core/internal/consensus/consensus.go
+++ b/core/internal/consensus/consensus.go
@@ -43,8 +43,6 @@
"go.etcd.io/etcd/clientv3/namespace"
"go.etcd.io/etcd/embed"
"go.uber.org/atomic"
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
"git.monogon.dev/source/nexantic.git/core/internal/common"
"git.monogon.dev/source/nexantic.git/core/internal/common/supervisor"
@@ -157,13 +155,9 @@
cfg.InitialCluster = s.config.InitialCluster
}
- logger := supervisor.Logger(ctx)
+ // TODO(q3k): pipe logs from etcd to supervisor.RawLogger via a file.
cfg.Logger = DefaultLogger
- cfg.ZapLoggerBuilder = embed.NewZapCoreLoggerBuilder(
- logger.With(zap.String("component", "etcd")).WithOptions(zap.IncreaseLevel(zapcore.WarnLevel)),
- logger.Core(),
- nil,
- )
+ cfg.LogOutputs = []string{"stderr"}
return cfg, nil
}
@@ -354,9 +348,9 @@
// Luckily etcd already does sanity checks internally and will refuse to promote nodes that aren't
// connected or are still behind on transactions.
if _, err := st.etcd.Server.PromoteMember(ctx, uint64(member.ID)); err != nil {
- supervisor.Logger(ctx).Info("Failed to promote consensus node", zap.String("node", member.Name), zap.Error(err))
+ supervisor.Logger(ctx).Infof("Failed to promote consensus node %s: %v", member.Name, err)
} else {
- supervisor.Logger(ctx).Info("Promoted new consensus node", zap.String("node", member.Name))
+ supervisor.Logger(ctx).Infof("Promoted new consensus node %s", member.Name)
}
}
}
diff --git a/core/internal/consensus/consensus_test.go b/core/internal/consensus/consensus_test.go
index 6e225e0..a308cf4 100644
--- a/core/internal/consensus/consensus_test.go
+++ b/core/internal/consensus/consensus_test.go
@@ -26,8 +26,6 @@
"testing"
"time"
- "go.uber.org/zap"
-
"git.monogon.dev/source/nexantic.git/core/internal/common/supervisor"
"git.monogon.dev/source/nexantic.git/core/internal/localstorage"
"git.monogon.dev/source/nexantic.git/core/internal/localstorage/declarative"
@@ -38,7 +36,6 @@
ctx context.Context
ctxC context.CancelFunc
root *localstorage.Root
- logger *zap.Logger
tmpdir string
}
@@ -56,13 +53,10 @@
os.MkdirAll(root.Data.Etcd.FullPath(), 0700)
os.MkdirAll(root.Ephemeral.Consensus.FullPath(), 0700)
- logger, _ := zap.NewDevelopment()
-
return &boilerplate{
ctx: ctx,
ctxC: ctxC,
root: root,
- logger: logger,
tmpdir: tmp,
}
}
@@ -99,7 +93,7 @@
Port: common.MustConsume(common.AllocateTCPPort()),
})
- supervisor.New(b.ctx, b.logger, etcd.Run)
+ supervisor.New(b.ctx, etcd.Run)
waitEtcd(t, etcd)
kv := etcd.KV("foo", "bar")
@@ -124,7 +118,7 @@
ListenHost: "127.0.0.1",
Port: common.MustConsume(common.AllocateTCPPort()),
})
- supervisor.New(b.ctx, b.logger, etcd.Run)
+ supervisor.New(b.ctx, etcd.Run)
waitEtcd(t, etcd)
id, name, err := etcd.MemberInfo(b.ctx)
@@ -170,7 +164,7 @@
Port: common.MustConsume(common.AllocateTCPPort()),
})
ctx, ctxC := context.WithCancel(b.ctx)
- supervisor.New(ctx, b.logger, etcd.Run)
+ supervisor.New(ctx, etcd.Run)
waitEtcd(t, etcd)
kv := etcd.KV("foo", "bar")
if new {
@@ -215,7 +209,7 @@
ListenHost: "127.0.0.1",
Port: common.MustConsume(common.AllocateTCPPort()),
})
- supervisor.New(b.ctx, b.logger, etcd.Run)
+ supervisor.New(b.ctx, etcd.Run)
waitEtcd(t, etcd)
etcd.stateMu.Lock()
diff --git a/core/internal/containerd/BUILD.bazel b/core/internal/containerd/BUILD.bazel
index 16269d1..5c74a63 100644
--- a/core/internal/containerd/BUILD.bazel
+++ b/core/internal/containerd/BUILD.bazel
@@ -11,7 +11,6 @@
"//core/pkg/logbuffer:go_default_library",
"@com_github_containerd_containerd//:go_default_library",
"@com_github_containerd_containerd//namespaces:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/containerd/main.go b/core/internal/containerd/main.go
index 0c6d497..192039e 100644
--- a/core/internal/containerd/main.go
+++ b/core/internal/containerd/main.go
@@ -24,15 +24,14 @@
"os"
"os/exec"
"path/filepath"
+ "strings"
"time"
ctr "github.com/containerd/containerd"
"github.com/containerd/containerd/namespaces"
- "go.uber.org/zap"
-
- "git.monogon.dev/source/nexantic.git/core/internal/localstorage"
"git.monogon.dev/source/nexantic.git/core/internal/common/supervisor"
+ "git.monogon.dev/source/nexantic.git/core/internal/localstorage"
"git.monogon.dev/source/nexantic.git/core/pkg/logbuffer"
)
@@ -75,7 +74,7 @@
// debug logs) is not an issue for us.
time.Sleep(10 * time.Millisecond)
} else if err != nil {
- logger.Error("gVisor log pump failed, stopping it", zap.Error(err))
+ logger.Errorf("gVisor log pump failed, stopping it: %v", err)
return // It's likely that this will busy-loop printing errors if it encounters one, so bail
}
}
@@ -111,7 +110,7 @@
}
for _, dir := range preseedNamespaceDirs {
if !dir.IsDir() {
- logger.Warn("Non-Directory found in preseed folder, ignoring", zap.String("name", dir.Name()))
+ logger.Warningf("Non-Directory %q found in preseed folder, ignoring", dir.Name())
continue
}
namespace := dir.Name()
@@ -122,7 +121,7 @@
ctxWithNS := namespaces.WithNamespace(ctx, namespace)
for _, image := range images {
if image.IsDir() {
- logger.Warn("Directory found in preseed namespaced folder, ignoring", zap.String("name", image.Name()))
+ logger.Warningf("Directory %q found in preseed namespaced folder, ignoring", image.Name())
continue
}
imageFile, err := os.Open(filepath.Join(preseedNamespacesDir, namespace, image.Name()))
@@ -140,8 +139,7 @@
for _, img := range importedImages {
importedImageNames = append(importedImageNames, img.Name)
}
- logger.Info("Successfully imported preseeded bundle into containerd",
- zap.String("namespace", namespace), zap.Strings("images", importedImageNames))
+ logger.Infof("Successfully imported preseeded bundle %s/%s into containerd", namespace, strings.Join(importedImageNames, ","))
}
}
supervisor.Signal(ctx, supervisor.SignalHealthy)
diff --git a/core/internal/kubernetes/BUILD.bazel b/core/internal/kubernetes/BUILD.bazel
index 69afe18..bef0eb7 100644
--- a/core/internal/kubernetes/BUILD.bazel
+++ b/core/internal/kubernetes/BUILD.bazel
@@ -26,6 +26,7 @@
"//core/pkg/fileargs:go_default_library",
"//core/pkg/fsquota:go_default_library",
"//core/pkg/logbuffer:go_default_library",
+ "//core/pkg/logtree:go_default_library",
"//core/proto/api:go_default_library",
"@com_github_container_storage_interface_spec//lib/go/csi:go_default_library",
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
@@ -50,6 +51,5 @@
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_x_sys//unix:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/kubernetes/clusternet/BUILD.bazel b/core/internal/kubernetes/clusternet/BUILD.bazel
index dd5c58e..d011e2f 100644
--- a/core/internal/kubernetes/clusternet/BUILD.bazel
+++ b/core/internal/kubernetes/clusternet/BUILD.bazel
@@ -13,6 +13,7 @@
"//core/internal/common/supervisor:go_default_library",
"//core/internal/localstorage:go_default_library",
"//core/pkg/jsonpatch:go_default_library",
+ "//core/pkg/logtree:go_default_library",
"@com_github_vishvananda_netlink//:go_default_library",
"@com_zx2c4_golang_wireguard_wgctrl//:go_default_library",
"@com_zx2c4_golang_wireguard_wgctrl//wgtypes:go_default_library",
@@ -22,6 +23,5 @@
"@io_k8s_client_go//informers:go_default_library",
"@io_k8s_client_go//kubernetes:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/kubernetes/clusternet/clusternet.go b/core/internal/kubernetes/clusternet/clusternet.go
index bb1f183..15e3d5b 100644
--- a/core/internal/kubernetes/clusternet/clusternet.go
+++ b/core/internal/kubernetes/clusternet/clusternet.go
@@ -33,12 +33,13 @@
"net"
"os"
+ "git.monogon.dev/source/nexantic.git/core/pkg/logtree"
+
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"github.com/vishvananda/netlink"
- "go.uber.org/zap"
"golang.zx2c4.com/wireguard/wgctrl"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
corev1 "k8s.io/api/core/v1"
@@ -66,7 +67,7 @@
wgClient *wgctrl.Client
privKey wgtypes.Key
- logger *zap.Logger
+ logger logtree.LeveledLogger
}
// ensureNode creates/updates the corresponding WireGuard peer entry for the given node objet
@@ -87,12 +88,12 @@
for _, addr := range newNode.Status.Addresses {
if addr.Type == corev1.NodeInternalIP {
if internalIP != nil {
- s.logger.Warn("More than one NodeInternalIP specified, using the first one")
+ s.logger.Warningf("More than one NodeInternalIP specified, using the first one")
break
}
internalIP = net.ParseIP(addr.Address)
if internalIP == nil {
- s.logger.Warn("failed to parse Internal IP")
+ s.logger.Warningf("Failed to parse Internal IP %s", addr.Address)
}
}
}
@@ -103,14 +104,13 @@
for _, podNetStr := range newNode.Spec.PodCIDRs {
_, podNet, err := net.ParseCIDR(podNetStr)
if err != nil {
- s.logger.Warn("Node PodCIDR failed to parse, ignored", zap.Error(err), zap.String("node", newNode.Name))
+ s.logger.Warningf("Node %s PodCIDR failed to parse, ignored: %v", newNode.Name, err)
continue
}
allowedIPs = append(allowedIPs, *podNet)
}
allowedIPs = append(allowedIPs, net.IPNet{IP: internalIP, Mask: net.CIDRMask(32, 32)})
- s.logger.Debug("Adding/Updating WireGuard peer node", zap.String("node", newNode.Name),
- zap.String("endpointIP", internalIP.String()), zap.Any("allowedIPs", allowedIPs))
+ s.logger.V(1).Infof("Adding/Updating WireGuard peer node %s, endpoint %s, allowedIPs %+v", newNode.Name, internalIP.String(), allowedIPs)
// WireGuard's kernel side has create/update semantics on peers by default. So we can just add the peer multiple
// times to update it.
err = s.wgClient.ConfigureDevice(clusterNetDeviceName, wgtypes.Config{
@@ -244,31 +244,31 @@
AddFunc: func(new interface{}) {
newNode, ok := new.(*corev1.Node)
if !ok {
- logger.Error("Received non-node item in node event handler", zap.Reflect("item", new))
+ logger.Errorf("Received non-node item %+v in node event handler", new)
return
}
if err := s.ensureNode(newNode); err != nil {
- logger.Warn("Failed to sync node", zap.Error(err))
+ logger.Warningf("Failed to sync node: %v", err)
}
},
UpdateFunc: func(old, new interface{}) {
newNode, ok := new.(*corev1.Node)
if !ok {
- logger.Error("Received non-node item in node event handler", zap.Reflect("item", new))
+ logger.Errorf("Received non-node item %+v in node event handler", new)
return
}
if err := s.ensureNode(newNode); err != nil {
- logger.Warn("Failed to sync node", zap.Error(err))
+ logger.Warningf("Failed to sync node: %v", err)
}
},
DeleteFunc: func(old interface{}) {
oldNode, ok := old.(*corev1.Node)
if !ok {
- logger.Error("Received non-node item in node event handler", zap.Reflect("item", oldNode))
+ logger.Errorf("Received non-node item %+v in node event handler", oldNode)
return
}
if err := s.removeNode(oldNode); err != nil {
- logger.Warn("Failed to sync node", zap.Error(err))
+ logger.Warningf("Failed to sync node: %v", err)
}
},
})
diff --git a/core/internal/kubernetes/csi.go b/core/internal/kubernetes/csi.go
index e151396..def1d6d 100644
--- a/core/internal/kubernetes/csi.go
+++ b/core/internal/kubernetes/csi.go
@@ -24,9 +24,10 @@
"path/filepath"
"regexp"
+ "git.monogon.dev/source/nexantic.git/core/pkg/logtree"
+
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/protobuf/ptypes/wrappers"
- "go.uber.org/zap"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -49,7 +50,7 @@
KubeletDirectory *localstorage.DataKubernetesKubeletDirectory
VolumesDirectory *localstorage.DataVolumesDirectory
- logger *zap.Logger
+ logger logtree.LeveledLogger
}
func (s *csiPluginServer) Run(ctx context.Context) error {
@@ -240,7 +241,7 @@
func (s *csiPluginServer) NotifyRegistrationStatus(ctx context.Context, req *pluginregistration.RegistrationStatus) (*pluginregistration.RegistrationStatusResponse, error) {
if req.Error != "" {
- s.logger.Warn("Kubelet failed registering CSI plugin", zap.String("error", req.Error))
+ s.logger.Warningf("Kubelet failed registering CSI plugin: %v", req.Error)
}
return &pluginregistration.RegistrationStatusResponse{}, nil
}
diff --git a/core/internal/kubernetes/pki/BUILD.bazel b/core/internal/kubernetes/pki/BUILD.bazel
index e188bfa..ab45382 100644
--- a/core/internal/kubernetes/pki/BUILD.bazel
+++ b/core/internal/kubernetes/pki/BUILD.bazel
@@ -11,9 +11,9 @@
visibility = ["//core:__subpackages__"],
deps = [
"//core/internal/common:go_default_library",
+ "//core/pkg/logtree:go_default_library",
"@io_etcd_go_etcd//clientv3:go_default_library",
"@io_k8s_client_go//tools/clientcmd:go_default_library",
"@io_k8s_client_go//tools/clientcmd/api:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/kubernetes/pki/kubernetes.go b/core/internal/kubernetes/pki/kubernetes.go
index 0de8f6d..9931f03 100644
--- a/core/internal/kubernetes/pki/kubernetes.go
+++ b/core/internal/kubernetes/pki/kubernetes.go
@@ -25,7 +25,7 @@
"fmt"
"net"
- "go.uber.org/zap"
+ "git.monogon.dev/source/nexantic.git/core/pkg/logtree"
"go.etcd.io/etcd/clientv3"
"k8s.io/client-go/tools/clientcmd"
@@ -77,12 +77,12 @@
// KubernetesPKI manages all PKI resources required to run Kubernetes on Smalltown. It contains all static certificates,
// which can be retrieved, or be used to generate Kubeconfigs from.
type KubernetesPKI struct {
- logger *zap.Logger
+ logger logtree.LeveledLogger
KV clientv3.KV
Certificates map[KubeCertificateName]*Certificate
}
-func NewKubernetes(l *zap.Logger, kv clientv3.KV) *KubernetesPKI {
+func NewKubernetes(l logtree.LeveledLogger, kv clientv3.KV) *KubernetesPKI {
pki := KubernetesPKI{
logger: l,
KV: kv,
@@ -121,7 +121,7 @@
// EnsureAll ensures that all static certificates (and the serviceaccount key) are present on etcd.
func (k *KubernetesPKI) EnsureAll(ctx context.Context) error {
for n, v := range k.Certificates {
- k.logger.Info("ensureing certificate existence", zap.String("name", string(n)))
+ k.logger.Infof("Ensuring %s exists", string(n))
_, _, err := v.Ensure(ctx, k.KV)
if err != nil {
return fmt.Errorf("could not ensure certificate %q exists: %w", n, err)
diff --git a/core/internal/kubernetes/provisioner.go b/core/internal/kubernetes/provisioner.go
index 0e9e419..3be25e0 100644
--- a/core/internal/kubernetes/provisioner.go
+++ b/core/internal/kubernetes/provisioner.go
@@ -24,7 +24,8 @@
"os"
"path/filepath"
- "go.uber.org/zap"
+ "git.monogon.dev/source/nexantic.git/core/pkg/logtree"
+
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -64,7 +65,7 @@
pvcInformer coreinformers.PersistentVolumeClaimInformer
pvInformer coreinformers.PersistentVolumeInformer
storageClassInformer storageinformers.StorageClassInformer
- logger *zap.Logger
+ logger logtree.LeveledLogger
}
// runCSIProvisioner runs the main provisioning machinery. It consists of a bunch of informers which keep track of
@@ -144,7 +145,7 @@
func (p *csiProvisionerServer) enqueueClaim(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
- p.logger.Error("Not queuing PVC because key could not be derived", zap.Error(err))
+ p.logger.Errorf("Not queuing PVC because key could not be derived: %v", err)
return
}
p.claimQueue.Add(key)
@@ -154,7 +155,7 @@
func (p *csiProvisionerServer) enqueuePV(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
- p.logger.Error("Not queuing PV because key could not be derived", zap.Error(err))
+ p.logger.Errorf("Not queuing PV because key could not be derived: %v", err)
return
}
p.pvQueue.Add(key)
@@ -174,13 +175,12 @@
key, ok := obj.(string)
if !ok {
queue.Forget(obj)
- p.logger.Error("Expected string in workqueue", zap.Any("actual", obj))
+ p.logger.Errorf("Expected string in workqueue, got %+v", obj)
return
}
if err := process(key); err != nil {
- p.logger.Warn("Failed processing item, requeueing", zap.String("name", key),
- zap.Int("num_requeues", queue.NumRequeues(obj)), zap.Error(err))
+ p.logger.Warningf("Failed processing item %q, requeueing (numrequeues: %d): %v", key, queue.NumRequeues(obj), err)
queue.AddRateLimited(obj)
}
@@ -263,7 +263,7 @@
volumeID := "pvc-" + string(pvc.ObjectMeta.UID)
volumePath := p.volumePath(volumeID)
- p.logger.Info("Creating local PV", zap.String("volume-id", volumeID))
+ p.logger.Infof("Creating local PV %s", volumeID)
if err := os.Mkdir(volumePath, 0644); err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create volume directory: %w", err)
}
@@ -346,7 +346,7 @@
volumePath := p.volumePath(pv.Spec.CSI.VolumeHandle)
// Log deletes for auditing purposes
- p.logger.Info("Deleting persistent volume", zap.String("name", pv.Spec.CSI.VolumeHandle))
+ p.logger.Infof("Deleting persistent volume %s", pv.Spec.CSI.VolumeHandle)
if err := fsquota.SetQuota(volumePath, 0, 0); err != nil {
// We record these here manually since a successful deletion removes the PV we'd be attaching them to
p.recorder.Eventf(pv, v1.EventTypeWarning, "DeprovisioningFailed", "Failed to remove quota: %v", err)
diff --git a/core/internal/kubernetes/reconciler/BUILD.bazel b/core/internal/kubernetes/reconciler/BUILD.bazel
index fb77ae2..dcfbc49 100644
--- a/core/internal/kubernetes/reconciler/BUILD.bazel
+++ b/core/internal/kubernetes/reconciler/BUILD.bazel
@@ -21,7 +21,6 @@
"@io_k8s_api//storage/v1:go_default_library",
"@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
"@io_k8s_client_go//kubernetes:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/kubernetes/reconciler/reconciler.go b/core/internal/kubernetes/reconciler/reconciler.go
index c972996..24a34ef 100644
--- a/core/internal/kubernetes/reconciler/reconciler.go
+++ b/core/internal/kubernetes/reconciler/reconciler.go
@@ -31,7 +31,6 @@
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
- "go.uber.org/zap"
"k8s.io/client-go/kubernetes"
)
@@ -120,7 +119,7 @@
reconcileAll := func() {
for name, resource := range resources {
if err := reconcile(ctx, resource); err != nil {
- log.Warn("Failed to reconcile built-in resources", zap.String("kind", name), zap.Error(err))
+ log.Warningf("Failed to reconcile built-in resources %s: %v", name, err)
}
}
}
diff --git a/core/internal/network/BUILD.bazel b/core/internal/network/BUILD.bazel
index b2b486f..af82b8b 100644
--- a/core/internal/network/BUILD.bazel
+++ b/core/internal/network/BUILD.bazel
@@ -9,10 +9,10 @@
"//core/internal/common/supervisor:go_default_library",
"//core/internal/network/dhcp:go_default_library",
"//core/internal/network/dns:go_default_library",
+ "//core/pkg/logtree:go_default_library",
"@com_github_google_nftables//:go_default_library",
"@com_github_google_nftables//expr:go_default_library",
"@com_github_vishvananda_netlink//:go_default_library",
"@org_golang_x_sys//unix:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/network/dhcp/BUILD.bazel b/core/internal/network/dhcp/BUILD.bazel
index 40ac372..8962e28 100644
--- a/core/internal/network/dhcp/BUILD.bazel
+++ b/core/internal/network/dhcp/BUILD.bazel
@@ -10,6 +10,5 @@
"@com_github_insomniacslk_dhcp//dhcpv4:go_default_library",
"@com_github_insomniacslk_dhcp//dhcpv4/nclient4:go_default_library",
"@com_github_vishvananda_netlink//:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/network/dhcp/dhcp.go b/core/internal/network/dhcp/dhcp.go
index 0eef2cc..b2c0446 100644
--- a/core/internal/network/dhcp/dhcp.go
+++ b/core/internal/network/dhcp/dhcp.go
@@ -26,7 +26,6 @@
"github.com/insomniacslk/dhcp/dhcpv4"
"github.com/insomniacslk/dhcp/dhcpv4/nclient4"
"github.com/vishvananda/netlink"
- "go.uber.org/zap"
)
type Client struct {
@@ -88,7 +87,7 @@
_, ack, err := client.Request(ctx)
if err != nil {
// TODO(q3k): implement retry logic with full state machine
- logger.Error("DHCP lease request failed", zap.Error(err))
+ logger.Errorf("DHCP lease request failed: %v", err)
return err
}
newC <- parseAck(ack)
@@ -116,7 +115,7 @@
case cfg := <-newC:
current = cfg
- logger.Info("DHCP client ASSIGNED", zap.String("ip", current.String()))
+ logger.Infof("DHCP client ASSIGNED IP %s", current.String())
for _, w := range waiters {
w.fulfill(current)
}
diff --git a/core/internal/network/dns/BUILD.bazel b/core/internal/network/dns/BUILD.bazel
index d197191..efc2727 100644
--- a/core/internal/network/dns/BUILD.bazel
+++ b/core/internal/network/dns/BUILD.bazel
@@ -12,6 +12,5 @@
"//core/internal/common/supervisor:go_default_library",
"//core/pkg/fileargs:go_default_library",
"//core/pkg/logbuffer:go_default_library",
- "@org_uber_go_zap//:go_default_library",
],
)
diff --git a/core/internal/network/dns/coredns.go b/core/internal/network/dns/coredns.go
index 8c70c4f..037c7c1 100644
--- a/core/internal/network/dns/coredns.go
+++ b/core/internal/network/dns/coredns.go
@@ -26,8 +26,6 @@
"sync"
"syscall"
- "go.uber.org/zap"
-
"git.monogon.dev/source/nexantic.git/core/internal/common/supervisor"
"git.monogon.dev/source/nexantic.git/core/pkg/fileargs"
"git.monogon.dev/source/nexantic.git/core/pkg/logbuffer"
@@ -165,7 +163,7 @@
if s.cmd != nil && s.cmd.Process != nil && s.cmd.ProcessState == nil {
s.args.ArgPath("Corefile", s.makeCorefile(s.args))
if err := s.cmd.Process.Signal(syscall.SIGUSR1); err != nil {
- supervisor.Logger(ctx).Warn("Failed to send SIGUSR1 to CoreDNS for reload", zap.Error(err))
+ supervisor.Logger(ctx).Warningf("Failed to send SIGUSR1 to CoreDNS for reload: %v", err)
}
}
}
diff --git a/core/internal/network/main.go b/core/internal/network/main.go
index 31c0b68..414d971 100644
--- a/core/internal/network/main.go
+++ b/core/internal/network/main.go
@@ -27,12 +27,12 @@
"github.com/google/nftables/expr"
"github.com/vishvananda/netlink"
- "go.uber.org/zap"
"golang.org/x/sys/unix"
"git.monogon.dev/source/nexantic.git/core/internal/common/supervisor"
"git.monogon.dev/source/nexantic.git/core/internal/network/dhcp"
"git.monogon.dev/source/nexantic.git/core/internal/network/dns"
+ "git.monogon.dev/source/nexantic.git/core/pkg/logtree"
)
const (
@@ -44,7 +44,7 @@
config Config
dhcp *dhcp.Client
- logger *zap.Logger
+ logger logtree.LeveledLogger
}
type Config struct {
@@ -87,7 +87,7 @@
}
if gw.IsUnspecified() {
- s.logger.Info("No default route set, only local network will be reachable", zap.String("local", addr.String()))
+ s.logger.Infof("No default route set, only local network %s will be reachable", addr.String())
return nil
}
@@ -123,7 +123,7 @@
s.config.CorednsRegistrationChan <- dns.NewUpstreamDirective(status.DNS)
if err := s.addNetworkRoutes(iface, status.Address, status.Gateway); err != nil {
- s.logger.Warn("failed to add routes", zap.Error(err))
+ s.logger.Warningf("Failed to add routes: %v", err)
}
c := nftables.Conn{}
@@ -179,12 +179,12 @@
supervisor.Run(ctx, "interfaces", s.runInterfaces)
if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte("1\n"), 0644); err != nil {
- logger.Panic("Failed to enable IPv4 forwarding", zap.Error(err))
+ logger.Fatalf("Failed to enable IPv4 forwarding: %v", err)
}
// We're handling all DNS requests with CoreDNS, including local ones
if err := setResolvconf([]net.IP{{127, 0, 0, 1}}, []string{}); err != nil {
- logger.Warn("failed to set resolvconf", zap.Error(err))
+ logger.Warningf("Failed to set resolv.conf: %v", err)
}
supervisor.Signal(ctx, supervisor.SignalHealthy)
@@ -198,7 +198,7 @@
links, err := netlink.LinkList()
if err != nil {
- s.logger.Fatal("Failed to list network links", zap.Error(err))
+ s.logger.Fatalf("Failed to list network links: %v", err)
}
var ethernetLinks []netlink.Link
@@ -211,16 +211,16 @@
}
ethernetLinks = append(ethernetLinks, link)
} else {
- s.logger.Info("Ignoring non-Ethernet interface", zap.String("interface", attrs.Name))
+ s.logger.Infof("Ignoring non-Ethernet interface %s", attrs.Name)
}
} else if link.Attrs().Name == "lo" {
if err := netlink.LinkSetUp(link); err != nil {
- s.logger.Error("Failed to take up loopback interface", zap.Error(err))
+ s.logger.Errorf("Failed to bring up loopback interface: %v", err)
}
}
}
if len(ethernetLinks) != 1 {
- s.logger.Warn("Network service needs exactly one link, bailing")
+ s.logger.Warningf("Network service needs exactly one link, bailing")
} else {
link := ethernetLinks[0]
if err := s.useInterface(ctx, link); err != nil {