treewide: introduce osbase package and move things around
Everything except localregistry moved from metropolis/pkg to osbase;
localregistry moved to metropolis/test as it's only used there anyway.
Change-Id: If1a4bf377364bef0ac23169e1b90379c71b06d72
Reviewed-on: https://review.monogon.dev/c/monogon/+/3079
Tested-by: Jenkins CI
Reviewed-by: Serge Bazanski <serge@monogon.tech>
diff --git a/osbase/event/BUILD.bazel b/osbase/event/BUILD.bazel
new file mode 100644
index 0000000..a8a2a0c
--- /dev/null
+++ b/osbase/event/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "event",
+ srcs = ["event.go"],
+ importpath = "source.monogon.dev/osbase/event",
+ visibility = ["//visibility:public"],
+ deps = ["//osbase/supervisor"],
+)
diff --git a/osbase/event/etcd/BUILD.bazel b/osbase/event/etcd/BUILD.bazel
new file mode 100644
index 0000000..925eeaa
--- /dev/null
+++ b/osbase/event/etcd/BUILD.bazel
@@ -0,0 +1,30 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "etcd",
+ srcs = ["etcd.go"],
+ importpath = "source.monogon.dev/osbase/event/etcd",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//osbase/event",
+ "@com_github_cenkalti_backoff_v4//:backoff",
+ "@io_etcd_go_etcd_client_v3//:client",
+ ],
+)
+
+go_test(
+ name = "etcd_test",
+ srcs = ["etcd_test.go"],
+ embed = [":etcd"],
+ deps = [
+ "//osbase/event",
+ "//osbase/logtree",
+ "@io_etcd_go_etcd_api_v3//v3rpc/rpctypes",
+ "@io_etcd_go_etcd_client_pkg_v3//testutil",
+ "@io_etcd_go_etcd_client_v3//:client",
+ "@io_etcd_go_etcd_tests_v3//integration",
+ "@org_golang_google_grpc//codes",
+ "@org_golang_google_grpc//grpclog",
+ "@org_uber_go_zap//:zap",
+ ],
+)
diff --git a/osbase/event/etcd/etcd.go b/osbase/event/etcd/etcd.go
new file mode 100644
index 0000000..8cf2440
--- /dev/null
+++ b/osbase/event/etcd/etcd.go
@@ -0,0 +1,444 @@
+package etcd
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/cenkalti/backoff/v4"
+ clientv3 "go.etcd.io/etcd/client/v3"
+
+ "source.monogon.dev/osbase/event"
+)
+
+var (
+ // Type assert that *Value implements event.ValueWatch. We do this
+ // artificially, as there currently is no code path that needs this to be
+ // strictly true. However, users of this library might want to rely on the
+ // event.ValueWatch interface instead of particular Value implementations.
+ _ event.ValueWatch[StringAt] = &Value[StringAt]{}
+)
+
+// ThinClient is a small wrapper interface to combine
+// clientv3.KV and clientv3.Watcher.
+type ThinClient interface {
+ clientv3.KV
+ clientv3.Watcher
+}
+
+// Value is an 'Event Value' backed by an etcd cluster, accessed over an
+// etcd client. This is a stateless handle and can be copied and shared across
+// goroutines.
+type Value[T any] struct {
+ decoder func(key, value []byte) (T, error)
+ etcd ThinClient
+ key string
+ keyEnd string
+}
+
+type Option struct {
+ rangeEnd string
+}
+
+// Range returns an Option that makes a Value be backed by a range of etcd
+// key/value pairs, from 'key' passed to NewValue to 'end' passed to Range.
+//
+// The key range semantics (ie. lexicographic ordering) are the same as in etcd
+// ranges, so for example to retrieve all keys prefixed by `foo/` key should be
+// `foo/` and end should be `foo0`.
+//
+// For any update in the given range, the decoder will be called and its result
+// will trigger the return of a Get() call. The decoder should return a type
+// that lets the user distinguish which of the multiple objects in the range got
+// updated, as the Get() call returns no additional information about the
+// location of the retrieved object by itself.
+//
+// The order of values retrieved by Get() is currently fully arbitrary and must
+// not be relied on. It's possible that in the future the order of updates and
+// the blocking behaviour of Get will be formalized, but this is not yet the
+// case. Instead, the data returned should be treated as eventually consistent
+// with the etcd state.
+//
+// For some uses, it might be necessary to first retrieve all the objects
+// contained within the range before starting to block on updates - in this
+// case, the BacklogOnly option should be used when calling Get.
+func Range(end string) *Option {
+ return &Option{
+ rangeEnd: end,
+ }
+}
+
+// NewValue creates a new Value for a given key (or key range) in etcd,
+// accessed through the given client. The given decoder will be used to convert
+// bytes retrieved from etcd into the typed value returned by Get on this
+// Value's watchers.
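+//
+// A minimal usage sketch (hedged; 'cl' stands for any ThinClient, eg. a
+// *clientv3.Client, and the key names are illustrative):
+//
+//	v := NewValue(cl, "foo/", DecoderStringAt, Range("foo0"))
+//	w := v.Watch()
+//	defer w.Close()
+//	kv, err := w.Get(ctx) // kv.Key and kv.Value describe one update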
+func NewValue[T any](etcd ThinClient, key string, decoder func(key, value []byte) (T, error), options ...*Option) *Value[T] {
+ res := &Value[T]{
+ decoder: decoder,
+ etcd: etcd,
+ key: key,
+ keyEnd: key,
+ }
+
+ for _, opt := range options {
+ if end := opt.rangeEnd; end != "" {
+ res.keyEnd = end
+ }
+ }
+
+ return res
+}
+
+// DecoderNoop is a decoder which returns the raw etcd value bytes unchanged,
+// ignoring the key.
+func DecoderNoop(_, value []byte) ([]byte, error) {
+ return value, nil
+}
+
+// DecoderStringAt is a decoder which converts an etcd key/value pair into a
+// StringAt.
+func DecoderStringAt(key, value []byte) (StringAt, error) {
+ return StringAt{
+ Key: string(key),
+ Value: string(value),
+ }, nil
+}
+
+// StringAt is an etcd key/value pair, with both represented as strings.
+type StringAt struct {
+ Key string
+ Value string
+}
+
+// Watch implements event.ValueWatch.
+func (e *Value[T]) Watch() event.Watcher[T] {
+ ctx, ctxC := context.WithCancel(context.Background())
+ return &watcher[T]{
+ Value: *e,
+
+ ctx: ctx,
+ ctxC: ctxC,
+
+ current: make(map[string][]byte),
+
+ getSem: make(chan struct{}, 1),
+ }
+}
+
+type watcher[T any] struct {
+ // Value copy, used to configure the behaviour of this watcher.
+ Value[T]
+
+ // ctx is the context that expresses the liveness of this watcher. It is
+ // canceled when the watcher is closed, and the etcd Watch hangs off of it.
+ ctx context.Context
+ ctxC context.CancelFunc
+
+ // getSem is a semaphore used to limit concurrent Get calls and return an
+ // error if concurrent access is attempted.
+ getSem chan struct{}
+
+ // backlogged is a list of keys retrieved from etcd but not yet returned via
+ // Get. These items are not a replay of all the updates from etcd, but are
+ // already compacted to deduplicate updates to the same object (ie., if the
+ // update stream from etcd is for keys A, B, and A, the backlogged list will
+ // only contain one update for A and B each, with the first update for A being
+ // discarded upon arrival of the second update).
+ //
+ // The keys are an index into the current map, which contains the values
+ // retrieved, including ones that have already been returned via Get. This
+ // persistence allows us to deduplicate spurious updates to the user, in which
+ // etcd returned a new revision of a key, but the data stayed the same.
+ backlogged [][]byte
+ // current maps each etcd key to the etcd value at that key. This map
+ // persists alongside an etcd connection, permitting deduplication of spurious
+ // etcd updates even across multiple Get calls.
+ current map[string][]byte
+
+ // prev is the etcd store revision of a previously completed etcd Get/Watch
+ // call, used to resume a Watch call in case of failures.
+ prev *int64
+ // wc is the etcd watch channel, or nil if no channel is yet open.
+ wc clientv3.WatchChan
+
+ // testRaceWG is an optional WaitGroup that, if set, will be waited upon
+ // after the initial KV value retrieval, but before the watch is created.
+ // This is only used for testing.
+ testRaceWG *sync.WaitGroup
+ // testSetupWG is an optional WaitGroup that, if set, will be waited upon
+ // after the etcd watch is created.
+ // This is only used for testing.
+ testSetupWG *sync.WaitGroup
+}
+
+// setup initiates wc (the watch channel from etcd) after retrieving the initial
+// value(s) with a get operation.
+func (w *watcher[T]) setup(ctx context.Context) error {
+ if w.wc != nil {
+ return nil
+ }
+ ranged := w.key != w.keyEnd
+
+ // First, check if some data under this key/range already exists.
+
+ // We use an exponential backoff and retry here as the initial Get can fail
+ // if the cluster is unstable (eg. failing over). We only fail the retry if
+ // the context expires.
+ bo := backoff.NewExponentialBackOff()
+ bo.MaxElapsedTime = 0
+
+ err := backoff.Retry(func() error {
+
+ var getOpts []clientv3.OpOption
+ if ranged {
+ getOpts = append(getOpts, clientv3.WithRange(w.keyEnd))
+ }
+ get, err := w.etcd.Get(ctx, w.key, getOpts...)
+ if err != nil {
+ return fmt.Errorf("when retrieving initial value: %w", err)
+ }
+
+ // Assert that the etcd API is behaving as expected.
+ if !ranged && len(get.Kvs) > 1 {
+ panic("More than one key returned in unary GET response")
+ }
+
+ // After a successful Get, save the revision to watch from and re-build the
+ // backlog from scratch based on what was available in the etcd store at that
+ // time.
+ w.prev = &get.Header.Revision
+
+ w.backlogged = nil
+ w.current = make(map[string][]byte)
+ for _, kv := range get.Kvs {
+ w.backlogged = append(w.backlogged, kv.Key)
+ w.current[string(kv.Key)] = kv.Value
+ }
+ return nil
+
+ }, backoff.WithContext(bo, ctx))
+
+ if w.testRaceWG != nil {
+ w.testRaceWG.Wait()
+ }
+ if err != nil {
+ return err
+ }
+
+ watchOpts := []clientv3.OpOption{
+ clientv3.WithRev(*w.prev + 1),
+ }
+ if ranged {
+ watchOpts = append(watchOpts, clientv3.WithRange(w.keyEnd))
+ }
+ w.wc = w.etcd.Watch(w.ctx, w.key, watchOpts...)
+
+ if w.testSetupWG != nil {
+ w.testSetupWG.Wait()
+ }
+ return nil
+}
+
+// backfill blocks until a backlog of items is available. An error is returned
+// if the context is canceled.
+func (w *watcher[T]) backfill(ctx context.Context) error {
+ // Keep watching for watch events.
+ for {
+ var resp *clientv3.WatchResponse
+ select {
+ case r := <-w.wc:
+ resp = &r
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ if resp.Canceled {
+ // Only allow for watches to be canceled due to context
+ // cancellations. Any other error is something we need to handle,
+ // eg. a client close or compaction error.
+ if errors.Is(resp.Err(), ctx.Err()) {
+ return fmt.Errorf("watch canceled: %w", resp.Err())
+ }
+
+ // Attempt to reconnect.
+ if w.wc != nil {
+ // If a wc already exists, close it. This forces a reconnection
+ // by the next setup call.
+ w.ctxC()
+ w.ctx, w.ctxC = context.WithCancel(context.Background())
+ w.wc = nil
+ }
+ if err := w.setup(ctx); err != nil {
+ return fmt.Errorf("failed to setup watcher: %w", err)
+ }
+ continue
+ }
+
+ w.prev = &resp.Header.Revision
+ // Spurious watch event with no update? Keep trying.
+ if len(resp.Events) == 0 {
+ continue
+ }
+
+ // Process updates into the compacted backlog, transforming deletions into
+ // nil values. Each key appears at most once in the backlog: when a key is
+ // encountered again, its existing backlog entry is kept (preserving order)
+ // and only the value stored for it is replaced, keeping the backlog as
+ // small as possible.
+ //
+ // TODO(q3k): the seen set built below could be kept in the watcher state to
+ // not waste time on each update, but it's good enough for now.
+
+ // Prepare a set of keys that already exist in the backlog. This will be used
+ // to make sure we don't duplicate backlog entries while maintaining a stable
+ // backlog order.
+ seen := make(map[string]bool)
+ for _, k := range w.backlogged {
+ seen[string(k)] = true
+ }
+
+ for _, ev := range resp.Events {
+ var value []byte
+ switch ev.Type {
+ case clientv3.EventTypeDelete:
+ case clientv3.EventTypePut:
+ value = ev.Kv.Value
+ default:
+ return fmt.Errorf("invalid event type %v", ev.Type)
+ }
+
+ keyS := string(ev.Kv.Key)
+ prev := w.current[keyS]
+ // Short-circuit and skip updates with the same content as already present.
+ // These are sometimes emitted by etcd.
+ if bytes.Equal(prev, value) {
+ continue
+ }
+
+ // Only insert to backlog if not yet present, but maintain order.
+ if !seen[string(ev.Kv.Key)] {
+ w.backlogged = append(w.backlogged, ev.Kv.Key)
+ seen[string(ev.Kv.Key)] = true
+ }
+ // Regardless of backlog list, always update the key to its newest value.
+ w.current[keyS] = value
+ }
+
+ // Still nothing in backlog? Keep trying.
+ if len(w.backlogged) == 0 {
+ continue
+ }
+
+ return nil
+ }
+}
+
+type GetOption struct {
+ backlogOnly bool
+}
+
+// Get implements the Get method of the Watcher interface.
+// It can return an error in three cases:
+// - the given context is canceled (in which case, the given error will wrap
+// the context error)
+// - the watcher's decoder returned an error (in which case the error
+// returned by the decoder will be returned verbatim)
+// - it has been called with BacklogOnly and the Watcher has no more local
+// event data to return (see BacklogOnly for more information on the
+// semantics of this mode of operation)
+//
+// Note that transient and permanent etcd errors are never returned, and the
+// Get call will attempt to recover from these errors as much as possible. This
+// also means that the user of the Watcher will not be notified if the
+// underlying etcd client disconnects from the cluster, or if the cluster loses
+// quorum.
+//
+// TODO(q3k): implement leases to allow clients to be notified when there are
+// transient cluster/quorum/partition errors, if needed.
+//
+// TODO(q3k): implement internal, limited buffering for backlogged data not yet
+// consumed by the client, as the etcd client library seems to use an unbounded
+// buffer in case this happens (see: watcherStream.buf in clientv3).
+func (w *watcher[T]) Get(ctx context.Context, opts ...event.GetOption[T]) (T, error) {
+ var empty T
+ select {
+ case w.getSem <- struct{}{}:
+ default:
+ return empty, fmt.Errorf("cannot Get() concurrently on a single waiter")
+ }
+ defer func() {
+ <-w.getSem
+ }()
+
+ backlogOnly := false
+ var predicate func(t T) bool
+ for _, opt := range opts {
+ if opt.Predicate != nil {
+ predicate = opt.Predicate
+ }
+ if opt.BacklogOnly {
+ backlogOnly = true
+ }
+ }
+
+ ranged := w.key != w.keyEnd
+ if ranged && predicate != nil {
+ return empty, errors.New("filtering unimplemented for ranged etcd values")
+ }
+ if backlogOnly && predicate != nil {
+ return empty, errors.New("filtering unimplemented for backlog-only requests")
+ }
+
+ for {
+ v, err := w.getUnlocked(ctx, ranged, backlogOnly)
+ if err != nil {
+ return empty, err
+ }
+ if predicate == nil || predicate(v) {
+ return v, nil
+ }
+ }
+}
+
+func (w *watcher[T]) getUnlocked(ctx context.Context, ranged, backlogOnly bool) (T, error) {
+ var empty T
+ // Early check for context cancelations, preventing spurious contact with etcd
+ // if there's no need to.
+ if w.ctx.Err() != nil {
+ return empty, w.ctx.Err()
+ }
+
+ if err := w.setup(ctx); err != nil {
+ return empty, fmt.Errorf("when setting up watcher: %w", err)
+ }
+
+ if backlogOnly && len(w.backlogged) == 0 {
+ return empty, event.ErrBacklogDone
+ }
+
+ // Update backlog from etcd if needed.
+ if len(w.backlogged) < 1 {
+ err := w.backfill(ctx)
+ if err != nil {
+ return empty, fmt.Errorf("when watching for new value: %w", err)
+ }
+ }
+ // Backlog is now guaranteed to contain at least one element.
+
+ if !ranged {
+ // For non-ranged queries, drain backlog fully.
+ if len(w.backlogged) != 1 {
+ panic(fmt.Sprintf("multiple keys in nonranged value: %v", w.backlogged))
+ }
+ k := w.backlogged[0]
+ v := w.current[string(k)]
+ w.backlogged = nil
+ return w.decoder(k, v)
+ } else {
+ // For ranged queries, pop one element off the backlog.
+ k := w.backlogged[0]
+ v := w.current[string(k)]
+ w.backlogged = w.backlogged[1:]
+ return w.decoder(k, v)
+ }
+}
+
+func (w *watcher[T]) Close() error {
+ w.ctxC()
+ return nil
+}
diff --git a/osbase/event/etcd/etcd_test.go b/osbase/event/etcd/etcd_test.go
new file mode 100644
index 0000000..ee3672f
--- /dev/null
+++ b/osbase/event/etcd/etcd_test.go
@@ -0,0 +1,865 @@
+package etcd
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/testutil"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/tests/v3/integration"
+ "go.uber.org/zap"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+
+ "source.monogon.dev/osbase/event"
+ "source.monogon.dev/osbase/logtree"
+)
+
+var (
+ cluster *integration.ClusterV3
+ endpoints []string
+)
+
+// TestMain brings up a 3-node etcd cluster for tests to use.
+func TestMain(m *testing.M) {
+ // This logtree's data is not output anywhere.
+ lt := logtree.New()
+
+ cfg := integration.ClusterConfig{
+ Size: 3,
+ GRPCKeepAliveMinTime: time.Millisecond,
+ LoggerBuilder: func(memberName string) *zap.Logger {
+ dn := logtree.DN("etcd." + memberName)
+ return logtree.Zapify(lt.MustLeveledFor(dn), zap.WarnLevel)
+ },
+ }
+ tb, cancel := testutil.NewTestingTBProthesis("curator")
+ defer cancel()
+ flag.Parse()
+ integration.BeforeTestExternal(tb)
+ grpclog.SetLoggerV2(logtree.GRPCify(lt.MustLeveledFor("grpc")))
+ cluster = integration.NewClusterV3(tb, &cfg)
+ endpoints = make([]string, 3)
+ for i := range endpoints {
+ endpoints[i] = cluster.Client(i).Endpoints()[0]
+ }
+
+ v := m.Run()
+ cluster.Terminate(tb)
+ os.Exit(v)
+}
+
+// setRaceWg creates a new WaitGroup and sets the given watcher to wait on this
+// WG after it performs the initial retrieval of a value from etcd, but before
+// it starts the watcher. This is used to test potential race conditions
+// present between these two steps.
+func setRaceWg[T any](w event.Watcher[T]) *sync.WaitGroup {
+ var wg sync.WaitGroup
+ w.(*watcher[T]).testRaceWG = &wg
+ return &wg
+}
+
+// setSetupWg creates a new WaitGroup and sets the given watcher to wait on
+// this WG after an etcd watch channel is created. This is used in tests to
+// ensure that the watcher is fully created before it is tested.
+func setSetupWg[T any](w event.Watcher[T]) *sync.WaitGroup {
+ var wg sync.WaitGroup
+ w.(*watcher[T]).testSetupWG = &wg
+ return &wg
+}
+
+// testClient is an etcd connection to the test cluster.
+type testClient struct {
+ client *clientv3.Client
+}
+
+func newTestClient(t *testing.T) *testClient {
+ t.Helper()
+ cli, err := clientv3.New(clientv3.Config{
+ Endpoints: endpoints,
+ DialTimeout: 1 * time.Second,
+ DialKeepAliveTime: 1 * time.Second,
+ DialKeepAliveTimeout: 1 * time.Second,
+ })
+ if err != nil {
+ t.Fatalf("clientv3.New: %v", err)
+ }
+
+ return &testClient{
+ client: cli,
+ }
+}
+
+func (d *testClient) close() {
+ d.client.Close()
+}
+
+// setEndpoints configures which endpoints (from {0,1,2}) the testClient is
+// connected to.
+func (d *testClient) setEndpoints(nums ...uint) {
+ var eps []string
+ for _, num := range nums {
+ eps = append(eps, endpoints[num])
+ }
+ d.client.SetEndpoints(eps...)
+}
+
+// put uses the testClient to store key with a given string value in etcd. It
+// contains retry logic that will block until the put is successful.
+func (d *testClient) put(t *testing.T, key, value string) {
+ t.Helper()
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ for {
+ ctxT, ctxC := context.WithTimeout(ctx, 100*time.Millisecond)
+ _, err := d.client.Put(ctxT, key, value)
+ ctxC()
+ if err == nil {
+ return
+ }
+ if errors.Is(err, ctxT.Err()) {
+ log.Printf("Retrying after %v", err)
+ continue
+ }
+ // Retry on etcd unavailability - this will happen in this code as the
+ // etcd cluster repeatedly loses quorum.
+ var eerr rpctypes.EtcdError
+ if errors.As(err, &eerr) && eerr.Code() == codes.Unavailable {
+ log.Printf("Retrying after %v", err)
+ continue
+ }
+ t.Fatalf("Put: %v", err)
+ }
+
+}
+
+// remove uses the testClient to remove the given key from etcd. It contains
+// retry logic that will block until the removal is successful.
+func (d *testClient) remove(t *testing.T, key string) {
+ t.Helper()
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ _, err := d.client.Delete(ctx, key)
+ if err == nil {
+ return
+ }
+ t.Fatalf("Delete: %v", err)
+}
+
+// expect runs a Get on the given Watcher, ensuring the returned value is a
+// given string.
+func expect(t *testing.T, w event.Watcher[StringAt], value string) {
+ t.Helper()
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ got, err := w.Get(ctx)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+
+ if got, want := got.Value, value; got != want {
+ t.Errorf("Wanted value %q, got %q", want, got)
+ }
+}
+
+// expectTimeout ensures that the given watcher blocks on a Get call for at
+// least 100 milliseconds. This is used by tests to attempt to verify that the
+// watcher Get is fully blocked, but can cause false positives (eg. when Get
+// blocks for 101 milliseconds). Thus, this function should be used sparingly
+// and in tests that perform other baseline behaviour checks alongside this
+// test.
+func expectTimeout[T any](t *testing.T, w event.Watcher[T]) {
+ t.Helper()
+ ctx, ctxC := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ got, err := w.Get(ctx)
+ ctxC()
+
+ if !errors.Is(err, ctx.Err()) {
+ t.Fatalf("Expected timeout error, got %v, %v", got, err)
+ }
+}
+
+// wait wraps a watcher into a channel of strings, ensuring that the watcher
+// never errors on Get calls and always returns strings.
+func wait(t *testing.T, w event.Watcher[StringAt]) (chan string, func()) {
+ t.Helper()
+ ctx, ctxC := context.WithCancel(context.Background())
+
+ c := make(chan string)
+
+ go func() {
+ for {
+ got, err := w.Get(ctx)
+ if err != nil && errors.Is(err, ctx.Err()) {
+ return
+ }
+ if err != nil {
+ t.Errorf("Get: %v", err)
+ close(c)
+ return
+ }
+ c <- got.Value
+ }
+ }()
+
+ return c, ctxC
+}
+
+// TestSimple exercises the simplest possible interaction with a watched value.
+func TestSimple(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ k := "test-simple"
+ value := NewValue(tc.client, k, DecoderStringAt)
+ tc.put(t, k, "one")
+
+ watcher := value.Watch()
+ defer watcher.Close()
+ expect(t, watcher, "one")
+
+ tc.put(t, k, "two")
+ expect(t, watcher, "two")
+
+ tc.put(t, k, "three")
+ tc.put(t, k, "four")
+ tc.put(t, k, "five")
+ tc.put(t, k, "six")
+
+ q, cancel := wait(t, watcher)
+ // Test will hang here if the above value does not receive the set "six".
+ log.Printf("a")
+ for el := range q {
+ log.Printf("%q", el)
+ if el == "six" {
+ break
+ }
+ }
+ log.Printf("b")
+ cancel()
+}
+
+// stringAtGet performs a Get from a Watcher, expecting a StringAt and updating
+// the given map with the retrieved value.
+func stringAtGet(ctx context.Context, t *testing.T, w event.Watcher[StringAt], m map[string]string) {
+ t.Helper()
+
+ vr, err := w.Get(ctx)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ m[vr.Key] = vr.Value
+}
+
+// TestSimpleRange exercises the simplest behaviour of a ranged watcher,
+// retrieving updates via Get in a fully blocking fashion.
+func TestSimpleRange(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ ks := "test-simple-range/"
+ ke := "test-simple-range0"
+ value := NewValue(tc.client, ks, DecoderStringAt, Range(ke))
+ tc.put(t, ks+"a", "one")
+ tc.put(t, ks+"b", "two")
+ tc.put(t, ks+"c", "three")
+ tc.put(t, ks+"b", "four")
+
+ w := value.Watch()
+ defer w.Close()
+
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ res := make(map[string]string)
+ stringAtGet(ctx, t, w, res)
+ stringAtGet(ctx, t, w, res)
+ stringAtGet(ctx, t, w, res)
+
+ tc.put(t, ks+"a", "five")
+ tc.put(t, ks+"e", "six")
+
+ stringAtGet(ctx, t, w, res)
+ stringAtGet(ctx, t, w, res)
+
+ for _, te := range []struct {
+ k, w string
+ }{
+ {ks + "a", "five"},
+ {ks + "b", "four"},
+ {ks + "c", "three"},
+ {ks + "e", "six"},
+ } {
+ if want, got := te.w, res[te.k]; want != got {
+ t.Errorf("res[%q]: wanted %q, got %q", te.k, want, got)
+ }
+ }
+}
+
+// TestCancel ensures that watchers can resume after being canceled.
+func TestCancel(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ k := "test-cancel"
+ value := NewValue(tc.client, k, DecoderStringAt)
+ tc.put(t, k, "one")
+
+ watcher := value.Watch()
+ defer watcher.Close()
+ expect(t, watcher, "one")
+
+ ctx, ctxC := context.WithCancel(context.Background())
+ errs := make(chan error, 1)
+ go func() {
+ _, err := watcher.Get(ctx)
+ errs <- err
+ }()
+ ctxC()
+ if want, got := ctx.Err(), <-errs; !errors.Is(got, want) {
+ t.Fatalf("Wanted err %v, got %v", want, got)
+ }
+
+ // Successfully canceled watch, resuming should continue to work.
+ q, cancel := wait(t, watcher)
+ defer cancel()
+
+ tc.put(t, k, "two")
+ if want, got := "two", <-q; want != got {
+ t.Fatalf("Wanted val %q, got %q", want, got)
+ }
+}
+
+// TestCancelOnGet ensures that a context cancellation on an initial Get (which
+// translates to an etcd Get in a backoff loop) doesn't block.
+func TestCancelOnGet(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ k := "test-cancel-on-get"
+ value := NewValue(tc.client, k, DecoderStringAt)
+ watcher := value.Watch()
+ tc.put(t, k, "one")
+
+ // Cause partition between client endpoint and rest of cluster. Any read/write
+ // operations will now hang.
+ tc.setEndpoints(0)
+ cluster.Members[0].InjectPartition(t, cluster.Members[1], cluster.Members[2])
+ // Let raft timeouts expire so that the leader is aware a partition has occurred
+ // and stops serving data if it is not part of a quorum anymore.
+ //
+ // Otherwise, if Member[0] was the leader, there will be a window of opportunity
+ // during which it will continue to serve read data even though it has been
+ // partitioned off. This is an effect of how etcd handles linearizable reads:
+ // they go through the leader, but do not go through raft.
+ //
+ // The value is the default etcd leader timeout (1s) + some wiggle room.
+ time.Sleep(time.Second + time.Millisecond*100)
+
+ // Perform the initial Get(), which should attempt to retrieve a KV entry from
+ // the etcd service. This should hang. Unfortunately, there's no easy way to do
+ // this without an arbitrary sleep hoping that the client actually gets to the
+ // underlying etcd.Get call. This can cause false positives (eg. false 'pass'
+ // results) in this test.
+ ctx, ctxC := context.WithCancel(context.Background())
+ errs := make(chan error, 1)
+ go func() {
+ _, err := watcher.Get(ctx)
+ errs <- err
+ }()
+ time.Sleep(time.Second)
+
+ // Now that the etcd.Get is hanging, cancel the context.
+ ctxC()
+ // And now unpartition the cluster, resuming reads.
+ cluster.Members[0].RecoverPartition(t, cluster.Members[1], cluster.Members[2])
+
+ // The etcd.Get() call should've returned with a context cancellation.
+ err := <-errs
+ switch {
+ case err == nil:
+ t.Errorf("watcher.Get() returned no error, wanted context error")
+ case errors.Is(err, ctx.Err()):
+ // Okay.
+ default:
+ t.Errorf("watcher.Get() returned %v, wanted context error", err)
+ }
+}
+
+// TestClientReconnect forces a 'reconnection' of an active watcher from a
+// running member to another member, by stopping the original member and
+// explicitly reconnecting the client to other available members.
+//
+// This does not reflect a situation expected during Metropolis runtime, as we
+// do not expect splits between an etcd client and its connected member
+// (instead, all etcd clients only connect to their local member). However, it
+// is still an important safety test to perform, and it also exercises the
+// equivalent behaviour of an etcd client re-connecting for any other reason.
+func TestClientReconnect(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+ tc.setEndpoints(0)
+
+ k := "test-client-reconnect"
+ value := NewValue(tc.client, k, DecoderStringAt)
+ tc.put(t, k, "one")
+
+ watcher := value.Watch()
+ defer watcher.Close()
+ expect(t, watcher, "one")
+
+ q, cancel := wait(t, watcher)
+ defer cancel()
+
+ cluster.Members[0].Stop(t)
+ defer cluster.Members[0].Restart(t)
+ cluster.WaitLeader(t)
+
+ tc.setEndpoints(1, 2)
+ tc.put(t, k, "two")
+
+ if want, got := "two", <-q; want != got {
+ t.Fatalf("Watcher received incorrect data after client restart, wanted %q, got %q", want, got)
+ }
+}
+
+// TestClientPartition forces a temporary partition of the etcd member while a
+// watcher is running, updates the value from across the partition, and undoes
+// the partition.
+// The partition is expected to be entirely transparent to the watcher.
+func TestClientPartition(t *testing.T) {
+ tcOne := newTestClient(t)
+ defer tcOne.close()
+ tcOne.setEndpoints(0)
+
+ tcRest := newTestClient(t)
+ defer tcRest.close()
+ tcRest.setEndpoints(1, 2)
+
+ k := "test-client-partition"
+ valueOne := NewValue(tcOne.client, k, DecoderStringAt)
+ watcherOne := valueOne.Watch()
+ defer watcherOne.Close()
+ valueRest := NewValue(tcRest.client, k, DecoderStringAt)
+ watcherRest := valueRest.Watch()
+ defer watcherRest.Close()
+
+ tcRest.put(t, k, "a")
+ expect(t, watcherOne, "a")
+ expect(t, watcherRest, "a")
+
+ cluster.Members[0].InjectPartition(t, cluster.Members[1], cluster.Members[2])
+
+ tcRest.put(t, k, "b")
+ expect(t, watcherRest, "b")
+ expectTimeout(t, watcherOne)
+
+ cluster.Members[0].RecoverPartition(t, cluster.Members[1], cluster.Members[2])
+
+ expect(t, watcherOne, "b")
+ tcRest.put(t, k, "c")
+ expect(t, watcherOne, "c")
+ expect(t, watcherRest, "c")
+
+}
+
+// TestEarlyUse exercises the correct behaviour of the value watcher on a value
+// that is not yet set.
+func TestEarlyUse(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ k := "test-early-use"
+
+ value := NewValue(tc.client, k, DecoderStringAt)
+ watcher := value.Watch()
+ defer watcher.Close()
+
+ wg := setSetupWg(watcher)
+ wg.Add(1)
+ q, cancel := wait(t, watcher)
+ defer cancel()
+
+ wg.Done()
+
+ tc.put(t, k, "one")
+
+ if want, got := "one", <-q; want != got {
+ t.Fatalf("Expected %q, got %q", want, got)
+ }
+}
+
+// TestRemove exercises the basic functionality of handling deleted values.
+func TestRemove(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ k := "test-remove"
+ tc.put(t, k, "one")
+
+ value := NewValue(tc.client, k, DecoderStringAt)
+ watcher := value.Watch()
+ defer watcher.Close()
+
+ expect(t, watcher, "one")
+ tc.remove(t, k)
+ expect(t, watcher, "")
+}
+
+// TestRemoveRange exercises the behaviour of a Get on a ranged watcher when a
+// value is removed.
+func TestRemoveRange(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ ks := "test-remove-range/"
+ ke := "test-remove-range0"
+ value := NewValue(tc.client, ks, DecoderStringAt, Range(ke))
+ tc.put(t, ks+"a", "one")
+ tc.put(t, ks+"b", "two")
+ tc.put(t, ks+"c", "three")
+ tc.put(t, ks+"b", "four")
+ tc.remove(t, ks+"c")
+
+ w := value.Watch()
+ defer w.Close()
+
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ res := make(map[string]string)
+ stringAtGet(ctx, t, w, res)
+ stringAtGet(ctx, t, w, res)
+
+ for _, te := range []struct {
+ k, w string
+ }{
+ {ks + "a", "one"},
+ {ks + "b", "four"},
+ {ks + "c", ""},
+ } {
+ if want, got := te.w, res[te.k]; want != got {
+ t.Errorf("res[%q]: wanted %q, got %q", te.k, want, got)
+ }
+ }
+}
+
+// TestEmptyRace forces the watcher to retrieve an empty value from the K/V
+// store at first, and to establish the watch channel only after a new value
+// has been stored in the same place.
+func TestEmptyRace(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ k := "test-remove-race"
+ tc.put(t, k, "one")
+ tc.remove(t, k)
+
+ value := NewValue(tc.client, k, DecoderStringAt)
+ watcher := value.Watch()
+ defer watcher.Close()
+
+ wg := setRaceWg(watcher)
+ wg.Add(1)
+ q, cancel := wait(t, watcher)
+ defer cancel()
+
+ tc.put(t, k, "two")
+ wg.Done()
+
+ if want, got := "two", <-q; want != got {
+ t.Fatalf("Watcher received incorrect data after client restart, wanted %q, got %q", want, got)
+ }
+}
+
+type errOrInt struct {
+ val int64
+ err error
+}
+
+// TestDecoder exercises the decoder functionality of the watcher, by
+// creating a value with a decoder that only accepts string-encoded integers
+// that are divisible by three. The test then proceeds to put a handful of
+// values into etcd, ensuring that undecodable values correctly return an error
+// on Get, but that the watcher continues to work after the error has been
+// returned.
+func TestDecoder(t *testing.T) {
+ decoderDivisibleByThree := func(_, value []byte) (int64, error) {
+ num, err := strconv.ParseInt(string(value), 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("not a valid number")
+ }
+ if (num % 3) != 0 {
+ return 0, fmt.Errorf("not divisible by 3")
+ }
+ return num, nil
+ }
+
+ tc := newTestClient(t)
+ defer tc.close()
+
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ k := "test-decoder"
+ value := NewValue(tc.client, k, decoderDivisibleByThree)
+ watcher := value.Watch()
+ defer watcher.Close()
+ tc.put(t, k, "3")
+ _, err := watcher.Get(ctx)
+ if err != nil {
+ t.Fatalf("Initial Get: %v", err)
+ }
+
+ // Stream updates into arbitrarily-bounded test channel.
+ queue := make(chan errOrInt, 100)
+ go func() {
+ for {
+ res, err := watcher.Get(ctx)
+ if err != nil && errors.Is(err, ctx.Err()) {
+ return
+ }
+ if err != nil {
+ queue <- errOrInt{
+ err: err,
+ }
+ } else {
+ queue <- errOrInt{
+ val: res,
+ }
+ }
+ }
+ }()
+
+ var wantList []*int64
+ wantError := func(val string) {
+ wantList = append(wantList, nil)
+ tc.put(t, k, val)
+ }
+ wantValue := func(val string, decoded int64) {
+ wantList = append(wantList, &decoded)
+ tc.put(t, k, val)
+ }
+
+ wantError("")
+ wantValue("9", 9)
+ wantError("foo")
+ wantValue("18", 18)
+ wantError("10")
+ wantValue("27", 27)
+ wantValue("36", 36)
+
+ for i, want := range wantList {
+ q := <-queue
+ if want == nil && q.err == nil {
+ t.Fatalf("Case %d: wanted error, got no error and value %d", i, q.val)
+ }
+ if want != nil && (*want) != q.val {
+ t.Fatalf("Case %d: wanted value %d, got error %v and value %d", i, *want, q.err, q.val)
+ }
+ }
+}
+
+// TestBacklog ensures that the watcher can handle a large backlog of changes
+// in etcd that the client didn't keep up with, and that the final state is
+// available to the client when it actually gets around to calling Get().
+func TestBacklog(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ k := "test-backlog"
+ value := NewValue(tc.client, k, DecoderStringAt)
+ watcher := value.Watch()
+ defer watcher.Close()
+
+ tc.put(t, k, "initial")
+ expect(t, watcher, "initial")
+
+ for i := 0; i < 1000; i++ {
+ tc.put(t, k, fmt.Sprintf("val-%d", i))
+ }
+
+ ctx, ctxC := context.WithTimeout(context.Background(), time.Second)
+ defer ctxC()
+ for {
+ valB, err := watcher.Get(ctx)
+ if err != nil {
+ t.Fatalf("Get() returned error before expected final value: %v", err)
+ }
+ if valB.Value == "val-999" {
+ break
+ }
+ }
+}
+
+// TestBacklogRange ensures that the ranged etcd watcher can handle a large
+// backlog of changes in etcd that the client didn't keep up with.
+func TestBacklogRange(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+
+ ks := "test-backlog-range/"
+ ke := "test-backlog-range0"
+ value := NewValue(tc.client, ks, DecoderStringAt, Range(ke))
+ w := value.Watch()
+ defer w.Close()
+
+ for i := 0; i < 100; i++ {
+ if i%2 == 0 {
+ tc.put(t, ks+"a", fmt.Sprintf("val-%d", i))
+ } else {
+ tc.put(t, ks+"b", fmt.Sprintf("val-%d", i))
+ }
+ }
+
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ res := make(map[string]string)
+ stringAtGet(ctx, t, w, res)
+ stringAtGet(ctx, t, w, res)
+
+ for _, te := range []struct {
+ k, w string
+ }{
+ {ks + "a", "val-98"},
+ {ks + "b", "val-99"},
+ } {
+ if want, got := te.w, res[te.k]; want != got {
+ t.Errorf("res[%q]: wanted %q, got %q", te.k, want, got)
+ }
+ }
+}
+
+// TestBacklogOnly exercises the BacklogOnly option for non-ranged watchers,
+// which effectively makes any Get operation non-blocking (but also showcases
+// that unless a Get without BacklogOnly is issued, no new data will appear by
+// itself in the watcher - which is an undocumented implementation detail of the
+// option).
+func TestBacklogOnly(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ k := "test-backlog-only"
+ tc.put(t, k, "initial")
+
+ value := NewValue(tc.client, k, DecoderStringAt)
+ watcher := value.Watch()
+ defer watcher.Close()
+
+ d, err := watcher.Get(ctx, event.BacklogOnly[StringAt]())
+ if err != nil {
+ t.Fatalf("First Get failed: %v", err)
+ }
+ if want, got := "initial", d.Value; want != got {
+ t.Fatalf("First Get: wanted value %q, got %q", want, got)
+ }
+
+ // As expected, the next call to Get with BacklogOnly fails - there truly are
+ // no new updates to emit.
+ _, err = watcher.Get(ctx, event.BacklogOnly[StringAt]())
+ if want, got := event.ErrBacklogDone, err; !errors.Is(got, want) {
+ t.Fatalf("Second Get: wanted %v, got %v", want, got)
+ }
+
+ // Implementation detail: even though there is a new value ('second'),
+ // BacklogOnly will still return ErrBacklogDone.
+ tc.put(t, k, "second")
+ _, err = watcher.Get(ctx, event.BacklogOnly[StringAt]())
+ if want, got := event.ErrBacklogDone, err; !errors.Is(got, want) {
+ t.Fatalf("Third Get: wanted %v, got %v", want, got)
+ }
+
+ // ... However, a Get without BacklogOnly will return the new value.
+ d, err = watcher.Get(ctx)
+ if err != nil {
+ t.Fatalf("Fourth Get failed: %v", err)
+ }
+ if want, got := "second", d.Value; want != got {
+ t.Fatalf("Fourth Get: wanted value %q, got %q", want, got)
+ }
+}
+
+// TestBacklogOnlyRange exercises the BacklogOnly option for ranged watchers,
+// showcasing how it is expected to be used for keeping up with the external
+// state of a range by synchronizing it into a local map.
+func TestBacklogOnlyRange(t *testing.T) {
+ tc := newTestClient(t)
+ defer tc.close()
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ ks := "test-backlog-only-range/"
+ ke := "test-backlog-only-range0"
+
+ for i := 0; i < 100; i++ {
+ if i%2 == 0 {
+ tc.put(t, ks+"a", fmt.Sprintf("val-%d", i))
+ } else {
+ tc.put(t, ks+"b", fmt.Sprintf("val-%d", i))
+ }
+ }
+
+ value := NewValue(tc.client, ks, DecoderStringAt, Range(ke))
+ w := value.Watch()
+ defer w.Close()
+
+ // Collect results into a map from key to value.
+ res := make(map[string]string)
+
+ // Run first Get - this is the barrier defining what's part of the backlog.
+ g, err := w.Get(ctx, event.BacklogOnly[StringAt]())
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ res[g.Key] = g.Value
+
+ // These won't be part of the backlog.
+ tc.put(t, ks+"a", "val-100")
+ tc.put(t, ks+"b", "val-101")
+
+ // Retrieve the rest of the backlog until ErrBacklogDone is returned.
+ nUpdates := 1
+ for {
+ g, err := w.Get(ctx, event.BacklogOnly[StringAt]())
+ if errors.Is(err, event.ErrBacklogDone) {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ nUpdates += 1
+ res[g.Key] = g.Value
+ }
+
+ // The backlog should've been compacted to just two entries at their newest
+ // state.
+ if want, got := 2, nUpdates; want != got {
+ t.Fatalf("wanted backlog in %d updates, got it in %d", want, got)
+ }
+
+ for _, te := range []struct {
+ k, w string
+ }{
+ {ks + "a", "val-98"},
+ {ks + "b", "val-99"},
+ } {
+ if want, got := te.w, res[te.k]; want != got {
+ t.Errorf("res[%q]: wanted %q, got %q", te.k, want, got)
+ }
+ }
+}
diff --git a/osbase/event/event.go b/osbase/event/event.go
new file mode 100644
index 0000000..e2e0bd8
--- /dev/null
+++ b/osbase/event/event.go
@@ -0,0 +1,237 @@
+// Copyright 2020 The Monogon Project Authors.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package event defines and implements Event Values, a mechanism in which
+// multiple consumers can watch a value for updates in a reliable way.
+//
+// Two implementations currently exist: an in-memory one (see the memory
+// subpackage) and an etcd-backed one (see the etcd subpackage).
+//
+// # Background and intended use
+//
+// The Event Value library is intended to be used within Metropolis'
+// supervisor-based runnables to communicate state changes to other runnables,
+// while permitting both sides to restart if needed. It grew out of multiple
+// codebases reimplementing an ad-hoc observer pattern, and from the
+// realization that implementing all possible edge cases of such patterns is
+// non-trivial and subject to programming errors. As such, it was turned into a
+// self-standing library.
+//
+// Why not just channels?
+//
+// Plain channels have multiple deficiencies for this usecase:
+// - Strict FIFO behaviour: all values sent to a channel must be received, and
+// historic and newest data must be treated in the same way. This means that
+// a consumer of state changes must process all updates to the value as if
+// they are the newest, and is unable to skip rapid updates when a system is
+// slowly settling due to a cascading state change.
+// - Implementation overhead: implementing an observer
+// registration/unregistration pattern is prone to programming bugs,
+// especially for features like always first sending the current state to a
+// new observer.
+// - Strict buffer size: due to their FIFO nature and the possibility of
+// consumers not receiving actively, channels would have to buffer all
+// existing updates, requiring some arbitrary best-guess channel buffer
+// sizing that would still not prevent blocking writes or data loss in a
+// worst case scenario.
+//
+// Or, in other words: Go channels are a synchronization primitive, not a
+// ready-made solution to this problem. The Event Value implementation in fact
+// extensively uses Go channels within its implementation as a building block.
+//
+// Why not just condition variables (sync.Cond)?
+//
+// Go's condition variable implementation doesn't fully address our needs
+// either:
+// - No context/canceling support: once a condition is being Wait()ed on,
+// this cannot be interrupted. This is especially painful and unwieldy when
+// dealing with context-heavy code, such as Metropolis.
+// - Spartan API: expecting users to plainly use sync.Cond is risky, as the API
+// is fairly low-level.
+// - No solution for late consumers: late consumers (ones that missed the value
+// being set by a producer) would still have to implement logic in order to
+// find out such a value, as sync.Cond only supports what amounts to
+// edge-level triggers as part of its Broadcast/Signal system.
+//
+// It would be possible to implement the in-memory Value using a sync.Cond
+// but such an implementation would likely be more complex than the current
+// implementation based on channels and mutexes, as it would have to work
+// around issues like lack of canceling, etc.
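+//
+// A minimal usage sketch with the in-memory implementation (hedged; assumes
+// the memory subpackage is imported and that ctx is an existing context):
+//
+//	var v memory.Value[int]
+//	w := v.Watch()
+//	defer w.Close()
+//	v.Set(42)
+//	got, err := w.Get(ctx) // got == 42, err == nil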
+package event
+
+import (
+ "context"
+ "errors"
+
+ "source.monogon.dev/osbase/supervisor"
+)
+
+// A Value is an 'Event Value', some piece of data that can be updated ('Set')
+// by Producers and retrieved by Consumers.
+type Value[T any] interface {
+ // Set updates the Value to the given data. It is safe to call this from
+ // multiple goroutines, including concurrently.
+ //
+ // Any time Set is called, any consumers performing a Watch on this Value
+ // will be notified with the new data - even if the Set data is the same as
+ // the one that was already stored.
+ //
+ // A Value will initially have no data set. This 'no data' state manifests to
+ // consumers as the first .Get() call on a Watcher blocking until data is Set.
+ //
+ // All updates will be serialized in an arbitrary order - if multiple
+ // producers wish to perform concurrent actions to update the Value partially,
+ // this should be negotiated and serialized externally by the producers.
+ Set(val T)
+
+ // ValueWatch implements the Watch method. It is split out into another
+ // interface to allow some 'Event Values' to implement only the watch/read
+ // part, with the write side being implicit or defined by a more complex
+ // interface than a simple Set().
+ ValueWatch[T]
+}
+
+// ValueWatch is the read side of an 'Event Value', whose data can be retrieved
+// by Consumers by performing a Watch operation on it.
+type ValueWatch[T any] interface {
+ // Watch retrieves a Watcher that keeps track of the version of the data
+ // contained within the Value that was last seen by a consumer. Once a
+ // Watcher is retrieved, it can be used to then get the actual data stored
+ // within the Value, and to reliably retrieve updates to it without having
+ // to poll for changes.
+ Watch() Watcher[T]
+}
+
+// A Watcher keeps track of the last version of data seen by a consumer for a
+// given Value. Each consumer should use its own Watcher instance, and it is not
+// safe to use this type concurrently. However, it is safe to move/copy it
+// across different goroutines, as long as no two goroutines access it
+// simultaneously.
+type Watcher[T any] interface {
+ // Get blocks until a Value's data is available:
+ // - On first use of a Watcher, Get will return the data contained in the
+ // value at the time of calling .Watch(), or block if no data has been
+ // .Set() on it yet. If a value has been Set() since the initial
+ // creation of the Watch() but before Get() is called for the first
+ // time, the first Get() call will immediately return the new value.
+ // - On subsequent uses of a Watcher, Get will block until the given Value
+ // has been Set with new data. This does not necessarily mean that the
+ // new data is different - consumers should always perform their own
+ // checks on whether the update is relevant to them (ie., the data has
+ // changed in a significant way), unless specified otherwise by a Value
+ // publisher.
+ //
+ // Get() will always return the current newest data that has been Set() on
+ // the Value, and not a full log of historical events. This is geared
+ // towards event values where consumers only care about changes to data
+ // since last retrieval, not every value that has been Set along the way.
+ // Thus, consumers need not make sure that they actively .Get() on a
+ // watcher all the time.
+ //
+ // If the context is canceled before data is available to be returned, the
+ // context's error will be returned. However, the Watcher will still need to be
+ // Closed, as it is still fully functional after the context has been canceled.
+ //
+ // Concurrent requests to Get result in an error. The reasoning to return
+ // an error instead of attempting to serialize the requests is that any
+ // concurrent access from multiple goroutines would cause a desync in the
+ // next usage of the Watcher. For example:
+ // 1) w.Get() (in G0) and w.Get() (in G1) start. They both block waiting
+ // for an initial value.
+ // 2) v.Set(0)
+ // 3) w.Get() in G0 returns 0.
+ // 4) v.Set(1)
+ // 5) w.Get() in G1 returns 1.
+ // This would cause G0 and G1 to become desynchronized from each other
+ // (each holds different value data), and subsequent updates would keep
+ // skipping some values.
+ // If multiple goroutines need to access the Value, they should each use
+ // their own Watcher.
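+ //
+ // A hedged sketch of that pattern - each consumer goroutine creates and
+ // closes its own Watcher on a shared Value v:
+ //
+ //	go func() {
+ //		w := v.Watch()
+ //		defer w.Close()
+ //		for {
+ //			data, err := w.Get(ctx)
+ //			if err != nil {
+ //				return
+ //			}
+ //			// react to data
+ //		}
+ //	}()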
+ Get(context.Context, ...GetOption[T]) (T, error)
+
+ // Close must be called if the Watcher is not going to be used anymore -
+ // otherwise, it will not be garbage collected.
+ Close() error
+}
+
+// GetOption modifies the behaviour of a single Get call on a Watcher.
+type GetOption[T any] struct {
+ Predicate func(t T) bool
+ BacklogOnly bool
+}
+
+// Filter returns a GetOption that makes Get return only values for which the
+// given predicate returns true; values not matching it are skipped.
+func Filter[T any](pred func(T) bool) GetOption[T] {
+ return GetOption[T]{
+ Predicate: pred,
+ }
+}
+
+// BacklogOnly will prevent Get from blocking on waiting for more updates from
+// etcd, by instead returning ErrBacklogDone whenever no more data is currently
+// locally available. This is different, however, from establishing that there
+// are no more pending updates from the etcd cluster - the only way to ensure
+// the local client is up to date is by performing Get calls without this option
+// set.
+//
+// This mode of retrieval should only be used for the retrieval of the existing
+// data in the etcd cluster on the initial creation of the Watcher (by
+// repeatedly calling Get until ErrBacklogDone is returned), and shouldn't be set
+// for any subsequent call. Any use of this option after that initial fetch is
+// undefined behaviour that exposes the internals of the Get implementation, and
+// must not be relied on. However, in the future, this behaviour might be
+// formalized.
+//
+// This mode is particularly useful for ranged watchers. Non-ranged watchers can
+// still use this option to distinguish between blocking because of the
+// nonexistence of an object vs. blocking because of networking issues. However,
+// non-ranged retrieval semantics generally will rarely need to make this
+// distinction.
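+//
+// A hedged sketch of the intended initial-fetch pattern (w is a freshly
+// created Watcher over some type T):
+//
+//	for {
+//		v, err := w.Get(ctx, BacklogOnly[T]())
+//		if errors.Is(err, ErrBacklogDone) {
+//			break // initial state fully retrieved
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// merge v into local state
+//	}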
+func BacklogOnly[T any]() GetOption[T] {
+ return GetOption[T]{BacklogOnly: true}
+}
+
+var (
+ // ErrBacklogDone is returned by Get when BacklogOnly is set and there is no more
+ // event data stored in the Watcher client, ie. when the initial cluster state
+ // of the requested key has been retrieved.
+ ErrBacklogDone = errors.New("no more backlogged data")
+)
+
+// Pipe returns a supervisor.Runnable which pipes a Value's initial state and
+// subsequent updates into an already existing channel. This is mostly useful
+// when wanting to select over many Values.
+//
+// The given channel will NOT be closed when the runnable exits. The process
+// receiving from the channel should be running in a group with the pipe
+// runnable, so that both restart if either does. This ensures that there is always
+// at least one value in the channel when the receiver starts.
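+//
+// A hedged usage sketch (assumes 'status' is some Value[Status] for a
+// hypothetical Status type, and that supervisor.Run is available to start the
+// pipe as a sub-runnable alongside the receiver):
+//
+//	c := make(chan Status)
+//	if err := supervisor.Run(ctx, "status-pipe", Pipe(status, c)); err != nil {
+//		return err
+//	}
+//	for {
+//		select {
+//		case s := <-c:
+//			// react to s
+//		case <-ctx.Done():
+//			return ctx.Err()
+//		}
+//	}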
+func Pipe[T any](value Value[T], c chan<- T, opts ...GetOption[T]) supervisor.Runnable {
+ return func(ctx context.Context) error {
+ supervisor.Signal(ctx, supervisor.SignalHealthy)
+ w := value.Watch()
+ defer w.Close()
+ for {
+ v, err := w.Get(ctx, opts...)
+ if err != nil {
+ return err
+ }
+ select {
+ case c <- v:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ }
+}
diff --git a/osbase/event/memory/BUILD.bazel b/osbase/event/memory/BUILD.bazel
new file mode 100644
index 0000000..f2cd4bd
--- /dev/null
+++ b/osbase/event/memory/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "memory",
+ srcs = ["memory.go"],
+ importpath = "source.monogon.dev/osbase/event/memory",
+ visibility = ["//visibility:public"],
+ deps = ["//osbase/event"],
+)
+
+go_test(
+ name = "memory_test",
+ srcs = [
+ "example_test.go",
+ "memory_test.go",
+ ],
+ embed = [":memory"],
+ deps = ["//osbase/event"],
+)
diff --git a/osbase/event/memory/example_test.go b/osbase/event/memory/example_test.go
new file mode 100644
index 0000000..1ae12c6
--- /dev/null
+++ b/osbase/event/memory/example_test.go
@@ -0,0 +1,114 @@
+// Copyright 2020 The Monogon Project Authors.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package memory
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+)
+
+// NetworkStatus is example data that will be stored in a Value.
+type NetworkStatus struct {
+ ExternalAddress net.IP
+ DefaultGateway net.IP
+}
+
+// NetworkService is a fake/example network service that is responsible for
+// communicating the newest information about a machine's network configuration
+// to consumers/watchers.
+type NetworkService struct {
+ Provider Value[NetworkStatus]
+}
+
+// Run pretends to execute the network service's main logic loop, in which it
+// pretends to have received an IP address over DHCP, and communicates that to
+// consumers/watchers.
+func (s *NetworkService) Run(ctx context.Context) {
+ s.Provider.Set(NetworkStatus{
+ ExternalAddress: nil,
+ DefaultGateway: nil,
+ })
+
+ select {
+ case <-time.After(100 * time.Millisecond):
+ case <-ctx.Done():
+ return
+ }
+
+ fmt.Printf("NS: Got DHCP Lease\n")
+ s.Provider.Set(NetworkStatus{
+ ExternalAddress: net.ParseIP("203.0.113.24"),
+ DefaultGateway: net.ParseIP("203.0.113.1"),
+ })
+
+ select {
+ case <-time.After(100 * time.Millisecond):
+ case <-ctx.Done():
+ return
+ }
+
+ fmt.Printf("NS: DHCP Address changed\n")
+ s.Provider.Set(NetworkStatus{
+ ExternalAddress: net.ParseIP("203.0.113.103"),
+ DefaultGateway: net.ParseIP("203.0.113.1"),
+ })
+
+ time.Sleep(100 * time.Millisecond)
+}
+
+// ExampleValue_full demonstrates a typical use case for Event Values, in which
+// a mock network service lets watchers know that the machine on which the code
+// is running has received a new network configuration.
+// It also shows how a typed Value can be embedded directly into a service
+// struct and watched by consumers.
+func ExampleValue_full() {
+ ctx, ctxC := context.WithCancel(context.Background())
+ defer ctxC()
+
+ // Create a fake NetworkService.
+ var ns NetworkService
+
+ // Run an /etc/hosts updater. It will watch for updates from the NetworkService
+ // about the current IP address of the node.
+ go func() {
+ w := ns.Provider.Watch()
+ for {
+ status, err := w.Get(ctx)
+ if err != nil {
+ break
+ }
+ if status.ExternalAddress == nil {
+ continue
+ }
+ // Pretend to write /etc/hosts with the newest ExternalAddress.
+ // In production code, you would also check for whether ExternalAddress has
+ // changed from the last written value, if writing to /etc/hosts is expensive.
+ fmt.Printf("/etc/hosts: foo.example.com is now %s\n", status.ExternalAddress.String())
+ }
+ }()
+
+ // Run fake network service.
+ ns.Run(ctx)
+
+ // Output:
+ // NS: Got DHCP Lease
+ // /etc/hosts: foo.example.com is now 203.0.113.24
+ // NS: DHCP Address changed
+ // /etc/hosts: foo.example.com is now 203.0.113.103
+}
diff --git a/osbase/event/memory/memory.go b/osbase/event/memory/memory.go
new file mode 100644
index 0000000..16818a0
--- /dev/null
+++ b/osbase/event/memory/memory.go
@@ -0,0 +1,233 @@
+// Copyright 2020 The Monogon Project Authors.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package memory
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "source.monogon.dev/osbase/event"
+)
+
+var (
+ // Type assert that *Value implements event.Value. We do this artificially, as
+ // there currently is no code path that needs this to be strictly true. However,
+ // users of this library might want to rely on the event.Value interface instead
+ // of particular Value implementations.
+ _ event.Value[int] = &Value[int]{}
+)
+
+// Value is a 'memory value', which implements an event.Value stored in memory.
+// It is safe to construct an empty object of this type. However, this must not
+// be copied.
+type Value[T any] struct {
+ // mu guards the inner, innerSet and watchers fields.
+ mu sync.RWMutex
+ // inner is the latest data Set on the Value. It is used to provide the
+ // newest version of the Set data to new watchers.
+ inner T
+ // innerSet is true when inner has been Set at least once. It is used to
+ // differentiate between a nil and unset value.
+ innerSet bool
+ // watchers is the list of watchers that should be updated when new data is
+ // Set. It will grow on every .Watch() and shrink any time a watcher is
+ // determined to have been closed.
+ watchers []*watcher[T]
+
+ // Sync, if set to true, blocks all .Set() calls on the Value until all
+ // Watchers derived from it actively .Get() the new value. This can be used
+ // to ensure Watchers always receive a full log of all Set() calls.
+ //
+ // This must not be changed after the first .Set/.Watch call.
+ //
+ // This is an experimental API and subject to change. It might be migrated
+ // to per-Watcher settings defined within the main event.Value/Watcher
+ // interfaces.
+ Sync bool
+}
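+
+// A minimal sketch of how Sync changes delivery (illustrative only; ctx is
+// assumed to be a context.Context):
+//
+//	v := Value[int]{Sync: true}
+//	w := v.Watch()
+//	go func() { v.Set(1) }() // Set blocks until the value is consumed...
+//	n, _ := w.Get(ctx)       // ...by this Get; n == 1.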
+
+// Set updates the Value to the given data. It is safe to call this from
+// multiple goroutines, including concurrently.
+//
+// For more information about guarantees, see event.Value.Set.
+func (m *Value[T]) Set(val T) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ // Update the data that is provided on first Get() to watchers.
+ m.inner = val
+ m.innerSet = true
+
+ // Go through all watchers, updating them on the new value and filtering out
+ // all closed watchers.
+ newWatchers := m.watchers[:0]
+ for _, w := range m.watchers {
+ if w.closed() {
+ continue
+ }
+ w.update(m.Sync, val)
+ newWatchers = append(newWatchers, w)
+ }
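+ // If the backing array has much more capacity than needed (more than 3x
+ // the number of remaining watchers), reallocate it at 2x the length so
+ // that memory referenced by closed watchers can be released.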
+ if cap(newWatchers) > len(newWatchers)*3 {
+ reallocated := make([]*watcher[T], 0, len(newWatchers)*2)
+ newWatchers = append(reallocated, newWatchers...)
+ }
+ m.watchers = newWatchers
+}
+
+// watcher implements the event.Watcher interface for watchers returned by
+// Value.
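+//
+// Values are delivered over bufferedC in the default (asynchronous) case,
+// where only the newest undelivered value is retained, or over unbufferedC
+// when Sync is enabled, in which case Set blocks until the value is received
+// by Get or the watcher is closed.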
+type watcher[T any] struct {
+ // bufferedC is a buffered channel of size 1 for submitting values to the
+ // watcher.
+ bufferedC chan T
+ // unbufferedC is an unbuffered channel, which is used when Sync is enabled.
+ unbufferedC chan T
+
+ // getSem is a channel-based semaphore (which is of size 1, and thus in
+ // fact a mutex) that is used to ensure that only a single .Get() call is
+ // active. It is implemented as a channel to permit concurrent .Get() calls
+ // to error out instead of blocking.
+ getSem chan struct{}
+ // close is a channel that is closed when this watcher is itself Closed.
+ close chan struct{}
+}
+
+// Watch retrieves a Watcher that keeps track of the version of the data
+// contained within the Value that was last seen by its consumer.
+//
+// For more information about guarantees, see event.Value.Watch.
+func (m *Value[T]) Watch() event.Watcher[T] {
+ waiter := &watcher[T]{
+ bufferedC: make(chan T, 1),
+ unbufferedC: make(chan T),
+ close: make(chan struct{}),
+ getSem: make(chan struct{}, 1),
+ }
+
+ m.mu.Lock()
+ // If the watchers slice is at capacity, drop closed watchers, and
+ // reallocate the slice at 2x the length if its capacity is not between
+ // 1.5x and 3x the length.
+ if len(m.watchers) == cap(m.watchers) {
+ newWatchers := m.watchers[:0]
+ for _, w := range m.watchers {
+ if !w.closed() {
+ newWatchers = append(newWatchers, w)
+ }
+ }
+ if cap(newWatchers)*2 < len(newWatchers)*3 || cap(newWatchers) > len(newWatchers)*3 {
+ reallocated := make([]*watcher[T], 0, len(newWatchers)*2)
+ newWatchers = append(reallocated, newWatchers...)
+ }
+ m.watchers = newWatchers
+ }
+ // Append this watcher to the Value.
+ m.watchers = append(m.watchers, waiter)
+ // If the Value already has some value set, put it in the buffered channel.
+ if m.innerSet {
+ waiter.bufferedC <- m.inner
+ }
+ m.mu.Unlock()
+
+ return waiter
+}
+
+// closed returns whether this watcher has been closed.
+func (m *watcher[T]) closed() bool {
+ select {
+ case _, ok := <-m.close:
+ if !ok {
+ return true
+ }
+ default:
+ }
+ return false
+}
+
+// update is the high-level update-this-watcher function called by Value.
+func (m *watcher[T]) update(sync bool, val T) {
+ // If synchronous delivery was requested, block until the watcher .Get()s
+ // it or the watcher is closed.
+ if sync {
+ select {
+ case m.unbufferedC <- val:
+ case <-m.close:
+ }
+ return
+ }
+
+ // Otherwise, deliver asynchronously. If there is already a value in the
+ // buffered channel that was not retrieved, drop it.
+ select {
+ case <-m.bufferedC:
+ default:
+ }
+ // The channel is now empty, so sending to it cannot block.
+ m.bufferedC <- val
+}
+
+func (m *watcher[T]) Close() error {
+ close(m.close)
+ return nil
+}
+
+// Get blocks until a Value's data is available. See event.Watcher.Get for
+// guarantees and more information.
+func (m *watcher[T]) Get(ctx context.Context, opts ...event.GetOption[T]) (T, error) {
+ // Make sure we're the only active .Get call.
+ var empty T
+ select {
+ case m.getSem <- struct{}{}:
+ default:
+ return empty, fmt.Errorf("cannot Get() concurrently on a single waiter")
+ }
+ defer func() {
+ <-m.getSem
+ }()
+
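+ // Process the Get options: the last Predicate provided wins, while
+ // BacklogOnly is rejected because it is not supported by memory watchers.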
+ var predicate func(t T) bool
+ for _, opt := range opts {
+ if opt.Predicate != nil {
+ predicate = opt.Predicate
+ }
+ if opt.BacklogOnly {
+ return empty, errors.New("BacklogOnly is not implemented for memory watchers")
+ }
+ }
+
+ for {
+ var val T
+ // For Sync values, ensure the initial value in the buffered
+ // channel is delivered first.
+ select {
+ case val = <-m.bufferedC:
+ default:
+ select {
+ case <-ctx.Done():
+ return empty, ctx.Err()
+ case val = <-m.bufferedC:
+ case val = <-m.unbufferedC:
+ }
+ }
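+ // If a predicate was provided, skip values that do not satisfy it and
+ // keep waiting for the next one.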
+ if predicate != nil && !predicate(val) {
+ continue
+ }
+ return val, nil
+ }
+}
diff --git a/osbase/event/memory/memory_test.go b/osbase/event/memory/memory_test.go
new file mode 100644
index 0000000..b622565
--- /dev/null
+++ b/osbase/event/memory/memory_test.go
@@ -0,0 +1,371 @@
+// Copyright 2020 The Monogon Project Authors.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package memory
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "source.monogon.dev/osbase/event"
+)
+
+// TestAsync exercises the high-level behaviour of a Value, in which a
+// watcher is able to catch up to the newest Set value.
+func TestAsync(t *testing.T) {
+ p := Value[int]{}
+ p.Set(0)
+
+ ctx := context.Background()
+
+ // The 0 from Set() should be available via .Get().
+ watcher := p.Watch()
+ val, err := watcher.Get(ctx)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ if want, got := 0, val; want != got {
+ t.Fatalf("Value: got %d, wanted %d", got, want)
+ }
+
+ // Send a large amount of updates that the watcher does not actively .Get().
+ for i := 1; i <= 100; i++ {
+ p.Set(i)
+ }
+
+ // The watcher should still end up with the newest .Set() value on the next
+ // .Get() call.
+ val, err = watcher.Get(ctx)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ if want, got := 100, val; want != got {
+ t.Fatalf("Value: got %d, wanted %d", got, want)
+ }
+}
+
+// TestSync exercises the Value's 'Sync' field, which makes all
+// Set() calls block until all respective watchers .Get() the updated data.
+// This particular test ensures that .Set() calls result in a
+// perfect log of updates being transmitted to a watcher.
+func TestSync(t *testing.T) {
+ p := Value[int]{
+ Sync: true,
+ }
+ values := make(chan int, 100)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ ctx := context.Background()
+ watcher := p.Watch()
+ wg.Done()
+ for {
+ value, err := watcher.Get(ctx)
+ if err != nil {
+ panic(err)
+ }
+ values <- value
+ }
+ }()
+
+ p.Set(0)
+ wg.Wait()
+
+ want := []int{1, 2, 3, 4}
+ for _, w := range want {
+ p.Set(w)
+ }
+
+ timeout := time.After(time.Second)
+ for i, w := range append([]int{0}, want...) {
+ select {
+ case <-timeout:
+ t.Fatalf("timed out on value %d (%d)", i, w)
+ case val := <-values:
+ if w != val {
+ t.Errorf("value %d was %d, wanted %d", i, val, w)
+ }
+ }
+ }
+}
+
+// TestSyncBlocks exercises the Value's 'Sync' field, which makes all
+// Set() calls block until all respective watchers .Get() the updated data.
+// This particular test ensures that .Set() calls actually block when a watcher
+// is unattended.
+func TestSyncBlocks(t *testing.T) {
+ p := Value[int]{
+ Sync: true,
+ }
+ ctx := context.Background()
+
+ // Shouldn't block, as there are no declared watchers.
+ p.Set(0)
+
+ watcher := p.Watch()
+
+ // Should retrieve the zero; further requests will block.
+ value, err := watcher.Get(ctx)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ if want, got := 0, value; want != got {
+ t.Fatalf("Got initial value %d, wanted %d", got, want)
+ }
+
+ // .Set() should block, as the watcher is unattended.
+ //
+ // Whether something blocks in Go is untestable in a robust way (see: halting
+ // problem). We work around this by introducing a 'stage' int64, which is
+ // put on the 'c' channel after the needs-to-block function returns. We then
+ // perform an action that should unblock this function right after updating
+ // 'stage' to a different value.
+ // Then, we observe what was put on the channel: If it's the initial value, it
+ // means the function didn't block when expected. Otherwise, it means the
+ // function unblocked when expected.
+ stage := int64(0)
+ c := make(chan int64, 1)
+ go func() {
+ p.Set(1)
+ c <- atomic.LoadInt64(&stage)
+ }()
+
+ // Getting should unblock the provider. Mark via 'stage' variable that
+ // unblocking now is expected.
+ atomic.StoreInt64(&stage, int64(1))
+ // Potential race: .Set() unblocks here due to some bug, before .Get() is
+ // called, and we record a false positive.
+ value, err = watcher.Get(ctx)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+
+ res := <-c
+ if res != int64(1) {
+ t.Fatalf("Set() returned before Get()")
+ }
+
+ if want, got := 1, value; want != got {
+ t.Fatalf("Wanted value %d, got %d", want, got)
+ }
+
+ // Closing the watcher and setting should not block anymore.
+ if err := watcher.Close(); err != nil {
+ t.Fatalf("Close: %v", err)
+ }
+ // Last step, if this blocks we will get a deadlock error and the test will panic.
+ p.Set(2)
+}
+
+// TestMultipleGets verifies that calling .Get() on a single watcher from two
+// goroutines is prevented by returning an error in exactly one of them.
+func TestMultipleGets(t *testing.T) {
+ p := Value[int]{}
+ ctx := context.Background()
+
+ w := p.Watch()
+
+ tryError := func(errs chan error) {
+ _, err := w.Get(ctx)
+ errs <- err
+ }
+ errs := make(chan error, 2)
+ go tryError(errs)
+ go tryError(errs)
+
+ for err := range errs {
+ if err == nil {
+ t.Fatalf("A Get call succeeded, while it should have blocked or returned an error")
+ } else {
+ // Found the error, test succeeded.
+ break
+ }
+ }
+}
+
+// TestConcurrency attempts to stress the Value/Watcher
+// implementation to design limits (a hundred simultaneous watchers), ensuring
+// that the watchers all settle to the final set value.
+func TestConcurrency(t *testing.T) {
+ ctx := context.Background()
+
+ p := Value[int]{}
+ p.Set(0)
+
+ // Number of watchers to create.
+ watcherN := 100
+ // Expected final value to be Set().
+ final := 100
+ // Result channel per watcher.
+ resC := make([]chan error, watcherN)
+
+ // Spawn watcherN watchers.
+ for i := 0; i < watcherN; i++ {
+ resC[i] = make(chan error, 1)
+ go func(id int) {
+ // done is a helper function that will put an error on the
+ // respective watcher's resC.
+ done := func(err error) {
+ resC[id] <- err
+ close(resC[id])
+ }
+
+ watcher := p.Watch()
+ // prev is used to ensure the values received are monotonic.
+ prev := -1
+ for {
+ val, err := watcher.Get(ctx)
+ if err != nil {
+ done(err)
+ return
+ }
+
+ // Ensure monotonicity of received data.
+ if val <= prev {
+ done(fmt.Errorf("received out of order data: %d after %d", val, prev))
+ return
+ }
+ prev = val
+
+ // Quit when the final value is received.
+ if val == final {
+ done(nil)
+ return
+ }
+
+ // Sleep a bit, depending on the watcher. This makes each
+ // watcher behave slightly differently, and attempts to
+ // exercise races dependent on sleep time between subsequent
+ // Get calls.
+ time.Sleep(time.Millisecond * time.Duration(id))
+ }
+ }(i)
+ }
+
+ // Set 1..final on the value.
+ for i := 1; i <= final; i++ {
+ p.Set(i)
+ }
+
+ // Ensure all watchers exit with no error.
+ for i, c := range resC {
+ err := <-c
+ if err != nil {
+ t.Errorf("Watcher %d returned %v", i, err)
+ }
+ }
+}
+
+// TestCanceling exercises whether canceling a context during a .Get() gracefully
+// aborts that particular Get call while still allowing subsequent use of the same
+// watcher.
+func TestCanceling(t *testing.T) {
+ p := Value[int]{
+ Sync: true,
+ }
+
+ ctx, ctxC := context.WithCancel(context.Background())
+
+ watcher := p.Watch()
+
+ // errs will contain the error returned by Get.
+ errs := make(chan error, 1)
+ go func() {
+ // This Get will block, as no initial data has been Set on the value.
+ _, err := watcher.Get(ctx)
+ errs <- err
+ }()
+
+ // Cancel the context, and expect that context error to propagate to the .Get().
+ ctxC()
+ if want, got := ctx.Err(), <-errs; !errors.Is(got, want) {
+ t.Fatalf("Get should've returned %v, got %v", want, got)
+ }
+
+ // Do another .Get() on the same watcher with a new context. Even though the
+ // call was aborted via a context cancel, the watcher should continue working.
+ ctx = context.Background()
+ go func() {
+ _, err := watcher.Get(ctx)
+ errs <- err
+ }()
+
+ // Unblock the .Get now.
+ p.Set(1)
+ if want, got := error(nil), <-errs; !errors.Is(got, want) {
+ t.Fatalf("Get should've returned %v, got %v", want, got)
+ }
+}
+
+// TestSetAfterWatch ensures that if a value is updated between a Watch and the
+// initial Get, only the newest Set value is returned.
+func TestSetAfterWatch(t *testing.T) {
+ ctx := context.Background()
+
+ p := Value[int]{}
+ p.Set(0)
+
+ watcher := p.Watch()
+ p.Set(1)
+
+ data, err := watcher.Get(ctx)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ if want, got := 1, data; want != got {
+ t.Errorf("Get should've returned %v, got %v", want, got)
+ }
+}
+
+// TestWatchersList ensures that the list of watchers is managed correctly,
+// i.e. there is no memory leak: closed watchers are removed, while all
+// non-closed watchers are kept.
+func TestWatchersList(t *testing.T) {
+ ctx := context.Background()
+ p := Value[int]{}
+
+ var watchers []event.Watcher[int]
+ for i := 0; i < 100; i++ {
+ watchers = append(watchers, p.Watch())
+ }
+ for i := 0; i < 10000; i++ {
+ watchers[10].Close()
+ watchers[10] = p.Watch()
+ }
+
+ if want, got := 1000, cap(p.watchers); want <= got {
+ t.Fatalf("Got capacity %d, wanted less than %d", got, want)
+ }
+
+ p.Set(1)
+ if want, got := 100, len(p.watchers); want != got {
+ t.Fatalf("Got %d watchers, wanted %d", got, want)
+ }
+
+ for _, watcher := range watchers {
+ data, err := watcher.Get(ctx)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ if want, got := 1, data; want != got {
+ t.Errorf("Get should've returned %v, got %v", want, got)
+ }
+ }
+}