package bmdb

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"time"

	"github.com/cockroachdb/cockroach-go/v2/crdb"
	"github.com/google/uuid"
	"github.com/lib/pq"
	"k8s.io/klog/v2"

	"source.monogon.dev/cloud/bmaas/bmdb/model"
)

// StartSession creates a new BMDB session which will be maintained in a
// background goroutine as long as the given context is valid. Each Session is
// represented by an entry in a sessions table within the BMDB, and subsequent
// Transact calls emit SQL transactions which depend on that entry still being
// present and up to date. A garbage collection system (to be implemented) will
// remove expired sessions from the BMDB, but garbage collection is not
// required for session expiry to work.
//
// When the session becomes invalid (for example due to a network partition),
// subsequent attempts to call Transact will fail with ErrSessionExpired. This
// means that the caller within the component is responsible for creating a new
// Session if a previously used one expires.
func (c *Connection) StartSession(ctx context.Context) (*Session, error) {
	intervalSeconds := 5

	res, err := model.New(c.db).NewSession(ctx, model.NewSessionParams{
		SessionComponentName:   c.bmdb.ComponentName,
		SessionRuntimeInfo:     c.bmdb.RuntimeInfo,
		SessionIntervalSeconds: int64(intervalSeconds),
	})
	if err != nil {
		return nil, fmt.Errorf("creating session failed: %w", err)
	}

	klog.Infof("Started session %s", res.SessionID)

	ctx2, ctxC := context.WithCancel(ctx)

	s := &Session{
		connection: c,
		interval:   time.Duration(intervalSeconds) * time.Second,

		UUID: res.SessionID,

		ctx:  ctx2,
		ctxC: ctxC,
	}
	go s.maintainHeartbeat(ctx2)
	return s, nil
}

// Session is a session (identified by UUID) that has been started in the BMDB.
// Its liveness is maintained by a background goroutine, and as long as that
// session is alive, it can perform transactions and work on the BMDB.
type Session struct {
	connection *Connection
	interval   time.Duration

	UUID uuid.UUID

	ctx  context.Context
	ctxC context.CancelFunc
}

// Expired returns true if this session is expired and will fail all subsequent
// transactions/work.
func (s *Session) Expired() bool {
	return s.ctx.Err() != nil
}

// expire is a helper which marks this session as expired and returns
// ErrSessionExpired.
func (s *Session) expire() error {
	s.ctxC()
	return ErrSessionExpired
}

var (
	// ErrSessionExpired is returned when attempting to Transact or Work on a
	// Session that has expired or been canceled. Once a Session starts returning
	// these errors, it must be re-created by another StartSession call, as no other
	// calls will succeed.
	ErrSessionExpired = errors.New("session expired")
	// ErrWorkConflict is returned when attempting to Work on a Session with a
	// process name that's already performing some work, concurrently, on the
	// requested machine.
	ErrWorkConflict = errors.New("conflicting work on machine")
)

// maintainHeartbeat repeatedly pokes the session at twice the frequency
// mandated by the configured 5-second interval (i.e. every 2.5 seconds). It
// exits if it detects that the session can no longer be maintained, canceling
// the session's internal context and thereby causing future Transact/Work
// calls to fail.
func (s *Session) maintainHeartbeat(ctx context.Context) {
	// Internal deadline, used to check whether we haven't dropped the ball on
	// performing the updates due to a lot of transient errors.
	deadline := time.Now().Add(s.interval)
	for {
		if ctx.Err() != nil {
			klog.Infof("Session %s: context over, exiting: %v", s.UUID, ctx.Err())
			return
		}

		err := s.Transact(ctx, func(q *model.Queries) error {
			sessions, err := q.SessionCheck(ctx, s.UUID)
			if err != nil {
				return fmt.Errorf("when retrieving session: %w", err)
			}
			if len(sessions) < 1 {
				return s.expire()
			}
			err = q.SessionPoke(ctx, s.UUID)
			if err != nil {
				return fmt.Errorf("when poking session: %w", err)
			}
			return nil
		})
		if err != nil {
			klog.Errorf("Session %s: update failed: %v", s.UUID, err)
			if errors.Is(err, ErrSessionExpired) || time.Now().After(deadline) {
				// No way to recover.
				klog.Errorf("Session %s: exiting", s.UUID)
				s.ctxC()
				return
			}
			// Just retry in a bit. One second seems about right for a 5 second interval.
			//
			// TODO(q3k): calculate this based on the configured interval.
			time.Sleep(time.Second)
			continue
		}
		// Success. Keep going.
		deadline = time.Now().Add(s.interval)
		select {
		case <-ctx.Done():
			// Do nothing, next loop iteration will exit.
		case <-time.After(s.interval / 2):
			// Do nothing, next loop iteration will heartbeat.
		}
	}
}

// Transact runs a given function in the context of both a CockroachDB and BMDB
// transaction, retrying as necessary.
//
// Most pure (meaning without side effects outside the database itself) BMDB
// transactions should be run this way.
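//
// A minimal sketch (SomeReadQuery is a hypothetical generated query, not part
// of this package):
//
//	err := sess.Transact(ctx, func(q *model.Queries) error {
//		rows, err := q.SomeReadQuery(ctx)
//		if err != nil {
//			return err
//		}
//		klog.Infof("got %d rows", len(rows))
//		return nil
//	})
//	if errors.Is(err, ErrSessionExpired) {
//		// The session is gone; a new one has to be started before retrying.
//	}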
func (s *Session) Transact(ctx context.Context, fn func(q *model.Queries) error) error {
	return crdb.ExecuteTx(ctx, s.connection.db, nil, func(tx *sql.Tx) error {
		qtx := model.New(tx)
		sessions, err := qtx.SessionCheck(ctx, s.UUID)
		if err != nil {
			return fmt.Errorf("when retrieving session: %w", err)
		}
		if len(sessions) < 1 {
			return s.expire()
		}

		if err := fn(qtx); err != nil {
			return err
		}

		return nil
	})
}

var (
	// ErrNothingToDo is returned by Work when the retrieval function finds no
	// machines eligible for work.
	ErrNothingToDo = errors.New("nothing to do")
	// postgresUniqueViolation is the error code reported by the lib/pq driver
	// when a mutation cannot be performed due to a UNIQUE constraint being
	// violated as a result of the query.
	postgresUniqueViolation = pq.ErrorCode("23505")
)

// Work starts work on a machine. Full work execution is performed in three
// phases:
//
//  1. Retrieval phase. This is performed by 'fn' given to this function. The
//     retrieval function must return zero or more machines that some work
//     should be performed on per the BMDB. The first returned machine will be
//     locked for work under the given process and made available in the Work
//     structure returned by this call. The function may be called multiple
//     times, as it's run within a CockroachDB transaction which may be retried
//     an arbitrary number of times. Thus, it should be side-effect free,
//     ideally only performing read queries to the database.
//  2. Work phase. This is performed by user code while holding on to the Work
//     structure instance.
//  3. Commit phase. This is performed by the function passed to Work.Finish.
//     See that method's documentation for more details.
//
// Important: after a Work is successfully retrieved, either Finish or Cancel
// must be called, otherwise the machine will stay locked until the parent
// session expires or is closed! It's safe and recommended to call
// `defer work.Cancel(ctx)` right after calling Work(), as Cancel is a no-op if
// the work has already been finished.
//
// If no machine is eligible for work, the retrieval function should return
// ErrNothingToDo, and Work will return that error (wrapped). If the retrieval
// function returns no machines and no error, Work also returns ErrNothingToDo.
//
// The returned Work object is _not_ goroutine safe.
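//
// A minimal sketch of the retrieve/work/commit flow. The process constant and
// the queries used inside the closures are hypothetical placeholders, not part
// of this package:
//
//	work, err := sess.Work(ctx, someProcess, func(q *model.Queries) ([]uuid.UUID, error) {
//		return q.SomeRetrievalQuery(ctx)
//	})
//	if errors.Is(err, ErrNothingToDo) {
//		return nil
//	}
//	if err != nil {
//		return err
//	}
//	defer work.Cancel(ctx)
//	// ... perform the actual work against work.Machine ...
//	return work.Finish(ctx, func(q *model.Queries) error {
//		return q.SomeCommitQuery(ctx, work.Machine)
//	})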
func (s *Session) Work(ctx context.Context, process model.Process, fn func(q *model.Queries) ([]uuid.UUID, error)) (*Work, error) {
	var mid *uuid.UUID
	var prevBackoff *existingBackoff
	err := s.Transact(ctx, func(q *model.Queries) error {
		mids, err := fn(q)
		if err != nil {
			return fmt.Errorf("could not retrieve machines for work: %w", err)
		}
		if len(mids) < 1 {
			return ErrNothingToDo
		}
		mid = &mids[0]
		err = q.StartWork(ctx, model.StartWorkParams{
			MachineID: mids[0],
			SessionID: s.UUID,
			Process:   process,
		})
		if err != nil {
			var perr *pq.Error
			if errors.As(err, &perr) && perr.Code == postgresUniqueViolation {
				return ErrWorkConflict
			}
			return fmt.Errorf("could not start work on %q: %w", mids[0], err)
		}
		err = q.WorkHistoryInsert(ctx, model.WorkHistoryInsertParams{
			MachineID: mids[0],
			Event:     model.WorkHistoryEventStarted,
			Process:   process,
		})
		if err != nil {
			return fmt.Errorf("could not insert history event: %w", err)
		}
		backoffs, err := q.WorkBackoffOf(ctx, model.WorkBackoffOfParams{
			MachineID: mids[0],
			Process:   process,
		})
		if err != nil {
			return fmt.Errorf("could not get backoffs: %w", err)
		}
		if len(backoffs) > 0 {
			// If the backoff exists but the last interval is null (e.g. is from a previous
			// version of the schema when backoffs had no interval data) pretend it doesn't
			// exist. Then the backoff mechanism can restart from a clean slate and populate
			// a new, full backoff row.
			if backoff := backoffs[0]; backoff.LastIntervalSeconds.Valid {
				klog.Infof("Existing backoff: %d seconds", backoff.LastIntervalSeconds.Int64)
				prevBackoff = &existingBackoff{
					lastInterval: time.Second * time.Duration(backoff.LastIntervalSeconds.Int64),
				}
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	klog.Infof("Started work %q on machine %q (sess %q)", process, *mid, s.UUID)
	return &Work{
		Machine: *mid,
		s:       s,
		process: process,
		backoff: prevBackoff,
	}, nil
}

// existingBackoff contains backoff information retrieved from a work item that
// has previously failed with a backoff.
type existingBackoff struct {
	// lastInterval is the last interval as stored in the backoff table.
	lastInterval time.Duration
}

// Backoff describes the configuration of backoff for a failed work item. It can
// be passed to Work.Fail to cause an item to not be processed again (to be 'in
// backoff') for a given period of time. Exponential backoff can be configured so
// that subsequent failures of a process will have exponentially increasing
// backoff periods, up to some maximum length.
//
// The underlying unit of backoff period length in the database is one second,
// which means that all effective calculated backoff periods must be an integer
// number of seconds. This is achieved by always rounding the calculated period
// up to the nearest second. A side effect of this is that with exponential
// backoff, non-integer exponents will be applied less precisely for small
// backoff values, e.g. an exponent of 1.1 with an initial backoff of 1s will
// generate the following sequence of backoff periods:
//
//	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17
//
// Which corresponds to the following approximate multipliers in between periods:
//
//	2.00, 1.50, 1.33, 1.25, 1.20, 1.17, 1.14, 1.12, 1.11, 1.10, 1.18, 1.15, 1.13
//
// Thus, the exponent value should be treated more as a limit that the sequence
// of periods will approach than a hard rule for calculating the periods.
// However, if the exponent is larger than 1 (i.e. any time exponential backoff
// is requested), this guarantees that the backoff won't get 'stuck' on a
// repeated period value due to a rounding error.
//
// A zero backoff structure is valid and represents a non-exponential backoff of
// one second.
//
// A partially filled structure is also valid. See the field comments for more
// information about how fields are clamped if not set. The described behaviour
// allows for two useful shorthands:
//
//  1. If only Initial is set, the backoff is non-exponential and every backoff
//     period will be exactly Initial, regardless of what was previously
//     persisted in the database.
//  2. If only Maximum and Exponent are set, the backoff will be exponential,
//     starting at one second and exponentially increasing to Maximum.
//
// It is recommended to construct Backoff structures as constant-like package
// values and treat them as read-only 'descriptors', one per work kind /
// process.
//
// One feature currently missing from the Backoff implementation is jitter. This
// might be introduced in the future if deemed necessary.
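//
// A minimal sketch of such a descriptor (the values are arbitrary examples):
//
//	var someBackoff = Backoff{
//		Initial:  1 * time.Minute,
//		Maximum:  1 * time.Hour,
//		Exponent: 1.2,
//	}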
type Backoff struct {
	// Initial backoff period, used for the backoff if this item failed for the
	// first time (i.e. has not had a Finish call in between two Fail calls).
	//
	// Subsequent calls will ignore this field if the backoff is exponential. If
	// non-exponential, the initial time will always override whatever was
	// previously persisted in the database, i.e. the backoff will always be of
	// value 'Initial'.
	//
	// Cannot be lower than one second. If it is, it will be clamped to one second.
	Initial time.Duration `u:"initial"`

	// Maximum time for backoff. If the calculation of the next backoff period
	// (based on the Exponent and last backoff value) exceeds this maximum, it
	// will be capped to it.
	//
	// Maximum is not persisted in the database. Instead, it is always read from
	// this structure.
	//
	// Cannot be lower than Initial. If it is, it will be clamped to Initial.
	Maximum time.Duration `u:"maximum"`

	// Exponent used for the next backoff calculation. Any time a work item fails
	// directly after another failure, the previous backoff period will be
	// multiplied by the exponent to yield the new backoff period. The new period
	// will then be capped to Maximum.
	//
	// Exponent is not persisted in the database. Instead, it is always read from
	// this structure.
	//
	// Cannot be lower than 1.0. If it is, it will be clamped to 1.0.
	Exponent float64 `u:"exponent"`
}

// normalized copies the given backoff and returns a 'normalized' version of it,
// with the 'when zero/unset' rules described in the Backoff documentation
// applied.
func (b *Backoff) normalized() *Backoff {
	c := *b

	if c.Exponent < 1.0 {
		c.Exponent = 1.0
	}
	if c.Initial < time.Second {
		c.Initial = time.Second
	}
	if c.Maximum < c.Initial {
		c.Maximum = c.Initial
	}
	return &c
}

// simple returns true if this backoff is non-exponential, i.e. its effective
// exponent is 1.0.
func (b *Backoff) simple() bool {
	// Non-normalized simple backoffs will have a zero exponent.
	if b.Exponent == 0.0 {
		return true
	}
	// Normalized simple backoffs will have a 1.0 exponent.
	if b.Exponent == 1.0 {
		return true
	}
	return false
}

// next calculates the next backoff period, in seconds, based on a backoff
// descriptor and previously persisted backoff information. Either or both can
// be nil.
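//
// For example (values arbitrary): with a descriptor of Initial: 1m, Exponent:
// 1.2 and Maximum: 1h, and a previously persisted interval of 600 seconds, the
// next period is 600 * 1.2 = 720 seconds, well under the one-hour maximum.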
func (b *Backoff) next(e *existingBackoff) int64 {
	second := time.Second.Nanoseconds()

	// Minimum interval is one second. Start with that.
	last := second
	// Then, if we have a previous interval, and it's greater than a second, use that
	// as the last interval.
	if e != nil {
		if previous := e.lastInterval.Nanoseconds(); previous > second {
			last = previous
		}
	}

	// If no backoff is configured, go with either the minimum of one second, or
	// whatever the last previous interval was.
	if b == nil {
		return last / second
	}

	// Make a copy of the backoff descriptor, normalizing as necessary.
	c := b.normalized()

	// Simple backoffs always return Initial.
	if b.simple() {
		return c.Initial.Nanoseconds() / second
	}

	// If there is no existing backoff, return the initial backoff value directly.
	if e == nil {
		return c.Initial.Nanoseconds() / second
	}

	// Start out with the persisted interval.
	next := last
	// If by any chance we persisted an interval less than one second, clamp it.
	if next < second {
		next = second
	}

	// Multiply by exponent from descriptor.
	next = int64(float64(next) * c.Exponent)

	// Handle overflows. If multiplying by a positive number resulted in a lower
	// value than what we started with, it means we overflowed and wrapped around. If
	// so, clamp to maximum.
	if next < last {
		next = c.Maximum.Nanoseconds()
	}

	// Clamp to maximum.
	if next > c.Maximum.Nanoseconds() {
		next = c.Maximum.Nanoseconds()
	}
	// Round up to the nearest second.
	if next%second == 0 {
		return next / second
	} else {
		return next/second + 1
	}
}

// Work being performed on a machine.
type Work struct {
	// Machine that this work is being performed on, as retrieved by the retrieval
	// function passed to the Work method.
	Machine uuid.UUID
	// s is the parent session.
	s *Session
	// done marks that this work has already been canceled or finished.
	done bool
	// process that this work performs.
	process model.Process

	// backoff carries backoff information retrieved from the database when this
	// work was started, if any. It is used by Fail to calculate the next backoff
	// period.
	backoff *existingBackoff
}

// Cancel the Work started on a machine. If the work has already been finished
// or canceled, this is a no-op. In case of error, a log line will be emitted.
func (w *Work) Cancel(ctx context.Context) {
	if w.done {
		return
	}
	w.done = true

	klog.Infof("Canceling work %q on machine %q (sess %q)", w.process, w.Machine, w.s.UUID)
	// Eat error and log. There's nothing we can do if this fails, and if it does, it's
	// probably because our connectivity to the BMDB has failed. If so, our session
	// will be invalidated soon and so will the work being performed on this
	// machine.
	err := w.s.Transact(ctx, func(q *model.Queries) error {
		err := q.FinishWork(ctx, model.FinishWorkParams{
			MachineID: w.Machine,
			SessionID: w.s.UUID,
			Process:   w.process,
		})
		if err != nil {
			return err
		}
		return q.WorkHistoryInsert(ctx, model.WorkHistoryInsertParams{
			MachineID: w.Machine,
			Process:   w.process,
			Event:     model.WorkHistoryEventCanceled,
		})
	})
	if err != nil {
		klog.Errorf("Failed to cancel work %q on %q (sess %q): %v", w.process, w.Machine, w.s.UUID, err)
	}
}

// Finish work by executing a commit function 'fn' and releasing the machine
// from the work performed. The given function should apply tags to the
// processed machine in a way that causes it to not be eligible for retrieval
// again. As with the retrieval function, the commit function might be called an
// arbitrary number of times as part of CockroachDB transaction retries.
//
// This may be called only once.
func (w *Work) Finish(ctx context.Context, fn func(q *model.Queries) error) error {
	if w.done {
		return fmt.Errorf("already finished")
	}
	w.done = true
	klog.Infof("Finishing work %q on machine %q (sess %q)", w.process, w.Machine, w.s.UUID)
	return w.s.Transact(ctx, func(q *model.Queries) error {
		err := q.FinishWork(ctx, model.FinishWorkParams{
			MachineID: w.Machine,
			SessionID: w.s.UUID,
			Process:   w.process,
		})
		if err != nil {
			return err
		}
		err = q.WorkBackoffDelete(ctx, model.WorkBackoffDeleteParams{
			MachineID: w.Machine,
			Process:   w.process,
		})
		if err != nil {
			return err
		}
		err = q.WorkHistoryInsert(ctx, model.WorkHistoryInsertParams{
			MachineID: w.Machine,
			Process:   w.process,
			Event:     model.WorkHistoryEventFinished,
		})
		if err != nil {
			return err
		}
		return fn(q)
	})
}

// Fail work and introduce backoff. The given cause is an operator-readable
// string that will be persisted alongside the backoff and in the work
// history/audit table.
//
// The backoff describes a period during which the same process will not be
// retried on this machine.
//
// The given backoff is a structure which describes both the initial backoff
// period if the work failed for the first time, and a mechanism to exponentially
// increase the backoff period if that work failed repeatedly. The work is
// defined to have failed repeatedly if it only resulted in Cancel/Fail calls
// without any Finish calls in the meantime.
//
// Only the last backoff period is persisted in the database. The exponential
// backoff behaviour (including its maximum time) is always calculated based on
// the given backoff structure.
//
// If nil, the backoff defaults to a non-exponential, one-second backoff. This is
// the minimum designed to keep the system chugging along without repeatedly
// trying a failed job in a tight loop. However, the backoff should generally be
// set to some well-engineered value to prevent spurious retries.
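//
// A minimal usage sketch (someBackoff is a hypothetical read-only descriptor as
// described on the Backoff type, and doWork stands in for the actual work
// performed):
//
//	if err := doWork(ctx, work.Machine); err != nil {
//		return work.Fail(ctx, &someBackoff, fmt.Sprintf("work failed: %v", err))
//	}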
561func (w *Work) Fail(ctx context.Context, backoff *Backoff, cause string) error {
Serge Bazanskia9580a72023-01-12 14:44:35 +0100562 if w.done {
563 return fmt.Errorf("already finished")
564 }
565 w.done = true
566
567 return w.s.Transact(ctx, func(q *model.Queries) error {
568 err := q.FinishWork(ctx, model.FinishWorkParams{
569 MachineID: w.Machine,
570 SessionID: w.s.UUID,
571 Process: w.process,
572 })
573 if err != nil {
574 return err
575 }
576 err = q.WorkHistoryInsert(ctx, model.WorkHistoryInsertParams{
577 MachineID: w.Machine,
578 Process: w.process,
579 Event: model.WorkHistoryEventFailed,
580 FailedCause: sql.NullString{
581 String: cause,
582 Valid: true,
583 },
584 })
585 if err != nil {
586 return err
587 }
Serge Bazanski20312b42023-04-19 13:49:47 +0200588 if backoff == nil {
589 klog.Warningf("Nil backoff for %q on machine %q: defaulting to one second non-exponential.", w.process, w.Machine)
Serge Bazanskia9580a72023-01-12 14:44:35 +0100590 }
Serge Bazanski20312b42023-04-19 13:49:47 +0200591 seconds := backoff.next(w.backoff)
592 klog.Infof("Adding backoff for %q on machine %q: %d seconds", w.process, w.Machine, seconds)
593 return q.WorkBackoffInsert(ctx, model.WorkBackoffInsertParams{
594 MachineID: w.Machine,
595 Process: w.process,
596 Cause: cause,
597 Seconds: seconds,
598 })
Serge Bazanskia9580a72023-01-12 14:44:35 +0100599 })
600}