package consensus

import (
	"encoding/json"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"time"

	"source.monogon.dev/go/logging"
	"source.monogon.dev/osbase/logbuffer"
	"source.monogon.dev/osbase/logtree"
	"source.monogon.dev/osbase/logtree/unraw"
)

// etcdLogEntry is a JSON-encoded, structured log entry received from a running
// etcd server. The format comes from the logging library used there,
// github.com/uber-go/zap.
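//
// For illustration, a hypothetical zap-encoded line (not taken from a real
// etcd run) might look like:
//
//	{"level":"warn","ts":"2021-07-07T14:35:27.000Z","caller":"etcdserver/server.go:1234","msg":"slow request","took":"1.2s"}
//
// The named fields populate Level, TS, Caller and Message; the parser below
// collects any remaining keys (here, "took") into Extras.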
type etcdLogEntry struct {
	Level   string                 `json:"level"`
	TS      time.Time              `json:"ts"`
	Caller  string                 `json:"caller"`
	Message string                 `json:"msg"`
	Extras  map[string]interface{} `json:"-"`
}

// parseEtcdLogEntry is a logtree/unraw compatible parser for etcd log lines.
// It is fairly liberal in what it will accept, falling back to writing a
// message noting that the given log entry could not be parsed. This ensures
// that no lines are lost, even if malformed.
func parseEtcdLogEntry(l *logbuffer.Line, write unraw.LeveledWriter) {
	if l.Truncated() {
		write(&logtree.ExternalLeveledPayload{
			Message: "Log line truncated: " + l.Data,
		})
		return
	}

	var e etcdLogEntry
	// Parse constant fields.
	if err := json.Unmarshal([]byte(l.Data), &e); err != nil {
		write(&logtree.ExternalLeveledPayload{
			Message: "Log line unparseable: " + l.Data,
		})
		return
	}
	// Parse extra fields.
	if err := json.Unmarshal([]byte(l.Data), &e.Extras); err != nil {
		// Not exactly sure how this could ever happen - the previous parse
		// went fine, so why wouldn't this one? But to be on the safe side,
		// just don't attempt to parse this line any further.
		write(&logtree.ExternalLeveledPayload{
			Message: "Log line unparseable: " + l.Data,
		})
		return
	}
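	// Drop the constant fields we already parsed into the struct above, so
	// they are not repeated as extras in the rendered message.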
	delete(e.Extras, "level")
	delete(e.Extras, "ts")
	delete(e.Extras, "caller")
	delete(e.Extras, "msg")

	out := logtree.ExternalLeveledPayload{
		Timestamp: e.TS,
	}

	// Attempt to parse caller (eg. raft/raft.go:765) into file/line (eg.
	// raft.go 765).
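	// If the caller string does not have that shape, File and Line are simply
	// left unset.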
	if len(e.Caller) > 0 {
		parts := strings.Split(e.Caller, "/")
		fileLine := parts[len(parts)-1]
		parts = strings.Split(fileLine, ":")
		if len(parts) == 2 {
			out.File = parts[0]
			if line, err := strconv.ParseInt(parts[1], 10, 32); err == nil {
				out.Line = int(line)
			}
		}
	}

	// Convert zap level into logtree severity.
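	// Levels not listed below (eg. zap's "debug") leave Severity at its zero
	// value.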
	switch e.Level {
	case "info":
		out.Severity = logging.INFO
	case "warn":
		out.Severity = logging.WARNING
	case "error":
		out.Severity = logging.ERROR
	case "fatal", "panic", "dpanic":
		out.Severity = logging.FATAL
	}

	// Sort extra keys alphabetically.
	extraKeys := make([]string, 0, len(e.Extras))
	for k := range e.Extras {
		extraKeys = append(extraKeys, k)
	}
	sort.Strings(extraKeys)

	// Convert structured extras into a human-friendly logline. We will
	// comma-join the received message and any structured logging data after
	// it.
	parts := make([]string, 0, len(e.Extras)+1)
	parts = append(parts, e.Message)
	for _, k := range extraKeys {

		// Format the value for logs. We elect to use JSON for representing
		// each element, as:
		// - this quotes strings
		// - all the data we retrieved must already be representable in JSON,
		//   as we just decoded it from an existing blob.
		// - the extra data might be arbitrarily nested (eg. an array or
		//   object) and we don't want to be in the business of coming up with
		//   our own serialization format in case of such data.
		var v string
		vbytes, err := json.Marshal(e.Extras[k])
		if err != nil {
			// Fall back to %+v just in case. We don't make any API promises
			// that the log line will be machine parseable or in any stable
			// format.
			v = fmt.Sprintf("%+v", e.Extras[k])
		} else {
			v = string(vbytes)
		}
		extra := fmt.Sprintf("%s: %s", k, v)

		parts = append(parts, extra)
	}

	// If the given message was empty and there is some extra data attached,
	// explicitly state that the message was empty (to avoid a mysterious
	// leading comma).
	// Otherwise, if the message was empty and there was no extra structured
	// data, assume that the sender intended to have it represented as an empty
	// line.
	if len(parts) > 1 && parts[0] == "" {
		parts[0] = "<empty>"
	}

	// Finally, build the message line to emit in leveled logging and emit it.
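	// For example, a hypothetical entry with msg "slow request" and extras
	// prefix=false and took="1.2s" would be rendered as:
	//
	//	slow request, prefix: false, took: "1.2s"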
	out.Message = strings.Join(parts, ", ")

	write(&out)
}