package consensus

import (
	"encoding/json"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"time"

	"source.monogon.dev/metropolis/pkg/logbuffer"
	"source.monogon.dev/metropolis/pkg/logtree"
	"source.monogon.dev/metropolis/pkg/logtree/unraw"
)

// etcdLogEntry is a JSON-encoded, structured log entry received from a running
// etcd server. The format comes from the logging library used there,
// github.com/uber-go/zap.
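//
// An illustrative example of such a line (field values made up, and the
// timestamp written as RFC 3339, since that is what decoding into a time.Time
// expects) might be:
//
//	{"level":"info","ts":"2021-07-07T14:35:27Z","caller":"raft/raft.go:765","msg":"became leader","term":2}
//
// The fixed fields below are decoded directly; Extras is filled in separately
// by parseEtcdLogEntry with whatever other fields the line carried.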
type etcdLogEntry struct {
	Level   string                 `json:"level"`
	TS      time.Time              `json:"ts"`
	Caller  string                 `json:"caller"`
	Message string                 `json:"msg"`
	Extras  map[string]interface{} `json:"-"`
}

// parseEtcdLogEntry is a logtree/unraw compatible parser for etcd log lines.
// It is fairly liberal in what it will accept, falling back to writing a
// message stating that the given log entry could not be parsed. This ensures
// that no lines are lost, even if malformed.
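//
// Continuing the illustrative line from the etcdLogEntry comment above, that
// entry would be emitted at INFO severity, attributed to raft.go line 765,
// with the message rendered as:
//
//	became leader, term: 2
//
// Lines that cannot be decoded as JSON at all are instead emitted verbatim,
// prefixed with "Log line unparseable: ", and lines reported as truncated by
// logbuffer are prefixed with "Log line truncated: ".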
func parseEtcdLogEntry(l *logbuffer.Line, write unraw.LeveledWriter) {
	if l.Truncated() {
		write(&logtree.ExternalLeveledPayload{
			Message: "Log line truncated: " + l.Data,
		})
		return
	}

	e := etcdLogEntry{}
	// Parse constant fields.
	if err := json.Unmarshal([]byte(l.Data), &e); err != nil {
		write(&logtree.ExternalLeveledPayload{
			Message: "Log line unparseable: " + l.Data,
		})
		return
	}
	// Parse extra fields.
	if err := json.Unmarshal([]byte(l.Data), &e.Extras); err != nil {
		// Not exactly sure how this could ever happen - the previous parse
		// went fine, so why wouldn't this one? But to be on the safe side,
		// just don't attempt to parse this line any further.
		write(&logtree.ExternalLeveledPayload{
			Message: "Log line unparseable: " + l.Data,
		})
		return
	}
	delete(e.Extras, "level")
	delete(e.Extras, "ts")
	delete(e.Extras, "caller")
	delete(e.Extras, "msg")

	out := logtree.ExternalLeveledPayload{
		Timestamp: e.TS,
	}

	// Attempt to parse caller (eg. raft/raft.go:765) into file/line (eg.
	// raft.go 765).
	if len(e.Caller) > 0 {
		parts := strings.Split(e.Caller, "/")
		fileLine := parts[len(parts)-1]
		parts = strings.Split(fileLine, ":")
		if len(parts) == 2 {
			out.File = parts[0]
			if line, err := strconv.ParseInt(parts[1], 10, 32); err == nil {
				out.Line = int(line)
			}
		}
	}

	// Convert zap level into logtree severity.
	switch e.Level {
	case "info":
		out.Severity = logtree.INFO
	case "warn":
		out.Severity = logtree.WARNING
	case "error":
		out.Severity = logtree.ERROR
	case "fatal", "panic", "dpanic":
		out.Severity = logtree.FATAL
	}

	// Sort extra keys alphabetically.
	extraKeys := make([]string, 0, len(e.Extras))
	for k := range e.Extras {
		extraKeys = append(extraKeys, k)
	}
	sort.Strings(extraKeys)

	// Convert structured extras into a human-friendly logline. We will
	// comma-join the received message and any structured logging data after
	// it.
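	// For example (values purely illustrative): a message "sending request"
	// with extras {"to": "member-a", "took": 0.12} would render as:
	//
	//   sending request, to: "member-a", took: 0.12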
	parts := make([]string, 0, len(e.Extras)+1)
	parts = append(parts, e.Message)
	for _, k := range extraKeys {
		// Format the value for logs. We elect to use JSON for representing
		// each element, as:
		// - this quotes strings
		// - all the data we retrieved must already be representable in JSON,
		//   as we just decoded it from an existing blob.
		// - the extra data might be arbitrarily nested (eg. an array or
		//   object) and we don't want to be in the business of coming up with
		//   our own serialization format in case of such data.
		var v string
		vbytes, err := json.Marshal(e.Extras[k])
		if err != nil {
			// Fall back to %+v just in case. We don't make any API promises
			// that the log line will be machine parseable or in any stable
			// format.
			v = fmt.Sprintf("%+v", e.Extras[k])
		} else {
			v = string(vbytes)
		}
		extra := fmt.Sprintf("%s: %s", k, v)

		parts = append(parts, extra)
	}

	// If the given message was empty and there is some extra data attached,
	// explicitly state that the message was empty (to avoid a mysterious
	// leading comma).
	// Otherwise, if the message was empty and there was no extra structured
	// data, assume that the sender intended to have it represented as an empty
	// line.
	if len(parts) > 1 && parts[0] == "" {
		parts[0] = "<empty>"
	}

	// Finally, build the message line to emit in leveled logging and emit it.
	out.Message = strings.Join(parts, ", ")

	write(&out)
}
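
// The snippet below is a purely illustrative sketch of how one might feed a
// single raw line through this parser, e.g. while debugging. It is kept as a
// comment because it leans on two assumptions that only the usage above hints
// at: that a logbuffer.Line built with just its Data field set is valid (and
// not considered truncated), and that unraw.LeveledWriter is a plain
// func(*logtree.ExternalLeveledPayload) callback.
//
//	line := logbuffer.Line{Data: `{"level":"warn","ts":"2021-07-07T14:35:27Z","caller":"etcdserver/server.go:1","msg":"slow request","took":"1.2s"}`}
//	parseEtcdLogEntry(&line, func(p *logtree.ExternalLeveledPayload) {
//		// Expected to print: slow request, took: "1.2s"
//		fmt.Println(p.Message)
//	})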