blob: bdf2982e6a39217a4625a8af7007c4e64f1e8b61 [file] [log] [blame]
Tim Windelschmidt6d33a432025-02-04 14:34:25 +01001// Copyright The Monogon Project Authors.
2// SPDX-License-Identifier: Apache-2.0
3
Serge Bazanski50009e02021-07-07 14:35:27 +02004package consensus
5
6import (
7 "encoding/json"
8 "fmt"
9 "sort"
10 "strconv"
11 "strings"
12 "time"
13
Serge Bazanski3c5d0632024-09-12 10:49:12 +000014 "source.monogon.dev/go/logging"
Tim Windelschmidt9f21f532024-05-07 15:14:20 +020015 "source.monogon.dev/osbase/logbuffer"
16 "source.monogon.dev/osbase/logtree"
17 "source.monogon.dev/osbase/logtree/unraw"
Serge Bazanski50009e02021-07-07 14:35:27 +020018)
19
// etcdLogEntry is a JSON-encoded, structured log entry received from a running
// etcd server. The format comes from the logging library used there,
// github.com/uber-go/zap.
type etcdLogEntry struct {
	// Level is the zap level string, eg. "info", "warn", "error", "fatal".
	Level string `json:"level"`
	// TS is the timestamp at which the entry was emitted by etcd.
	TS time.Time `json:"ts"`
	// Caller is the emitting source location, eg. "raft/raft.go:765".
	Caller string `json:"caller"`
	// Message is the free-form log message.
	Message string `json:"msg"`
	// Extras holds all remaining structured fields of the entry. It is
	// excluded from the struct decode (json:"-") and instead populated by a
	// second json.Unmarshal of the same line into the map, after which the
	// four well-known keys above are deleted from it.
	Extras map[string]interface{} `json:"-"`
}
30
31// parseEtcdLogEntry is a logtree/unraw compatible parser for etcd log lines.
32// It is fairly liberal in what it will accept, falling back to writing a
33// message that outlines the given log entry could not have been parsed. This
34// ensures that no lines are lost, even if malformed.
35func parseEtcdLogEntry(l *logbuffer.Line, write unraw.LeveledWriter) {
36 if l.Truncated() {
37 write(&logtree.ExternalLeveledPayload{
38 Message: "Log line truncated: " + l.Data,
39 })
40 return
41 }
42
Tim Windelschmidt3b5a9172024-05-23 13:33:52 +020043 var e etcdLogEntry
Serge Bazanski50009e02021-07-07 14:35:27 +020044 // Parse constant fields
45 if err := json.Unmarshal([]byte(l.Data), &e); err != nil {
46 write(&logtree.ExternalLeveledPayload{
47 Message: "Log line unparseable: " + l.Data,
48 })
49 return
50 }
51 // Parse extra fields.
52 if err := json.Unmarshal([]byte(l.Data), &e.Extras); err != nil {
53 // Not exactly sure how this could ever happen - the previous parse
54 // went fine, so why wouldn't this one? But to be on the safe side,
55 // just don't attempt to parse this line any further.
56 write(&logtree.ExternalLeveledPayload{
57 Message: "Log line unparseable: " + l.Data,
58 })
59 return
60 }
61 delete(e.Extras, "level")
62 delete(e.Extras, "ts")
63 delete(e.Extras, "caller")
64 delete(e.Extras, "msg")
65
66 out := logtree.ExternalLeveledPayload{
67 Timestamp: e.TS,
68 }
69
70 // Attempt to parse caller (eg. raft/raft.go:765) into file/line (eg.
71 // raft.go 765).
72 if len(e.Caller) > 0 {
73 parts := strings.Split(e.Caller, "/")
74 fileLine := parts[len(parts)-1]
75 parts = strings.Split(fileLine, ":")
76 if len(parts) == 2 {
77 out.File = parts[0]
78 if line, err := strconv.ParseInt(parts[1], 10, 32); err == nil {
79 out.Line = int(line)
80 }
81 }
82 }
83
84 // Convert zap level into logtree severity.
85 switch e.Level {
86 case "info":
Serge Bazanski3c5d0632024-09-12 10:49:12 +000087 out.Severity = logging.INFO
Serge Bazanski50009e02021-07-07 14:35:27 +020088 case "warn":
Serge Bazanski3c5d0632024-09-12 10:49:12 +000089 out.Severity = logging.WARNING
Serge Bazanski50009e02021-07-07 14:35:27 +020090 case "error":
Serge Bazanski3c5d0632024-09-12 10:49:12 +000091 out.Severity = logging.ERROR
Serge Bazanski50009e02021-07-07 14:35:27 +020092 case "fatal", "panic", "dpanic":
Serge Bazanski3c5d0632024-09-12 10:49:12 +000093 out.Severity = logging.FATAL
Serge Bazanski50009e02021-07-07 14:35:27 +020094 }
95
96 // Sort extra keys alphabetically.
97 extraKeys := make([]string, 0, len(e.Extras))
Tim Windelschmidt6b6428d2024-04-11 01:35:41 +020098 for k := range e.Extras {
Serge Bazanski50009e02021-07-07 14:35:27 +020099 extraKeys = append(extraKeys, k)
100 }
101 sort.Strings(extraKeys)
102
103 // Convert structured extras into a human-friendly logline. We will
104 // comma-join the received message and any structured logging data after
105 // it.
106 parts := make([]string, 0, len(e.Extras)+1)
107 parts = append(parts, e.Message)
108 for _, k := range extraKeys {
109
110 // Format the value for logs. We elect to use JSON for representing
111 // each element, as:
112 // - this quotes strings
113 // - all the data we retrieved must already be representable in JSON,
114 // as we just decoded it from an existing blob.
115 // - the extra data might be arbitrarily nested (eg. an array or
116 // object) and we don't want to be in the business of coming up with
117 // our own serialization format in case of such data.
118 var v string
119 vbytes, err := json.Marshal(e.Extras[k])
120 if err != nil {
121 // Fall back to +%v just in case. We don't make any API promises
122 // that the log line will be machine parseable or in any stable
123 // format.
124 v = fmt.Sprintf("%+v", v)
125 } else {
126 v = string(vbytes)
127 }
128 extra := fmt.Sprintf("%s: %s", k, v)
129
130 parts = append(parts, extra)
131 }
132
133 // If the given message was empty and there are some extra data attached,
134 // explicitly state that the message was empty (to avoid a mysterious
135 // leading comma).
136 // Otherwise, if the message was empty and there was no extra structured
137 // data, assume that the sender intended to have it represented as an empty
138 // line.
139 if len(parts) > 1 && parts[0] == "" {
140 parts[0] = "<empty>"
141 }
142
143 // Finally build the message line to emit in leveled logging and emit it.
144 out.Message = strings.Join(parts, ", ")
145
146 write(&out)
147}