logs refactoring

This commit is contained in:
Gani Georgiev
2023-11-26 13:33:17 +02:00
parent ff5535f4de
commit 821aae4a62
109 changed files with 7320 additions and 3728 deletions
+264
View File
@@ -0,0 +1,264 @@
package logger
import (
"context"
"log/slog"
"sync"
"github.com/pocketbase/pocketbase/tools/types"
)
// ensure that BatchHandler implements the slog.Handler interface
var _ slog.Handler = (*BatchHandler)(nil)

// BatchOptions are options for the BatchHandler.
type BatchOptions struct {
	// WriteFunc processes the batched logs.
	WriteFunc func(ctx context.Context, logs []*Log) error

	// BeforeAddFunc is an optional function that is invoked every time
	// before a new log is added to the batch queue.
	//
	// Return false to skip adding the log into the batch queue.
	BeforeAddFunc func(ctx context.Context, log *Log) bool

	// Level reports the minimum record level that will be logged.
	// Records with lower levels are discarded.
	// If nil, the Handler uses [slog.LevelInfo].
	Level slog.Leveler

	// BatchSize specifies how many logs to accumulate before calling WriteFunc.
	// If not set or 0, fallback to 100 by default.
	BatchSize int
}
// NewBatchHandler creates a slog compatible handler that writes JSON
// logs on batches (default to 100), using the given options.
//
// Panics if [BatchOptions.WriteFunc] is not defined.
//
// Example:
//
//	l := slog.New(logger.NewBatchHandler(logger.BatchOptions{
//		WriteFunc: func(ctx context.Context, logs []*Log) error {
//			for _, l := range logs {
//				fmt.Println(l.Level, l.Message, l.Data)
//			}
//			return nil
//		},
//	}))
//	l.Info("Example message", "title", "lorem ipsum")
func NewBatchHandler(options BatchOptions) *BatchHandler {
	h := &BatchHandler{
		mux:     &sync.Mutex{},
		options: &options,
	}

	if h.options.WriteFunc == nil {
		panic("options.WriteFunc must be set")
	}

	if h.options.Level == nil {
		h.options.Level = slog.LevelInfo
	}

	// guard against zero AND negative values so that the logs slice
	// below is always allocated with a valid capacity
	// (make with a negative cap panics at runtime)
	if h.options.BatchSize <= 0 {
		h.options.BatchSize = 100
	}

	h.logs = make([]*Log, 0, h.options.BatchSize)

	return h
}
// BatchHandler is a slog handler that writes records on batches.
//
// The log records attributes are formatted in JSON.
//
// Requires the [BatchOptions.WriteFunc] option to be defined.
type BatchHandler struct {
	// mux is shared (same pointer) across all derived handlers and
	// guards access to the accumulated logs queue.
	mux *sync.Mutex

	// parent is the handler this one was derived from via
	// WithGroup/WithAttrs (nil for the top-level handler).
	parent *BatchHandler

	// options is shared (same pointer) across all derived handlers.
	options *BatchOptions

	// group is the name under which this handler's attributes are nested
	// (set by WithGroup).
	group string

	// attrs holds the attributes added via WithAttrs.
	attrs []slog.Attr

	// logs is the accumulated batch queue
	// (populated only on the top-level handler).
	logs []*Log
}
// Enabled reports whether the handler handles records at the given level.
//
// The handler ignores records whose level is lower.
func (h *BatchHandler) Enabled(ctx context.Context, level slog.Level) bool {
	minLevel := h.options.Level.Level()

	return level >= minLevel
}
// WithGroup returns a new BatchHandler that starts a group.
//
// All logger attributes will be resolved under the specified group name.
func (h *BatchHandler) WithGroup(name string) slog.Handler {
	// an empty group name is a no-op per the slog.Handler contract
	if name == "" {
		return h
	}

	child := &BatchHandler{
		parent:  h,
		mux:     h.mux,
		options: h.options,
		group:   name,
	}

	return child
}
// WithAttrs returns a new BatchHandler loaded with the specified attributes.
func (h *BatchHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	// nothing to add - reuse the current handler
	if len(attrs) == 0 {
		return h
	}

	child := &BatchHandler{
		parent:  h,
		mux:     h.mux,
		options: h.options,
		attrs:   attrs,
	}

	return child
}
// Handle formats the slog.Record argument as JSON object and adds it
// to the batch queue.
//
// If the batch queue threshold has been reached, the WriteFunc option
// is invoked with the accumulated logs which in turn will reset the batch queue.
func (h *BatchHandler) Handle(ctx context.Context, r slog.Record) error {
	if h.group != "" {
		// nest both the handler's own attrs and the record's attrs
		// under the handler's group name
		h.mux.Lock()
		attrs := make([]any, 0, len(h.attrs)+r.NumAttrs())
		for _, a := range h.attrs {
			attrs = append(attrs, a)
		}
		h.mux.Unlock()

		r.Attrs(func(a slog.Attr) bool {
			attrs = append(attrs, a)
			return true
		})

		// rebuild the record with the same metadata but with all
		// attributes wrapped in a single group attribute
		r = slog.NewRecord(r.Time, r.Level, r.Message, r.PC)
		r.AddAttrs(slog.Group(h.group, attrs...))
	} else if len(h.attrs) > 0 {
		// clone before mutating so the caller's record stays untouched
		r = r.Clone()
		h.mux.Lock()
		r.AddAttrs(h.attrs...)
		h.mux.Unlock()
	}

	// delegate upwards so each ancestor applies its own group/attrs;
	// only the top-level handler serializes and enqueues the record
	if h.parent != nil {
		return h.parent.Handle(ctx, r)
	}

	// resolve the record attributes into a plain JSON-serializable map
	data := make(map[string]any, r.NumAttrs())

	r.Attrs(func(a slog.Attr) bool {
		if err := h.resolveAttr(data, a); err != nil {
			return false
		}
		return true
	})

	log := &Log{
		Time:    r.Time,
		Level:   r.Level,
		Message: r.Message,
		Data:    types.JsonMap(data),
	}

	// give the user-defined hook a chance to veto the log
	if h.options.BeforeAddFunc != nil && !h.options.BeforeAddFunc(ctx, log) {
		return nil
	}

	h.mux.Lock()
	h.logs = append(h.logs, log)
	totalLogs := len(h.logs)
	h.mux.Unlock()

	// flush once the accumulated logs reach the batch threshold
	if totalLogs >= h.options.BatchSize {
		if err := h.WriteAll(ctx); err != nil {
			return err
		}
	}

	return nil
}
// SetLevel updates the handler options level to the specified one.
//
// NOTE(review): Enabled reads h.options.Level without acquiring the
// mutex, so a concurrent SetLevel is technically racy — confirm whether
// this is acceptable for the intended usage.
func (h *BatchHandler) SetLevel(level slog.Level) {
	h.mux.Lock()
	h.options.Level = level
	h.mux.Unlock()
}
// WriteAll writes all accumulated Log entries and resets the batch queue.
func (h *BatchHandler) WriteAll(ctx context.Context) error {
	// only the top-level handler owns the logs queue, so delegate
	// recursively until we reach it
	if h.parent != nil {
		return h.parent.WriteAll(ctx)
	}

	h.mux.Lock()

	total := len(h.logs)
	if total == 0 {
		// nothing accumulated
		h.mux.Unlock()
		return nil
	}

	// detach the accumulated entries while holding the lock so that the
	// (potentially slow) write below doesn't block concurrent handlers
	pending := make([]*Log, total)
	copy(pending, h.logs)
	h.logs = h.logs[:0] // reset the queue, keeping its capacity

	h.mux.Unlock()

	return h.options.WriteFunc(ctx, pending)
}
// resolveAttr writes attr into data.
func (h *BatchHandler) resolveAttr(data map[string]any, attr slog.Attr) error {
	// make sure that the value is in its final form before inspecting it
	attr.Value = attr.Value.Resolve()

	// ignore empty attrs
	if attr.Equal(slog.Attr{}) {
		return nil
	}

	if attr.Value.Kind() != slog.KindGroup {
		// plain value - errors are stored as their message string
		value := attr.Value.Any()
		if errValue, ok := value.(error); ok {
			data[attr.Key] = errValue.Error()
		} else {
			data[attr.Key] = value
		}
		return nil
	}

	// group value - recursively resolve its members into a submap
	members := attr.Value.Group()
	if len(members) == 0 {
		return nil // ignore empty groups
	}

	groupData := make(map[string]any, len(members))
	for _, member := range members {
		h.resolveAttr(groupData, member)
	}
	if len(groupData) > 0 {
		data[attr.Key] = groupData
	}

	return nil
}
+324
View File
@@ -0,0 +1,324 @@
package logger
import (
"context"
"errors"
"fmt"
"log/slog"
"testing"
"time"
)
// TestNewBatchHandlerPanic verifies that the constructor panics when
// the required WriteFunc option is missing.
func TestNewBatchHandlerPanic(t *testing.T) {
	defer func() {
		recovered := recover()
		if recovered == nil {
			t.Errorf("Expected to panic.")
		}
	}()

	NewBatchHandler(BatchOptions{})
}
// TestNewBatchHandlerDefaults verifies the option defaults and the
// initial state of a freshly constructed handler.
func TestNewBatchHandlerDefaults(t *testing.T) {
	handler := NewBatchHandler(BatchOptions{
		WriteFunc: func(ctx context.Context, logs []*Log) error {
			return nil
		},
	})

	// option defaults
	if handler.options.BatchSize != 100 {
		t.Fatalf("Expected default BatchSize %d, got %d", 100, handler.options.BatchSize)
	}
	if handler.options.Level != slog.LevelInfo {
		t.Fatalf("Expected default Level Info, got %v", handler.options.Level)
	}
	if handler.options.BeforeAddFunc != nil {
		t.Fatal("Expected default BeforeAddFunc to be nil")
	}
	if handler.options.WriteFunc == nil {
		t.Fatal("Expected default WriteFunc to be set")
	}

	// initial handler state
	if handler.group != "" {
		t.Fatalf("Expected empty group, got %s", handler.group)
	}
	if len(handler.attrs) != 0 {
		t.Fatalf("Expected empty attrs, got %v", handler.attrs)
	}
	if len(handler.logs) != 0 {
		t.Fatalf("Expected empty logs queue, got %v", handler.logs)
	}
}
// TestBatchHandlerEnabled verifies that records below the configured
// minimum level are reported as disabled.
func TestBatchHandlerEnabled(t *testing.T) {
	handler := NewBatchHandler(BatchOptions{
		Level: slog.LevelWarn,
		WriteFunc: func(ctx context.Context, logs []*Log) error {
			return nil
		},
	})

	logger := slog.New(handler)

	scenarios := []struct {
		level    slog.Level
		expected bool
	}{
		{slog.LevelDebug, false},
		{slog.LevelInfo, false},
		{slog.LevelWarn, true},
		{slog.LevelError, true},
	}

	for _, sc := range scenarios {
		t.Run(fmt.Sprintf("Level %v", sc.level), func(t *testing.T) {
			if got := logger.Enabled(context.Background(), sc.level); got != sc.expected {
				t.Fatalf("Expected %v, got %v", sc.expected, got)
			}
		})
	}
}
// TestBatchHandlerSetLevel verifies that SetLevel overwrites the
// initially configured options level.
func TestBatchHandlerSetLevel(t *testing.T) {
	handler := NewBatchHandler(BatchOptions{
		Level: slog.LevelWarn,
		WriteFunc: func(ctx context.Context, logs []*Log) error {
			return nil
		},
	})

	if handler.options.Level != slog.LevelWarn {
		t.Fatalf("Expected the initial level to be %d, got %d", slog.LevelWarn, handler.options.Level)
	}

	handler.SetLevel(slog.LevelDebug)

	if handler.options.Level != slog.LevelDebug {
		t.Fatalf("Expected the updated level to be %d, got %d", slog.LevelDebug, handler.options.Level)
	}
}
// TestBatchHandlerWithAttrsAndWithGroup verifies that WithAttrs and
// WithGroup derive child handlers that link back to their parent and
// carry only their own group name and attributes.
func TestBatchHandlerWithAttrsAndWithGroup(t *testing.T) {
	h0 := NewBatchHandler(BatchOptions{
		WriteFunc: func(ctx context.Context, logs []*Log) error {
			return nil
		},
	})

	// build a derivation chain: h0 -> h1 (attrs) -> h2 (group) -> h3 (attrs)
	h1 := h0.WithAttrs([]slog.Attr{slog.Int("test1", 1)}).(*BatchHandler)
	h2 := h1.WithGroup("h2_group").(*BatchHandler)
	h3 := h2.WithAttrs([]slog.Attr{slog.Int("test2", 2)}).(*BatchHandler)

	scenarios := []struct {
		name           string
		handler        *BatchHandler
		expectedParent *BatchHandler
		expectedGroup  string
		expectedAttrs  int
	}{
		{
			"h0",
			h0,
			nil,
			"",
			0,
		},
		{
			"h1",
			h1,
			h0,
			"",
			1,
		},
		{
			"h2",
			h2,
			h1,
			"h2_group",
			0,
		},
		{
			"h3",
			h3,
			h2,
			"",
			1,
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			if s.handler.group != s.expectedGroup {
				t.Fatalf("Expected group %q, got %q", s.expectedGroup, s.handler.group)
			}

			if s.handler.parent != s.expectedParent {
				t.Fatalf("Expected parent %v, got %v", s.expectedParent, s.handler.parent)
			}

			if totalAttrs := len(s.handler.attrs); totalAttrs != s.expectedAttrs {
				t.Fatalf("Expected %d attrs, got %d", s.expectedAttrs, totalAttrs)
			}
		})
	}
}
// TestBatchHandlerHandle verifies that Handle queues records, that
// BeforeAddFunc can veto individual records, and that reaching the
// BatchSize threshold triggers a batch write and resets the queue.
func TestBatchHandlerHandle(t *testing.T) {
	ctx := context.Background()

	beforeLogs := []*Log{}
	writeLogs := []*Log{}

	h := NewBatchHandler(BatchOptions{
		BatchSize: 3,
		BeforeAddFunc: func(_ context.Context, log *Log) bool {
			beforeLogs = append(beforeLogs, log)
			if log.Message == "test2" {
				return false // skip test2 log
			}
			return true
		},
		WriteFunc: func(_ context.Context, logs []*Log) error {
			writeLogs = logs
			return nil
		},
	})

	// only 2 of these should be queued ("test2" is vetoed above)
	h.Handle(ctx, slog.NewRecord(time.Now(), slog.LevelInfo, "test1", 0))
	h.Handle(ctx, slog.NewRecord(time.Now(), slog.LevelInfo, "test2", 0))
	h.Handle(ctx, slog.NewRecord(time.Now(), slog.LevelInfo, "test3", 0))

	// no batch write
	{
		checkLogMessages([]string{"test1", "test2", "test3"}, beforeLogs, t)
		checkLogMessages([]string{"test1", "test3"}, h.logs, t)
		// should be empty because no batch write has happened yet
		if totalWriteLogs := len(writeLogs); totalWriteLogs != 0 {
			t.Fatalf("Expected %d writeLogs, got %d", 0, totalWriteLogs)
		}
	}

	// add one more log to trigger the batch write
	{
		h.Handle(ctx, slog.NewRecord(time.Now(), slog.LevelInfo, "test4", 0))
		// should be empty after the batch write
		checkLogMessages([]string{}, h.logs, t)
		checkLogMessages([]string{"test1", "test3", "test4"}, writeLogs, t)
	}
}
// TestBatchHandlerWriteAll verifies that WriteAll flushes the queued
// logs via WriteFunc even when the batch threshold hasn't been reached,
// and that the queue is reset afterwards.
func TestBatchHandlerWriteAll(t *testing.T) {
	ctx := context.Background()

	beforeLogs := []*Log{}
	writeLogs := []*Log{}

	h := NewBatchHandler(BatchOptions{
		BatchSize: 3,
		BeforeAddFunc: func(_ context.Context, log *Log) bool {
			beforeLogs = append(beforeLogs, log)
			return true
		},
		WriteFunc: func(_ context.Context, logs []*Log) error {
			writeLogs = logs
			return nil
		},
	})

	// queue 2 logs (below the BatchSize threshold of 3)
	h.Handle(ctx, slog.NewRecord(time.Now(), slog.LevelInfo, "test1", 0))
	h.Handle(ctx, slog.NewRecord(time.Now(), slog.LevelInfo, "test2", 0))

	checkLogMessages([]string{"test1", "test2"}, beforeLogs, t)
	checkLogMessages([]string{"test1", "test2"}, h.logs, t)
	checkLogMessages([]string{}, writeLogs, t) // empty because the batch size hasn't been reached

	// force trigger the batch write
	h.WriteAll(ctx)

	checkLogMessages([]string{"test1", "test2"}, beforeLogs, t)
	checkLogMessages([]string{}, h.logs, t) // reset
	checkLogMessages([]string{"test1", "test2"}, writeLogs, t)
}
// TestBatchHandlerAttrsFormat verifies the JSON serialization of the
// handler attributes: plain attrs, grouped attrs (nested as submaps)
// and error values (stored as their message string).
func TestBatchHandlerAttrsFormat(t *testing.T) {
	ctx := context.Background()

	beforeLogs := []*Log{}

	h0 := NewBatchHandler(BatchOptions{
		BeforeAddFunc: func(_ context.Context, log *Log) bool {
			beforeLogs = append(beforeLogs, log)
			return true
		},
		WriteFunc: func(_ context.Context, logs []*Log) error {
			return nil
		},
	})

	h1 := h0.WithAttrs([]slog.Attr{slog.Int("a", 1), slog.String("b", "123")})

	h2 := h1.WithGroup("sub").WithAttrs([]slog.Attr{
		slog.Int("c", 3),
		slog.Any("d", map[string]any{"d.1": 1}),
		slog.Any("e", errors.New("example error")),
	})

	record := slog.NewRecord(time.Now(), slog.LevelInfo, "hello", 0)
	record.AddAttrs(slog.String("name", "test"))

	// the same record goes through each handler in the chain
	h0.Handle(ctx, record)
	h1.Handle(ctx, record)
	h2.Handle(ctx, record)

	expected := []string{
		`{"name":"test"}`,
		`{"a":1,"b":"123","name":"test"}`,
		`{"a":1,"b":"123","sub":{"c":3,"d":{"d.1":1},"e":"example error","name":"test"}}`,
	}

	// fix: the original passed (len(beforeLogs), len(expected)) which
	// printed the actual count as the expected one and vice versa
	if len(beforeLogs) != len(expected) {
		t.Fatalf("Expected %d logs, got %d", len(expected), len(beforeLogs))
	}

	for i, data := range expected {
		t.Run(fmt.Sprintf("log handler %d", i), func(t *testing.T) {
			log := beforeLogs[i]

			raw, _ := log.Data.MarshalJSON()
			if string(raw) != data {
				t.Fatalf("Expected \n%s \ngot \n%s", data, raw)
			}
		})
	}
}
// checkLogMessages is a test helper that asserts that logs contains
// exactly len(expected) entries and that every message in expected is
// present among them (order insensitive).
func checkLogMessages(expected []string, logs []*Log, t *testing.T) {
	t.Helper() // report failures at the caller's line, not here

	if len(logs) != len(expected) {
		t.Fatalf("Expected %d batched logs, got %d (expected: %v)", len(expected), len(logs), expected)
	}

	for _, message := range expected {
		exists := false
		for _, l := range logs {
			if l.Message == message {
				exists = true
				break // fix: was `continue`, needlessly scanning the rest
			}
		}
		if !exists {
			t.Fatalf("Missing %q log message", message)
		}
	}
}
+17
View File
@@ -0,0 +1,17 @@
package logger
import (
"log/slog"
"time"
"github.com/pocketbase/pocketbase/tools/types"
)
// Log is similar to [slog.Record] but contains the log attributes as
// preformatted JSON map.
type Log struct {
	Time    time.Time
	Message string
	Level   slog.Level
	Data    types.JsonMap
}
+3 -4
View File
@@ -1,7 +1,7 @@
package osutils
import (
"log"
"errors"
"os"
"path/filepath"
@@ -65,9 +65,8 @@ func MoveDirContent(src string, dest string, rootExclude ...string) error {
if err := os.Rename(old, new); err != nil {
if errs := tryRollback(); len(errs) > 0 {
// currently just log the rollback errors
// in the future we may require go 1.20+ to use errors.Join()
log.Println(errs)
errs = append(errs, err)
err = errors.Join(errs...)
}
return err
+2 -2
View File
@@ -22,8 +22,8 @@ func FireAndForget(f func(), wg ...*sync.WaitGroup) {
defer func() {
if err := recover(); err != nil {
log.Printf("RECOVERED FROM PANIC: %v", err)
log.Printf("%s\n", string(debug.Stack()))
log.Printf("RECOVERED FROM PANIC (safe to ignore): %v", err)
log.Println(string(debug.Stack()))
}
}()
+20 -20
View File
@@ -13,7 +13,7 @@ import (
)
func TestFilterDataBuildExpr(t *testing.T) {
resolver := search.NewSimpleFieldResolver("test1", "test2", "test3", `^test4.\w+$`)
resolver := search.NewSimpleFieldResolver("test1", "test2", "test3", `^test4_\w+$`)
scenarios := []struct {
name string
@@ -96,35 +96,35 @@ func TestFilterDataBuildExpr(t *testing.T) {
{
"macros",
`
test4.1 > @now &&
test4.2 > @second &&
test4.3 > @minute &&
test4.4 > @hour &&
test4.5 > @day &&
test4.6 > @year &&
test4.7 > @month &&
test4.9 > @weekday &&
test4.9 > @todayStart &&
test4.10 > @todayEnd &&
test4.11 > @monthStart &&
test4.12 > @monthEnd &&
test4.13 > @yearStart &&
test4.14 > @yearEnd
test4_1 > @now &&
test4_2 > @second &&
test4_3 > @minute &&
test4_4 > @hour &&
test4_5 > @day &&
test4_6 > @year &&
test4_7 > @month &&
test4_9 > @weekday &&
test4_9 > @todayStart &&
test4_10 > @todayEnd &&
test4_11 > @monthStart &&
test4_12 > @monthEnd &&
test4_13 > @yearStart &&
test4_14 > @yearEnd
`,
false,
"([[test4.1]] > {:TEST} AND [[test4.2]] > {:TEST} AND [[test4.3]] > {:TEST} AND [[test4.4]] > {:TEST} AND [[test4.5]] > {:TEST} AND [[test4.6]] > {:TEST} AND [[test4.7]] > {:TEST} AND [[test4.9]] > {:TEST} AND [[test4.9]] > {:TEST} AND [[test4.10]] > {:TEST} AND [[test4.11]] > {:TEST} AND [[test4.12]] > {:TEST} AND [[test4.13]] > {:TEST} AND [[test4.14]] > {:TEST})",
"([[test4_1]] > {:TEST} AND [[test4_2]] > {:TEST} AND [[test4_3]] > {:TEST} AND [[test4_4]] > {:TEST} AND [[test4_5]] > {:TEST} AND [[test4_6]] > {:TEST} AND [[test4_7]] > {:TEST} AND [[test4_9]] > {:TEST} AND [[test4_9]] > {:TEST} AND [[test4_10]] > {:TEST} AND [[test4_11]] > {:TEST} AND [[test4_12]] > {:TEST} AND [[test4_13]] > {:TEST} AND [[test4_14]] > {:TEST})",
},
{
"complex expression",
"((test1 > 1) || (test2 != 2)) && test3 ~ '%%example' && test4.sub = null",
"((test1 > 1) || (test2 != 2)) && test3 ~ '%%example' && test4_sub = null",
false,
"(([[test1]] > {:TEST} OR [[test2]] != {:TEST}) AND [[test3]] LIKE {:TEST} ESCAPE '\\' AND ([[test4.sub]] = '' OR [[test4.sub]] IS NULL))",
"(([[test1]] > {:TEST} OR [[test2]] != {:TEST}) AND [[test3]] LIKE {:TEST} ESCAPE '\\' AND ([[test4_sub]] = '' OR [[test4_sub]] IS NULL))",
},
{
"combination of special literals (null, true, false)",
"test1=true && test2 != false && null = test3 || null != test4.sub",
"test1=true && test2 != false && null = test3 || null != test4_sub",
false,
"([[test1]] = 1 AND [[test2]] != 0 AND ('' = [[test3]] OR [[test3]] IS NULL) OR ('' != [[test4.sub]] AND [[test4.sub]] IS NOT NULL))",
"([[test1]] = 1 AND [[test2]] != 0 AND ('' = [[test3]] OR [[test3]] IS NULL) OR ('' != [[test4_sub]] AND [[test4_sub]] IS NOT NULL))",
},
{
"all operators",
+30 -1
View File
@@ -2,6 +2,8 @@ package search
import (
"fmt"
"strconv"
"strings"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/tools/inflector"
@@ -73,7 +75,34 @@ func (r *SimpleFieldResolver) Resolve(field string) (*ResolverResult, error) {
return nil, fmt.Errorf("Failed to resolve field %q.", field)
}
parts := strings.Split(field, ".")
// single regular field
if len(parts) == 1 {
return &ResolverResult{
Identifier: "[[" + inflector.Columnify(parts[0]) + "]]",
}, nil
}
// treat as json path
var jsonPath strings.Builder
jsonPath.WriteString("$")
for _, part := range parts[1:] {
if _, err := strconv.Atoi(part); err == nil {
jsonPath.WriteString("[")
jsonPath.WriteString(inflector.Columnify(part))
jsonPath.WriteString("]")
} else {
jsonPath.WriteString(".")
jsonPath.WriteString(inflector.Columnify(part))
}
}
return &ResolverResult{
Identifier: "[[" + inflector.Columnify(field) + "]]",
Identifier: fmt.Sprintf(
"JSON_EXTRACT([[%s]], '%s')",
inflector.Columnify(parts[0]),
jsonPath.String(),
),
}, nil
}
+2 -1
View File
@@ -43,7 +43,7 @@ func TestSimpleFieldResolverUpdateQuery(t *testing.T) {
}
func TestSimpleFieldResolverResolve(t *testing.T) {
r := search.NewSimpleFieldResolver("test", `^test_regex\d+$`, "Test columnify!")
r := search.NewSimpleFieldResolver("test", `^test_regex\d+$`, "Test columnify!", "data.test")
scenarios := []struct {
fieldName string
@@ -58,6 +58,7 @@ func TestSimpleFieldResolverResolve(t *testing.T) {
{"test_regex", true, ""},
{"test_regex1", false, "[[test_regex1]]"},
{"Test columnify!", false, "[[Testcolumnify]]"},
{"data.test", false, "JSON_EXTRACT([[data]], '$.test')"},
}
for i, s := range scenarios {
+1 -1
View File
@@ -4,8 +4,8 @@ import "sync"
// Store defines a concurrent safe in memory key-value data store.
type Store[T any] struct {
mux sync.RWMutex
data map[string]T
mux sync.RWMutex
}
// New creates a new Store[T] instance with a shallow copy of the provided data (if any).