codeword/vendor/penahub.gitlab.yandexcloud.net/external/trashlog.git/wrappers/zaptrashlog/zaptrashlog.go

// Package zaptrashlog provides a zapcore.Core that buffers log entries in a local
// bbolt store and ships them to a remote trashlog service over a gRPC stream.
package zaptrashlog

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"

	"penahub.gitlab.yandexcloud.net/external/trashlog.git/dal/bbolt"
	"penahub.gitlab.yandexcloud.net/external/trashlog.git/model"
	pb "penahub.gitlab.yandexcloud.net/external/trashlog.git/proto/generated"
	"penahub.gitlab.yandexcloud.net/external/trashlog.git/sink"
)

// levelEnabler implements zapcore.LevelEnabler around a minimum level.
type levelEnabler struct {
	minLevel zapcore.Level
}

// TrashLogCore is a zapcore.Core that forwards log entries to the trashlog
// service, buffering them in a local bbolt store until they are delivered.
type TrashLogCore struct {
levelEnabler
coreFields map[string]interface{}
ctx context.Context
stream pb.Trashlog_ValveClient
svcData *pb.SvcData
ctxFields map[string]*pb.Value
keyFields map[string]*pb.Value
recoverStore *bbolt.DAL
url string
sendMutex sync.Mutex
failCh chan bool
emitCh chan entryWithFields
}

// entryWithFields pairs a log entry with its per-call fields for the emit channel.
type entryWithFields struct {
entry zapcore.Entry
fields []zapcore.Field
}

// dbPath is the suffix of the local bbolt recovery file; the actual file name is
// prefixed with the service version.
const dbPath = "recover.bolt"

// NewCore opens the local recovery store, dials the trashlog service and starts
// the background emit, send and reconnect workers. If the initial dial fails, the
// core is returned together with the error before the workers are started.
func NewCore(
ctx context.Context,
minLevel zapcore.Level,
url, version, commit string,
buildTime int64,
) (*TrashLogCore, error) {
recoverStore, err := bbolt.New(version + dbPath)
if err != nil {
return nil, err
}
tlc := TrashLogCore{
levelEnabler: levelEnabler{
minLevel: minLevel,
},
coreFields: make(map[string]interface{}),
ctx: ctx,
recoverStore: recoverStore,
svcData: &pb.SvcData{
BuildTime: uint64(buildTime),
Version: version,
Commit: commit,
},
url: url,
ctxFields: make(map[string]*pb.Value),
keyFields: make(map[string]*pb.Value),
sendMutex: sync.Mutex{},
failCh: make(chan bool),
emitCh: make(chan entryWithFields, 100),
}
	if err := tlc.connectToTrashlog(url); err != nil {
		fmt.Println("failed to connect to trashlog:", err)
		return &tlc, err
	}
worker := NewWorker(&tlc)
go worker.EmitWorker()
go worker.SendWorker()
go worker.Reconnect()
return &tlc, nil
}
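
// Example wiring for a caller of this package (an illustrative sketch: the ctx
// variable, endpoint, version, commit and build time below are placeholders):
//
//	core, err := zaptrashlog.NewCore(ctx, zapcore.InfoLevel,
//		"trashlog.example.local:9000", "v1.0.0", "deadbeef", time.Now().Unix())
//	if err != nil {
//		// NewCore returns the core alongside the error; the caller decides how to proceed.
//	}
//	base, _ := zap.NewProduction()
//	logger := core.WrapLogger(base)
//	logger.Info("service started")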

// Close shuts down the client stream and the local recovery store.
func (c *TrashLogCore) Close() error {
	var errSender error
	if c.stream != nil {
		errSender = c.stream.CloseSend()
	}
	if err := c.recoverStore.Close(); err != nil {
		return err
	}
	return errSender
}

// connectToTrashlog dials the trashlog gRPC endpoint and opens the Valve stream
// used to ship records.
func (c *TrashLogCore) connectToTrashlog(url string) error {
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  time.Second,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   120 * time.Second,
			},
		}),
	}
	conn, err := grpc.Dial(url, opts...)
	if err != nil {
		return err
	}
	client := pb.NewTrashlogClient(conn)
	stream, err := client.Valve(c.ctx, grpc.WaitForReady(true))
	if err != nil {
		return err
	}
	c.stream = stream
	return nil
}

// WrapLogger returns a copy of logger whose core tees every entry into this
// TrashLogCore in addition to the original core.
func (c *TrashLogCore) WrapLogger(logger *zap.Logger) *zap.Logger {
return logger.WithOptions(
zap.WrapCore(func(core zapcore.Core) zapcore.Core {
return zapcore.NewTee(core, c)
}),
zap.AddCallerSkip(1),
)
}

// Enabled reports whether entries at level l pass the configured minimum level.
func (le *levelEnabler) Enabled(l zapcore.Level) bool {
return l >= le.minLevel
}

// copyFieldsMap shallow-copies a map of protobuf values.
func copyFieldsMap(src map[string]*pb.Value) map[string]*pb.Value {
dst := make(map[string]*pb.Value, len(src))
for k, v := range src {
dst[k] = v
}
return dst
}

// copyCoreMap shallow-copies a map of decoded field values.
func copyCoreMap(src map[string]any) map[string]any {
dst := make(map[string]any, len(src))
for k, v := range src {
dst[k] = v
}
return dst
}

// With implements zapcore.Core. Field keys prefixed with "Ctx" are routed to the
// context fields and keys prefixed with "Key" to the key fields (with the prefix
// stripped); everything else defaults to the key fields. The derived core shares
// the stream, recovery store and worker channels of its parent.
func (c *TrashLogCore) With(fields []zapcore.Field) zapcore.Core {
	fieldMap := fieldsToMap(fields)
	destCoreFields := copyCoreMap(c.coreFields)
	destKeyFields := copyFieldsMap(c.keyFields)
	destCtxFields := copyFieldsMap(c.ctxFields)
	for k, v := range fieldMap {
		destCoreFields[k] = v
		switch {
		case strings.HasPrefix(k, "Ctx"):
			destCtxFields[strings.TrimPrefix(k, "Ctx")] = convertValue(v)
		case strings.HasPrefix(k, "Key"):
			destKeyFields[strings.TrimPrefix(k, "Key")] = convertValue(v)
		default:
			destKeyFields[k] = convertValue(v)
		}
	}
	return &TrashLogCore{
		levelEnabler: c.levelEnabler,
		coreFields:   destCoreFields,
		ctxFields:    destCtxFields,
		keyFields:    destKeyFields,
		svcData:      c.svcData,
		ctx:          c.ctx,
		stream:       c.stream,
		recoverStore: c.recoverStore,
		url:          c.url,
		failCh:       c.failCh,
		emitCh:       c.emitCh,
	}
}
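
// Field routing sketch for With (illustrative keys and values):
//
//	logger.With(
//		zap.String("CtxRequestId", "r-42"), // -> ctxFields["RequestId"]
//		zap.String("KeyUserId", "u-7"),     // -> keyFields["UserId"]
//		zap.Int64("attempt", 3),            // -> keyFields["attempt"]
//	)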

// convertValue maps a decoded zap field value onto the protobuf Value oneof;
// unsupported types produce a nil value.
func convertValue(v interface{}) *pb.Value {
switch t := v.(type) {
case string:
return &pb.Value{Value: &pb.Value_Str{Str: t}}
case int64:
return &pb.Value{Value: &pb.Value_Num{Num: t}}
case float64:
return &pb.Value{Value: &pb.Value_Double{Double: float32(t)}}
case bool:
return &pb.Value{Value: &pb.Value_Flag{Flag: t}}
}
return nil
}

// Check implements zapcore.Core by adding this core to checkedEntry when the
// entry's level passes the configured threshold.
func (c *TrashLogCore) Check(
entry zapcore.Entry,
checkedEntry *zapcore.CheckedEntry,
) *zapcore.CheckedEntry {
if c.levelEnabler.Enabled(entry.Level) {
return checkedEntry.AddCore(entry, c)
}
return checkedEntry
}

// Write implements zapcore.Core by handing the entry to the emit worker; storage
// and network delivery happen asynchronously.
func (c *TrashLogCore) Write(
entry zapcore.Entry,
fields []zapcore.Field,
) error {
c.emitCh <- entryWithFields{
entry: entry,
fields: fields,
}
return nil
}

// Sync implements zapcore.Core and is a no-op; delivery happens asynchronously.
func (c *TrashLogCore) Sync() error {
return nil
}

// fieldsToMap decodes zap fields into plain Go values using a map object encoder.
func fieldsToMap(fields []zapcore.Field) map[string]interface{} {
enc := zapcore.NewMapObjectEncoder()
for _, f := range fields {
f.AddTo(enc)
}
m := make(map[string]interface{})
for k, v := range enc.Fields {
m[k] = v
}
return m
}
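
// Worker runs the delivery pipeline behind TrashLogCore:
//
//	Write -> emitCh -> EmitWorker -> recovery store (bbolt)
//	recovery store -> SendWorker (5s ticker) -> gRPC Valve stream -> delete from store
//	send failure -> failCh -> Reconnect -> new stream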
type Worker struct {
core *TrashLogCore
recordCh chan pb.Record
}

// NewWorker creates a Worker bound to the given core.
func NewWorker(core *TrashLogCore) *Worker {
return &Worker{
core: core,
}
}

// EmitWorker drains emitCh and persists each entry into the recovery store until
// the context is cancelled.
func (w *Worker) EmitWorker() {
for {
select {
case <-w.core.ctx.Done():
return
case emit := <-w.core.emitCh:
w.saveFields(emit.entry, emit.fields)
}
}
}

// SendWorker flushes buffered records from the recovery store every five seconds.
func (w *Worker) SendWorker() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
w.fetchBolt()
case <-w.core.ctx.Done():
return
}
}
}

// saveFields converts the entry into a record and buffers it in the recovery store.
func (w *Worker) saveFields(entry zapcore.Entry, fields []zapcore.Field) {
	record := w.prepareRecord(entry, fields)
	if err := w.core.recoverStore.PutRecord(sink.Dto2daoRecord(&record)); err != nil {
		fmt.Println("failed to save record to the bolt store:", err)
	}
}

// fetchBolt sends every buffered record to the trashlog stream, deletes the ones
// that were delivered and signals the reconnect worker when a send fails.
func (w *Worker) fetchBolt() {
	records, err := w.core.recoverStore.GetAllRecords()
	if err != nil {
		fmt.Println("failed to read buffered records from the bolt store:", err)
		return
	}
	for key, record := range records {
		if w.core.stream == nil {
			return
		}
		rec := w.convertToProto(record)
		if err := w.core.stream.Send(rec); err != nil {
			fmt.Println("failed to send record to trashlog:", err)
			w.core.failCh <- true
			return
		}
		if err := w.core.recoverStore.DeleteRecordByKey([]byte(key)); err != nil {
			fmt.Println("failed to delete delivered record from the bolt store:", err)
			continue
		}
	}
}

// prepareRecord assembles a protobuf Record from the log entry, the per-call
// fields and the fields accumulated on the core.
func (w *Worker) prepareRecord(entry zapcore.Entry, fields []zapcore.Field) pb.Record {
	fieldMap := fieldsToMap(fields)
	// When the message contains a single "!" separator, only the part after it is shipped.
	splitMessage := strings.Split(entry.Message, "!")
	var msg string
	if len(splitMessage) == 2 {
		msg = splitMessage[1]
	} else {
		msg = entry.Message
	}
keyFields, ctxFields := make(map[string]*pb.Value), make(map[string]*pb.Value)
for k, v := range w.core.keyFields {
keyFields[k] = v
}
for k, v := range w.core.ctxFields {
ctxFields[k] = v
}
	// Per-call fields keep their lowercased prefix; unprefixed keys are added to ctxFields with a "ctx" prefix.
	for k, v := range fieldMap {
switch {
case strings.HasPrefix(k, "Ctx"):
ctxFields[strings.ToLower(k)] = convertValue(v)
case strings.HasPrefix(k, "Key"):
keyFields[strings.ToLower(k)] = convertValue(v)
default:
ctxFields[fmt.Sprintf("ctx%s", strings.ToLower(k))] = convertValue(v)
}
}
return pb.Record{
Level: entry.Level.String(),
TS: uint64(entry.Time.Unix()),
Message: msg,
Module: strings.Split(entry.LoggerName, "."),
Stacktrace: entry.Stack,
KeyFields: keyFields,
CtxFields: ctxFields,
SvcFields: &pb.SvcData{
BuildTime: w.core.svcData.BuildTime,
Version: w.core.svcData.Version,
Commit: w.core.svcData.Commit,
File: entry.Caller.File,
Line: uint64(entry.Caller.Line),
},
}
}

// convertToProto maps a record loaded from the recovery store onto the protobuf
// Record expected by the trashlog stream.
func (w *Worker) convertToProto(record model.Record) *pb.Record {
keyFields, ctxFields := make(map[string]*pb.Value), make(map[string]*pb.Value)
for k, v := range record.KeyFields {
keyFields[k] = convertValue(v)
}
for k, v := range record.CtxFields {
ctxFields[k] = convertValue(v)
}
return &pb.Record{
Level: record.Level,
TS: record.TS,
Message: record.Message,
Module: record.Module,
Stacktrace: record.Stacktrace,
KeyFields: keyFields,
CtxFields: ctxFields,
SvcFields: &pb.SvcData{
BuildTime: record.SvcBuildTime,
Version: record.SvcVersion,
Commit: record.SvcCommit,
File: record.SvcFile,
Line: record.SvcLine,
},
}
}

// Reconnect waits for failure signals from fetchBolt and re-dials the trashlog
// service, looping until the context is cancelled.
func (w *Worker) Reconnect() {
	for {
		select {
		case <-w.core.ctx.Done():
			return
		case <-w.core.failCh:
			if err := w.core.connectToTrashlog(w.core.url); err != nil {
				fmt.Println("reconnect to trashlog failed:", err)
			} else {
				fmt.Println("reconnect to trashlog successful")
			}
		}
	}
}