| field | value | date |
|---|---|---|
| author | uzhas <uzhas@ydb.tech> | 2023-08-24 17:35:21 +0300 |
| committer | uzhas <uzhas@ydb.tech> | 2023-08-24 17:53:39 +0300 |
| commit | de6e39881d059d67cbcc978d076d9e3e5e9732fc (patch) | |
| tree | cdf4e77c0156fe7f192d644883954f302fc56c01 | |
| parent | b890c9f4f00efbc099a862b70c1dbc4c7db3dd2f (diff) | |
| download | ydb-de6e39881d059d67cbcc978d076d9e3e5e9732fc.tar.gz | |
move yql connector to ydb
move code
149 files changed, 15062 insertions, 4 deletions
diff --git a/library/go/core/log/compat/golog/log.go b/library/go/core/log/compat/golog/log.go new file mode 100644 index 0000000000..4ebe864234 --- /dev/null +++ b/library/go/core/log/compat/golog/log.go @@ -0,0 +1,21 @@ +package golog + +import ( + canal_log "github.com/siddontang/go-log/log" + "github.com/ydb-platform/ydb/library/go/core/log" +) + +func SetLevel(level log.Level) { + switch level { + case log.DebugLevel: + canal_log.SetLevel(canal_log.LevelDebug) + case log.ErrorLevel: + canal_log.SetLevel(canal_log.LevelError) + case log.FatalLevel: + canal_log.SetLevel(canal_log.LevelFatal) + case log.InfoLevel: + canal_log.SetLevel(canal_log.LevelInfo) + case log.TraceLevel: + canal_log.SetLevel(canal_log.LevelTrace) + } +} diff --git a/library/go/core/log/compat/golog/ya.make b/library/go/core/log/compat/golog/ya.make new file mode 100644 index 0000000000..19240f7378 --- /dev/null +++ b/library/go/core/log/compat/golog/ya.make @@ -0,0 +1,5 @@ +GO_LIBRARY() + +SRCS(log.go) + +END() diff --git a/library/go/core/log/compat/logrus/log.go b/library/go/core/log/compat/logrus/log.go new file mode 100644 index 0000000000..a92e52dda0 --- /dev/null +++ b/library/go/core/log/compat/logrus/log.go @@ -0,0 +1,202 @@ +package logrus + +import ( + "io" + "runtime" + "strings" + "sync" + + "github.com/sirupsen/logrus" + "github.com/ydb-platform/ydb/library/go/core/log" +) + +/* Call frame calculations are copied from logrus package */ +var ( + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... 
+func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +func getCallerDepth() int { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamic get the package name and the minimum caller depth + logrusIsNext := false + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if logrusIsNext { + logrusPackage = getPackageName(funcName) + break + } + if strings.Contains(funcName, "LogrusAdapter") { + logrusIsNext = true + continue + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + callerDepth := minimumCallerDepth + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return callerDepth - 2 + } + callerDepth++ + } + + // if we got here, we failed to find the caller's context + return 0 +} + +func convertLevel(level log.Level) logrus.Level { + switch level { + case log.TraceLevel: + return logrus.TraceLevel + case log.DebugLevel: + return logrus.DebugLevel + case log.InfoLevel: + return logrus.InfoLevel + case log.WarnLevel: + return logrus.WarnLevel + case log.ErrorLevel: + return logrus.ErrorLevel + case log.FatalLevel: + return logrus.FatalLevel + } + + return logrus.PanicLevel +} + +func SetLevel(level log.Level) { + logrus.SetLevel(convertLevel(level)) +} + +type LogrusAdapter struct { + logger log.Logger + adaptCallstack bool + convertPrefix bool +} + +func (a *LogrusAdapter) Format(entry *logrus.Entry) ([]byte, error) { + var name *string + fields := make([]log.Field, 0, len(entry.Data)) + for key, val := range entry.Data { + skip := false + if a.convertPrefix && key == "prefix" { + if w, ok := val.(string); ok { + name = &w + skip = true + } + } + if !skip { + fields = append(fields, log.Any(key, val)) + } + } + + var logger log.Logger + if a.adaptCallstack { + logger = log.AddCallerSkip(a.logger, getCallerDepth()) + } else { + logger = a.logger + } + + if a.convertPrefix && name != nil { + logger = logger.WithName(*name) + } + + switch entry.Level { + case logrus.TraceLevel: + logger.Trace(entry.Message, fields...) + case logrus.DebugLevel: + logger.Debug(entry.Message, fields...) + case logrus.InfoLevel: + logger.Info(entry.Message, fields...) + case logrus.WarnLevel: + logger.Warn(entry.Message, fields...) + case logrus.ErrorLevel: + logger.Error(entry.Message, fields...) + case logrus.FatalLevel: + logger.Fatal(entry.Message, fields...) + case logrus.PanicLevel: + logger.Fatal(entry.Message, fields...) 
+ } + + return nil, nil +} + +type Option func(*LogrusAdapter) + +func DontAdaptCallstack() Option { + return func(adapter *LogrusAdapter) { + adapter.adaptCallstack = false + } +} + +func ConvertPrefix() Option { + return func(adapter *LogrusAdapter) { + adapter.convertPrefix = true + } +} + +// AdaptLogrus replaces logr formatter by wrapped logger +func AdaptLogrus(logr *logrus.Logger, logger log.Logger, level log.Level, opts ...Option) { + logr.SetLevel(convertLevel(level)) + + adapter := &LogrusAdapter{logger, true, false} + + for _, opt := range opts { + opt(adapter) + } + + logr.SetFormatter(adapter) + logr.SetOutput(io.Discard) +} + +// AdaptStandardLogger replaces logrus.StandardLogger() formatter by wrapped logger +func AdaptStandardLogger(logger log.Logger, level log.Level, opts ...Option) { + AdaptLogrus(logrus.StandardLogger(), logger, level, opts...) +} diff --git a/library/go/core/log/compat/logrus/ya.make b/library/go/core/log/compat/logrus/ya.make new file mode 100644 index 0000000000..19240f7378 --- /dev/null +++ b/library/go/core/log/compat/logrus/ya.make @@ -0,0 +1,5 @@ +GO_LIBRARY() + +SRCS(log.go) + +END() diff --git a/library/go/core/log/compat/pion/log.go b/library/go/core/log/compat/pion/log.go new file mode 100644 index 0000000000..cf93e549c8 --- /dev/null +++ b/library/go/core/log/compat/pion/log.go @@ -0,0 +1,76 @@ +package pion + +import ( + "github.com/pion/logging" + "github.com/ydb-platform/ydb/library/go/core/log" +) + +type LoggerFactory struct { + StandardLogger log.Logger +} + +func (l LoggerFactory) NewLogger(scope string) logging.LeveledLogger { + return LoggerAdapter{ + standardLogger: l.StandardLogger, + scope: scope, + } +} + +type LoggerAdapter struct { + standardLogger log.Logger + scope string +} + +func (a LoggerAdapter) Trace(msg string) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Trace(a.addScope(msg)) +} + +func (a LoggerAdapter) Tracef(format string, args ...interface{}) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Tracef(a.addScope(format), args...) +} + +func (a LoggerAdapter) Debug(msg string) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Debug(a.addScope(msg)) +} + +func (a LoggerAdapter) Debugf(format string, args ...interface{}) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Debugf(a.addScope(format), args...) +} + +func (a LoggerAdapter) Info(msg string) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Info(a.addScope(msg)) +} + +func (a LoggerAdapter) Infof(format string, args ...interface{}) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Infof(a.addScope(format), args...) +} + +func (a LoggerAdapter) Warn(msg string) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Warn(a.addScope(msg)) +} + +func (a LoggerAdapter) Warnf(format string, args ...interface{}) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Warnf(a.addScope(format), args...) +} + +func (a LoggerAdapter) Error(msg string) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Error(a.addScope(msg)) +} + +func (a LoggerAdapter) Errorf(format string, args ...interface{}) { + log.AddCallerSkip(a.standardLogger, 1) + a.standardLogger.Errorf(a.addScope(format), args...) 
+} + +func (a LoggerAdapter) addScope(s string) string { + return a.scope + ": " + s +} diff --git a/library/go/core/log/compat/pion/ya.make b/library/go/core/log/compat/pion/ya.make new file mode 100644 index 0000000000..19240f7378 --- /dev/null +++ b/library/go/core/log/compat/pion/ya.make @@ -0,0 +1,5 @@ +GO_LIBRARY() + +SRCS(log.go) + +END() diff --git a/library/go/core/log/compat/stdlog/stdlog.go b/library/go/core/log/compat/stdlog/stdlog.go new file mode 100644 index 0000000000..f812cd10b7 --- /dev/null +++ b/library/go/core/log/compat/stdlog/stdlog.go @@ -0,0 +1,54 @@ +package stdlog + +import ( + "bytes" + "fmt" + stdlog "log" + + "github.com/ydb-platform/ydb/library/go/core/log" +) + +func levelToFunc(logger log.Logger, lvl log.Level) (func(msg string, fields ...log.Field), error) { + switch lvl { + case log.DebugLevel: + return logger.Debug, nil + case log.TraceLevel: + return logger.Trace, nil + case log.InfoLevel: + return logger.Info, nil + case log.WarnLevel: + return logger.Warn, nil + case log.ErrorLevel: + return logger.Error, nil + case log.FatalLevel: + return logger.Fatal, nil + } + + return nil, fmt.Errorf("unknown log level: %v", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...log.Field) +} + +func (w *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + w.logFunc(string(p)) + return len(p), nil +} + +// New creates stdlib log.Logger that writes to provided logger on Error level +func New(logger log.Logger) *stdlog.Logger { + l := log.AddCallerSkip(logger, 3) + return stdlog.New(&loggerWriter{logFunc: l.Error}, "", 0) +} + +// NewAt creates stdlib log.Logger that writes to provided logger on specified level +func NewAt(logger log.Logger, lvl log.Level) (*stdlog.Logger, error) { + l := log.AddCallerSkip(logger, 3) + logFunc, err := levelToFunc(l, lvl) + if err != nil { + return nil, err + } + return stdlog.New(&loggerWriter{logFunc: logFunc}, "", 0), nil +} diff --git a/library/go/core/log/compat/stdlog/ya.make b/library/go/core/log/compat/stdlog/ya.make new file mode 100644 index 0000000000..44456e973a --- /dev/null +++ b/library/go/core/log/compat/stdlog/ya.make @@ -0,0 +1,5 @@ +GO_LIBRARY() + +SRCS(stdlog.go) + +END() diff --git a/library/go/core/log/compat/ya.make b/library/go/core/log/compat/ya.make new file mode 100644 index 0000000000..7f371862bd --- /dev/null +++ b/library/go/core/log/compat/ya.make @@ -0,0 +1,6 @@ +RECURSE( + golog + logrus + pion + stdlog +) diff --git a/library/go/core/log/ctxlog/ctxlog.go b/library/go/core/log/ctxlog/ctxlog.go new file mode 100644 index 0000000000..e054e9c2ed --- /dev/null +++ b/library/go/core/log/ctxlog/ctxlog.go @@ -0,0 +1,124 @@ +package ctxlog + +import ( + "context" + "fmt" + + "github.com/ydb-platform/ydb/library/go/core/log" +) + +type ctxKey struct{} + +// ContextFields returns log.Fields bound with ctx. +// If no fields are bound, it returns nil. +func ContextFields(ctx context.Context) []log.Field { + fs, _ := ctx.Value(ctxKey{}).([]log.Field) + return fs +} + +// WithFields returns a new context that is bound with given fields and based +// on parent ctx. +func WithFields(ctx context.Context, fields ...log.Field) context.Context { + if len(fields) == 0 { + return ctx + } + + return context.WithValue(ctx, ctxKey{}, mergeFields(ContextFields(ctx), fields)) +} + +// Trace logs at Trace log level using fields both from arguments and ones that +// are bound to ctx. 
+func Trace(ctx context.Context, l log.Logger, msg string, fields ...log.Field) { + log.AddCallerSkip(l, 1).Trace(msg, mergeFields(ContextFields(ctx), fields)...) +} + +// Debug logs at Debug log level using fields both from arguments and ones that +// are bound to ctx. +func Debug(ctx context.Context, l log.Logger, msg string, fields ...log.Field) { + log.AddCallerSkip(l, 1).Debug(msg, mergeFields(ContextFields(ctx), fields)...) +} + +// Info logs at Info log level using fields both from arguments and ones that +// are bound to ctx. +func Info(ctx context.Context, l log.Logger, msg string, fields ...log.Field) { + log.AddCallerSkip(l, 1).Info(msg, mergeFields(ContextFields(ctx), fields)...) +} + +// Warn logs at Warn log level using fields both from arguments and ones that +// are bound to ctx. +func Warn(ctx context.Context, l log.Logger, msg string, fields ...log.Field) { + log.AddCallerSkip(l, 1).Warn(msg, mergeFields(ContextFields(ctx), fields)...) +} + +// Error logs at Error log level using fields both from arguments and ones that +// are bound to ctx. +func Error(ctx context.Context, l log.Logger, msg string, fields ...log.Field) { + log.AddCallerSkip(l, 1).Error(msg, mergeFields(ContextFields(ctx), fields)...) +} + +// Fatal logs at Fatal log level using fields both from arguments and ones that +// are bound to ctx. +func Fatal(ctx context.Context, l log.Logger, msg string, fields ...log.Field) { + log.AddCallerSkip(l, 1).Fatal(msg, mergeFields(ContextFields(ctx), fields)...) +} + +// Tracef logs at Trace log level using fields that are bound to ctx. +// The message is formatted using provided arguments. +func Tracef(ctx context.Context, l log.Logger, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + log.AddCallerSkip(l, 1).Trace(msg, ContextFields(ctx)...) +} + +// Debugf logs at Debug log level using fields that are bound to ctx. +// The message is formatted using provided arguments. +func Debugf(ctx context.Context, l log.Logger, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + log.AddCallerSkip(l, 1).Debug(msg, ContextFields(ctx)...) +} + +// Infof logs at Info log level using fields that are bound to ctx. +// The message is formatted using provided arguments. +func Infof(ctx context.Context, l log.Logger, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + log.AddCallerSkip(l, 1).Info(msg, ContextFields(ctx)...) +} + +// Warnf logs at Warn log level using fields that are bound to ctx. +// The message is formatted using provided arguments. +func Warnf(ctx context.Context, l log.Logger, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + log.AddCallerSkip(l, 1).Warn(msg, ContextFields(ctx)...) +} + +// Errorf logs at Error log level using fields that are bound to ctx. +// The message is formatted using provided arguments. +func Errorf(ctx context.Context, l log.Logger, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + log.AddCallerSkip(l, 1).Error(msg, ContextFields(ctx)...) +} + +// Fatalf logs at Fatal log level using fields that are bound to ctx. +// The message is formatted using provided arguments. +func Fatalf(ctx context.Context, l log.Logger, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + log.AddCallerSkip(l, 1).Fatal(msg, ContextFields(ctx)...) +} + +func mergeFields(a, b []log.Field) []log.Field { + if a == nil { + return b + } + if b == nil { + return a + } + + // NOTE: just append() here is unsafe. 
If a caller passed slice of fields + // followed by ... with capacity greater than length, then simultaneous + // logging will lead to a data race condition. + // + // See https://golang.org/ref/spec#Passing_arguments_to_..._parameters + c := make([]log.Field, len(a)+len(b)) + n := copy(c, a) + copy(c[n:], b) + return c +} diff --git a/library/go/core/log/ctxlog/ctxlog_test.go b/library/go/core/log/ctxlog/ctxlog_test.go new file mode 100644 index 0000000000..c3fbedfc09 --- /dev/null +++ b/library/go/core/log/ctxlog/ctxlog_test.go @@ -0,0 +1,66 @@ +package ctxlog + +import ( + "context" + "reflect" + "testing" + + "github.com/ydb-platform/ydb/library/go/core/log" +) + +func TestContextFields(t *testing.T) { + for _, test := range []struct { + ctx context.Context + exp []log.Field + }{ + { + ctx: context.Background(), + exp: nil, + }, + { + ctx: contextWithFields( + log.String("foo", "bar"), + log.String("bar", "baz"), + ), + exp: []log.Field{ + log.String("foo", "bar"), + log.String("bar", "baz"), + }, + }, + } { + t.Run("", func(t *testing.T) { + act := ContextFields(test.ctx) + if exp := test.exp; !reflect.DeepEqual(act, exp) { + t.Fatalf( + "ContextFields() = %v; want %v", + act, exp, + ) + } + }) + } +} + +// TestWithFields tests the case when race condition may occur on adding fields +// to a bound field slice capable enough to store additional ones. +func TestWithFields(t *testing.T) { + fs := make([]log.Field, 2, 4) + fs[0] = log.String("a", "a") + fs[1] = log.String("b", "b") + + // Bind to ctx1 field slice with cap(fs) = 2. + ctx1 := WithFields(context.Background(), fs...) + + // Bind additional two fields to ctx2 that are able to fit the parent's + // ctx1 bound fields. + _ = WithFields(ctx1, log.String("c", "c"), log.String("d", "d")) + + var act, exp [2]log.Field // Expect to zero-values of Field. + copy(act[:], fs[2:4]) // Check the tail of initial slice. 
+ if act != exp { + t.Fatalf("fields tail is non-empty: %v", act) + } +} + +func contextWithFields(fs ...log.Field) context.Context { + return context.WithValue(context.Background(), ctxKey{}, fs) +} diff --git a/library/go/core/log/ctxlog/gotest/ya.make b/library/go/core/log/ctxlog/gotest/ya.make new file mode 100644 index 0000000000..74138e031c --- /dev/null +++ b/library/go/core/log/ctxlog/gotest/ya.make @@ -0,0 +1,3 @@ +GO_TEST_FOR(library/go/core/log/ctxlog) + +END() diff --git a/library/go/core/log/ctxlog/ya.make b/library/go/core/log/ctxlog/ya.make new file mode 100644 index 0000000000..61b48a372f --- /dev/null +++ b/library/go/core/log/ctxlog/ya.make @@ -0,0 +1,9 @@ +GO_LIBRARY() + +SRCS(ctxlog.go) + +GO_TEST_SRCS(ctxlog_test.go) + +END() + +RECURSE(gotest) diff --git a/library/go/core/log/fields.go b/library/go/core/log/fields.go new file mode 100644 index 0000000000..afd41c197e --- /dev/null +++ b/library/go/core/log/fields.go @@ -0,0 +1,446 @@ +package log + +import ( + "fmt" + "time" +) + +const ( + // DefaultErrorFieldName is the default field name used for errors + DefaultErrorFieldName = "error" +) + +// FieldType is a type of data Field can represent +type FieldType int + +const ( + // FieldTypeNil is for a pure nil + FieldTypeNil FieldType = iota + // FieldTypeString is for a string + FieldTypeString + // FieldTypeBinary is for a binary array + FieldTypeBinary + // FieldTypeBoolean is for boolean + FieldTypeBoolean + // FieldTypeSigned is for signed integers + FieldTypeSigned + // FieldTypeUnsigned is for unsigned integers + FieldTypeUnsigned + // FieldTypeFloat is for float + FieldTypeFloat + // FieldTypeTime is for time.Time + FieldTypeTime + // FieldTypeDuration is for time.Duration + FieldTypeDuration + // FieldTypeError is for an error + FieldTypeError + // FieldTypeArray is for an array of any type + FieldTypeArray + // FieldTypeAny is for any type + FieldTypeAny + // FieldTypeReflect is for unknown types + FieldTypeReflect + // FieldTypeByteString is for a bytes that can be represented as UTF-8 string + FieldTypeByteString +) + +// Field stores one structured logging field +type Field struct { + key string + ftype FieldType + string string + signed int64 + unsigned uint64 + float float64 + iface interface{} +} + +// Key returns field key +func (f Field) Key() string { + return f.key +} + +// Type returns field type +func (f Field) Type() FieldType { + return f.ftype +} + +// String returns field string +func (f Field) String() string { + return f.string +} + +// Binary constructs field of []byte +func (f Field) Binary() []byte { + if f.iface == nil { + return nil + } + return f.iface.([]byte) +} + +// Bool returns field bool +func (f Field) Bool() bool { + return f.Signed() != 0 +} + +// Signed returns field int64 +func (f Field) Signed() int64 { + return f.signed +} + +// Unsigned returns field uint64 +func (f Field) Unsigned() uint64 { + return f.unsigned +} + +// Float returns field float64 +func (f Field) Float() float64 { + return f.float +} + +// Time returns field time.Time +func (f Field) Time() time.Time { + return time.Unix(0, f.signed) +} + +// Duration returns field time.Duration +func (f Field) Duration() time.Duration { + return time.Nanosecond * time.Duration(f.signed) +} + +// Error constructs field of error type +func (f Field) Error() error { + if f.iface == nil { + return nil + } + return f.iface.(error) +} + +// Interface returns field interface +func (f Field) Interface() interface{} { + return f.iface +} + +// Any returns contained data as 
interface{} +// nolint: gocyclo +func (f Field) Any() interface{} { + switch f.Type() { + case FieldTypeNil: + return nil + case FieldTypeString: + return f.String() + case FieldTypeBinary: + return f.Interface() + case FieldTypeBoolean: + return f.Bool() + case FieldTypeSigned: + return f.Signed() + case FieldTypeUnsigned: + return f.Unsigned() + case FieldTypeFloat: + return f.Float() + case FieldTypeTime: + return f.Time() + case FieldTypeDuration: + return f.Duration() + case FieldTypeError: + return f.Error() + case FieldTypeArray: + return f.Interface() + case FieldTypeAny: + return f.Interface() + case FieldTypeReflect: + return f.Interface() + case FieldTypeByteString: + return f.Interface() + default: + // For when new field type is not added to this func + panic(fmt.Sprintf("unknown field type: %d", f.Type())) + } +} + +// Nil constructs field of nil type +func Nil(key string) Field { + return Field{key: key, ftype: FieldTypeNil} +} + +// String constructs field of string type +func String(key, value string) Field { + return Field{key: key, ftype: FieldTypeString, string: value} +} + +// Sprintf constructs field of string type with formatting +func Sprintf(key, format string, args ...interface{}) Field { + return Field{key: key, ftype: FieldTypeString, string: fmt.Sprintf(format, args...)} +} + +// Strings constructs Field from []string +func Strings(key string, value []string) Field { + return Array(key, value) +} + +// Binary constructs field of []byte type +func Binary(key string, value []byte) Field { + return Field{key: key, ftype: FieldTypeBinary, iface: value} +} + +// Bool constructs field of bool type +func Bool(key string, value bool) Field { + field := Field{key: key, ftype: FieldTypeBoolean} + if value { + field.signed = 1 + } else { + field.signed = 0 + } + + return field +} + +// Bools constructs Field from []bool +func Bools(key string, value []bool) Field { + return Array(key, value) +} + +// Int constructs Field from int +func Int(key string, value int) Field { + return Int64(key, int64(value)) +} + +// Ints constructs Field from []int +func Ints(key string, value []int) Field { + return Array(key, value) +} + +// Int8 constructs Field from int8 +func Int8(key string, value int8) Field { + return Int64(key, int64(value)) +} + +// Int8s constructs Field from []int8 +func Int8s(key string, value []int8) Field { + return Array(key, value) +} + +// Int16 constructs Field from int16 +func Int16(key string, value int16) Field { + return Int64(key, int64(value)) +} + +// Int16s constructs Field from []int16 +func Int16s(key string, value []int16) Field { + return Array(key, value) +} + +// Int32 constructs Field from int32 +func Int32(key string, value int32) Field { + return Int64(key, int64(value)) +} + +// Int32s constructs Field from []int32 +func Int32s(key string, value []int32) Field { + return Array(key, value) +} + +// Int64 constructs Field from int64 +func Int64(key string, value int64) Field { + return Field{key: key, ftype: FieldTypeSigned, signed: value} +} + +// Int64s constructs Field from []int64 +func Int64s(key string, value []int64) Field { + return Array(key, value) +} + +// UInt constructs Field from uint +func UInt(key string, value uint) Field { + return UInt64(key, uint64(value)) +} + +// UInts constructs Field from []uint +func UInts(key string, value []uint) Field { + return Array(key, value) +} + +// UInt8 constructs Field from uint8 +func UInt8(key string, value uint8) Field { + return UInt64(key, uint64(value)) +} + +// UInt8s constructs 
Field from []uint8 +func UInt8s(key string, value []uint8) Field { + return Array(key, value) +} + +// UInt16 constructs Field from uint16 +func UInt16(key string, value uint16) Field { + return UInt64(key, uint64(value)) +} + +// UInt16s constructs Field from []uint16 +func UInt16s(key string, value []uint16) Field { + return Array(key, value) +} + +// UInt32 constructs Field from uint32 +func UInt32(key string, value uint32) Field { + return UInt64(key, uint64(value)) +} + +// UInt32s constructs Field from []uint32 +func UInt32s(key string, value []uint32) Field { + return Array(key, value) +} + +// UInt64 constructs Field from uint64 +func UInt64(key string, value uint64) Field { + return Field{key: key, ftype: FieldTypeUnsigned, unsigned: value} +} + +// UInt64s constructs Field from []uint64 +func UInt64s(key string, value []uint64) Field { + return Array(key, value) +} + +// Float32 constructs Field from float32 +func Float32(key string, value float32) Field { + return Float64(key, float64(value)) +} + +// Float32s constructs Field from []float32 +func Float32s(key string, value []float32) Field { + return Array(key, value) +} + +// Float64 constructs Field from float64 +func Float64(key string, value float64) Field { + return Field{key: key, ftype: FieldTypeFloat, float: value} +} + +// Float64s constructs Field from []float64 +func Float64s(key string, value []float64) Field { + return Array(key, value) +} + +// Time constructs field of time.Time type +func Time(key string, value time.Time) Field { + return Field{key: key, ftype: FieldTypeTime, signed: value.UnixNano()} +} + +// Times constructs Field from []time.Time +func Times(key string, value []time.Time) Field { + return Array(key, value) +} + +// Duration constructs field of time.Duration type +func Duration(key string, value time.Duration) Field { + return Field{key: key, ftype: FieldTypeDuration, signed: value.Nanoseconds()} +} + +// Durations constructs Field from []time.Duration +func Durations(key string, value []time.Duration) Field { + return Array(key, value) +} + +// NamedError constructs field of error type +func NamedError(key string, value error) Field { + return Field{key: key, ftype: FieldTypeError, iface: value} +} + +// Error constructs field of error type with default field name +func Error(value error) Field { + return NamedError(DefaultErrorFieldName, value) +} + +// Errors constructs Field from []error +func Errors(key string, value []error) Field { + return Array(key, value) +} + +// Array constructs field of array type +func Array(key string, value interface{}) Field { + return Field{key: key, ftype: FieldTypeArray, iface: value} +} + +// Reflect constructs field of unknown type +func Reflect(key string, value interface{}) Field { + return Field{key: key, ftype: FieldTypeReflect, iface: value} +} + +// ByteString constructs field of bytes that could represent UTF-8 string +func ByteString(key string, value []byte) Field { + return Field{key: key, ftype: FieldTypeByteString, iface: value} +} + +// Any tries to deduce interface{} underlying type and constructs Field from it. +// Use of this function is ok only for the sole purpose of not repeating its entire code +// or parts of it in user's code (when you need to log interface{} types with unknown content). +// Otherwise please use specialized functions. 
+// nolint: gocyclo +func Any(key string, value interface{}) Field { + switch val := value.(type) { + case bool: + return Bool(key, val) + case float64: + return Float64(key, val) + case float32: + return Float32(key, val) + case int: + return Int(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case []string: + return Strings(key, val) + case uint: + return UInt(key, val) + case []uint: + return UInts(key, val) + case uint64: + return UInt64(key, val) + case []uint64: + return UInt64s(key, val) + case uint32: + return UInt32(key, val) + case []uint32: + return UInt32s(key, val) + case uint16: + return UInt16(key, val) + case []uint16: + return UInt16s(key, val) + case uint8: + return UInt8(key, val) + case []byte: + return Binary(key, val) + case time.Time: + return Time(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + default: + return Field{key: key, ftype: FieldTypeAny, iface: value} + } +} diff --git a/library/go/core/log/fields_test.go b/library/go/core/log/fields_test.go new file mode 100644 index 0000000000..ff6890b46a --- /dev/null +++ b/library/go/core/log/fields_test.go @@ -0,0 +1,32 @@ +package log + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// Simple test, that all type of fields are correctly zapified. +// Maybe we also need some test that checks resulting zap.Field type also. 
+func TestFieldAny(t *testing.T) { + for typ := FieldType(0); typ <= FieldTypeReflect; typ++ { + field := Field{ftype: typ} + assert.NotPanics(t, func() { + field.Any() + }) + } +} + +func TestAny(t *testing.T) { + var v struct{ A int } + field := Any("test", &v) + assert.Equal(t, field.ftype, FieldTypeAny) +} + +func TestReflect(t *testing.T) { + field := Reflect("test", 1) + assert.Equal(t, field.ftype, FieldTypeReflect) +} + +// TODO: test fields +// TODO: test field converters diff --git a/library/go/core/log/gotest/ya.make b/library/go/core/log/gotest/ya.make new file mode 100644 index 0000000000..84907af36d --- /dev/null +++ b/library/go/core/log/gotest/ya.make @@ -0,0 +1,3 @@ +GO_TEST_FOR(library/go/core/log) + +END() diff --git a/library/go/core/log/levels.go b/library/go/core/log/levels.go new file mode 100644 index 0000000000..54810410b9 --- /dev/null +++ b/library/go/core/log/levels.go @@ -0,0 +1,108 @@ +package log + +import ( + "fmt" + "strings" +) + +// Level of logging +type Level int + +// MarshalText marshals level to text +func (l Level) MarshalText() ([]byte, error) { + if l >= maxLevel || l < 0 { + return nil, fmt.Errorf("failed to marshal log level: level value (%d) is not in the allowed range (0-%d)", l, maxLevel-1) + } + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals level from text +func (l *Level) UnmarshalText(text []byte) error { + level, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *l = level + return nil +} + +// Standard log levels +const ( + TraceLevel Level = iota + DebugLevel + InfoLevel + WarnLevel + ErrorLevel + FatalLevel + maxLevel +) + +func Levels() (l []Level) { + for i := 0; i < int(maxLevel); i++ { + l = append(l, Level(i)) + } + return +} + +// String values for standard log levels +const ( + TraceString = "trace" + DebugString = "debug" + InfoString = "info" + WarnString = "warn" + ErrorString = "error" + FatalString = "fatal" +) + +// String implements Stringer interface for Level +func (l Level) String() string { + switch l { + case TraceLevel: + return TraceString + case DebugLevel: + return DebugString + case InfoLevel: + return InfoString + case WarnLevel: + return WarnString + case ErrorLevel: + return ErrorString + case FatalLevel: + return FatalString + default: + // For when new log level is not added to this func (most likely never). + panic(fmt.Sprintf("unknown log level: %d", l)) + } +} + +// Set implements flag.Value interface +func (l *Level) Set(v string) error { + lvl, err := ParseLevel(v) + if err != nil { + return err + } + + *l = lvl + return nil +} + +// ParseLevel parses log level from string. Returns ErrUnknownLevel for unknown log level. 
+func ParseLevel(l string) (Level, error) { + switch strings.ToLower(l) { + case TraceString: + return TraceLevel, nil + case DebugString: + return DebugLevel, nil + case InfoString: + return InfoLevel, nil + case WarnString: + return WarnLevel, nil + case ErrorString: + return ErrorLevel, nil + case FatalString: + return FatalLevel, nil + default: + return FatalLevel, fmt.Errorf("unknown log level: %s", l) + } +} diff --git a/library/go/core/log/levels_test.go b/library/go/core/log/levels_test.go new file mode 100644 index 0000000000..3181b91268 --- /dev/null +++ b/library/go/core/log/levels_test.go @@ -0,0 +1,51 @@ +package log_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/ydb-platform/ydb/library/go/core/log" +) + +var levelsToTest = []struct { + name string + level log.Level +}{ + {name: log.TraceString, level: log.TraceLevel}, + {name: log.DebugString, level: log.DebugLevel}, + {name: log.InfoString, level: log.InfoLevel}, + {name: log.WarnString, level: log.WarnLevel}, + {name: log.ErrorString, level: log.ErrorLevel}, + {name: log.FatalString, level: log.FatalLevel}, +} + +func TestLevels(t *testing.T) { + for _, levelInput := range levelsToTest { + t.Run("Convert "+levelInput.name, func(t *testing.T) { + levelFromLevelString, err := log.ParseLevel(levelInput.name) + require.NoError(t, err) + require.Equal(t, levelInput.level, levelFromLevelString) + + levelStringFromLevel := levelInput.level.String() + require.Equal(t, levelInput.name, levelStringFromLevel) + + levelFromLevelStringFromLevel, err := log.ParseLevel(levelStringFromLevel) + require.NoError(t, err) + require.Equal(t, levelInput.level, levelFromLevelStringFromLevel) + }) + } +} + +func TestLevel_MarshalText(t *testing.T) { + level := log.DebugLevel + _, err := level.MarshalText() + require.NoError(t, err) + + level = log.Level(100500) + _, err = level.MarshalText() + require.Error(t, err) + + level = log.Level(-1) + _, err = level.MarshalText() + require.Error(t, err) +} diff --git a/library/go/core/log/log.go b/library/go/core/log/log.go new file mode 100644 index 0000000000..3e1f76e870 --- /dev/null +++ b/library/go/core/log/log.go @@ -0,0 +1,134 @@ +package log + +import "errors" + +// Logger is the universal logger that can do everything. +type Logger interface { + loggerStructured + loggerFmt + toStructured + toFmt + withName +} + +type withName interface { + WithName(name string) Logger +} + +type toLogger interface { + // Logger returns general logger + Logger() Logger +} + +// Structured provides interface for logging using fields. +type Structured interface { + loggerStructured + toFmt + toLogger +} + +type loggerStructured interface { + // Trace logs at Trace log level using fields + Trace(msg string, fields ...Field) + // Debug logs at Debug log level using fields + Debug(msg string, fields ...Field) + // Info logs at Info log level using fields + Info(msg string, fields ...Field) + // Warn logs at Warn log level using fields + Warn(msg string, fields ...Field) + // Error logs at Error log level using fields + Error(msg string, fields ...Field) + // Fatal logs at Fatal log level using fields + Fatal(msg string, fields ...Field) +} + +type toFmt interface { + // Fmt returns fmt logger + Fmt() Fmt +} + +// Fmt provides interface for logging using fmt formatter. 
+type Fmt interface { + loggerFmt + toStructured + toLogger +} + +type loggerFmt interface { + // Tracef logs at Trace log level using fmt formatter + Tracef(format string, args ...interface{}) + // Debugf logs at Debug log level using fmt formatter + Debugf(format string, args ...interface{}) + // Infof logs at Info log level using fmt formatter + Infof(format string, args ...interface{}) + // Warnf logs at Warn log level using fmt formatter + Warnf(format string, args ...interface{}) + // Errorf logs at Error log level using fmt formatter + Errorf(format string, args ...interface{}) + // Fatalf logs at Fatal log level using fmt formatter + Fatalf(format string, args ...interface{}) +} + +type toStructured interface { + // Structured returns structured logger + Structured() Structured +} + +// LoggerWith is an interface for 'With' function +// LoggerWith provides interface for logger modifications. +type LoggerWith interface { + // With implements 'With' + With(fields ...Field) Logger +} + +// With for loggers that implement LoggerWith interface, returns logger that +// always adds provided key/value to every log entry. Otherwise returns same logger. +func With(l Logger, fields ...Field) Logger { + e, ok := l.(LoggerWith) + if !ok { + return l + } + + return e.With(fields...) +} + +// LoggerAddCallerSkip is an interface for 'AddCallerSkip' function +type LoggerAddCallerSkip interface { + // AddCallerSkip implements 'AddCallerSkip' + AddCallerSkip(skip int) Logger +} + +// AddCallerSkip for loggers that implement LoggerAddCallerSkip interface, returns logger that +// adds caller skip to each log entry. Otherwise returns same logger. +func AddCallerSkip(l Logger, skip int) Logger { + e, ok := l.(LoggerAddCallerSkip) + if !ok { + return l + } + + return e.AddCallerSkip(skip) +} + +// WriteAt is a helper method that checks logger and writes message at given level +func WriteAt(l Structured, lvl Level, msg string, fields ...Field) error { + if l == nil { + return errors.New("nil logger given") + } + + switch lvl { + case DebugLevel: + l.Debug(msg, fields...) + case TraceLevel: + l.Trace(msg, fields...) + case InfoLevel: + l.Info(msg, fields...) + case WarnLevel: + l.Warn(msg, fields...) + case ErrorLevel: + l.Error(msg, fields...) + case FatalLevel: + l.Fatal(msg, fields...) 
+ } + + return nil +} diff --git a/library/go/core/log/nop/nop.go b/library/go/core/log/nop/nop.go new file mode 100644 index 0000000000..950742878a --- /dev/null +++ b/library/go/core/log/nop/nop.go @@ -0,0 +1,73 @@ +package nop + +import ( + "os" + + "github.com/ydb-platform/ydb/library/go/core/log" +) + +// Logger that does nothing +type Logger struct{} + +var _ log.Logger = &Logger{} +var _ log.Structured = &Logger{} +var _ log.Fmt = &Logger{} + +// Logger returns general logger +func (l *Logger) Logger() log.Logger { + return l +} + +// Fmt returns fmt logger +func (l *Logger) Fmt() log.Fmt { + return l +} + +// Structured returns structured logger +func (l *Logger) Structured() log.Structured { + return l +} + +// Trace implements Trace method of log.Logger interface +func (l *Logger) Trace(msg string, fields ...log.Field) {} + +// Tracef implements Tracef method of log.Logger interface +func (l *Logger) Tracef(format string, args ...interface{}) {} + +// Debug implements Debug method of log.Logger interface +func (l *Logger) Debug(msg string, fields ...log.Field) {} + +// Debugf implements Debugf method of log.Logger interface +func (l *Logger) Debugf(format string, args ...interface{}) {} + +// Info implements Info method of log.Logger interface +func (l *Logger) Info(msg string, fields ...log.Field) {} + +// Infof implements Infof method of log.Logger interface +func (l *Logger) Infof(format string, args ...interface{}) {} + +// Warn implements Warn method of log.Logger interface +func (l *Logger) Warn(msg string, fields ...log.Field) {} + +// Warnf implements Warnf method of log.Logger interface +func (l *Logger) Warnf(format string, args ...interface{}) {} + +// Error implements Error method of log.Logger interface +func (l *Logger) Error(msg string, fields ...log.Field) {} + +// Errorf implements Errorf method of log.Logger interface +func (l *Logger) Errorf(format string, args ...interface{}) {} + +// Fatal implements Fatal method of log.Logger interface +func (l *Logger) Fatal(msg string, fields ...log.Field) { + os.Exit(1) +} + +// Fatalf implements Fatalf method of log.Logger interface +func (l *Logger) Fatalf(format string, args ...interface{}) { + os.Exit(1) +} + +func (l *Logger) WithName(name string) log.Logger { + return l +} diff --git a/library/go/core/log/nop/ya.make b/library/go/core/log/nop/ya.make new file mode 100644 index 0000000000..5d9e935c04 --- /dev/null +++ b/library/go/core/log/nop/ya.make @@ -0,0 +1,5 @@ +GO_LIBRARY() + +SRCS(nop.go) + +END() diff --git a/library/go/core/log/test/log_bench_test.go b/library/go/core/log/test/log_bench_test.go new file mode 100644 index 0000000000..c75e3dc2d6 --- /dev/null +++ b/library/go/core/log/test/log_bench_test.go @@ -0,0 +1,39 @@ +package test + +import ( + "fmt" + "testing" + + "github.com/ydb-platform/ydb/library/go/core/log" +) + +func BenchmarkOutput(b *testing.B) { + for _, loggerInput := range loggersToTest { + for _, count := range []int{0, 1, 2, 5} { + logger, err := loggerInput.factory(log.DebugLevel) + if err != nil { + b.Fatalf("failed to create logger: %s", b.Name()) + } + b.Run(fmt.Sprintf("%s fields %d", loggerInput.name, count), func(b *testing.B) { + benchmarkFields(b, logger, count) + }) + } + } +} + +func benchmarkFields(b *testing.B, logger log.Logger, count int) { + flds := genFields(count) + + for n := 0; n < b.N; n++ { + logger.Debug(msg, flds...) 
+ } +} + +func genFields(count int) []log.Field { + flds := make([]log.Field, 0, count) + for ; count > 0; count-- { + flds = append(flds, log.String(key, value)) + } + + return flds +} diff --git a/library/go/core/log/test/log_test.go b/library/go/core/log/test/log_test.go new file mode 100644 index 0000000000..b839127a32 --- /dev/null +++ b/library/go/core/log/test/log_test.go @@ -0,0 +1,120 @@ +package test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/ydb-platform/ydb/library/go/core/log" + "github.com/ydb-platform/ydb/library/go/core/log/nop" + "github.com/ydb-platform/ydb/library/go/core/log/zap" + uzap "go.uber.org/zap" +) + +var ( + msg = "msg" + msgfmt = "%s %s" + msgfmtargs = []interface{}{"hello", "world"} + key = "key" + value = "value" + withKey = "withKey" + withValue = "withValue" +) + +var loggersToTest = []struct { + name string + factory func(level log.Level) (log.Logger, error) +}{ + { + name: "Zap", + factory: func(level log.Level) (log.Logger, error) { + cfg := zap.JSONConfig(level) + // Disable output + cfg.OutputPaths = []string{} + cfg.ErrorOutputPaths = []string{} + return zap.New(cfg) + }, + }, + { + name: "ZapNop", + factory: func(level log.Level) (log.Logger, error) { + return &zap.Logger{ + L: uzap.NewNop(), + }, nil + }, + }, + { + name: "Nop", + factory: func(level log.Level) (log.Logger, error) { + return &nop.Logger{}, nil + }, + }, +} + +func TestLoggers(t *testing.T) { + for _, loggerInput := range loggersToTest { + for _, level := range log.Levels() { + t.Run("Construct "+loggerInput.name+level.String(), func(t *testing.T) { + logger, err := loggerInput.factory(level) + require.NoError(t, err) + require.NotNil(t, logger) + + lfmt := logger.Fmt() + require.NotNil(t, lfmt) + + l := lfmt.Structured() + require.NotNil(t, l) + require.Equal(t, logger, l) + }) + + t.Run("With "+loggerInput.name+level.String(), func(t *testing.T) { + logger, err := loggerInput.factory(level) + require.NoError(t, err) + require.NotNil(t, logger) + + withField := log.String(withKey, withValue) + loggerWith := log.With(logger, withField) + require.NotNil(t, loggerWith) + }) + + t.Run("AddCallerSkip "+loggerInput.name+level.String(), func(t *testing.T) { + logger, err := loggerInput.factory(level) + require.NoError(t, err) + require.NotNil(t, logger) + + loggerCallerSkip := log.AddCallerSkip(logger, 1) + require.NotNil(t, loggerCallerSkip) + }) + + // TODO: validate log output + t.Run("Logger "+loggerInput.name+level.String(), func(t *testing.T) { + logger, err := loggerInput.factory(level) + require.NoError(t, err) + require.NotNil(t, logger) + + logger.Trace(msg, log.String(key, value)) + logger.Debug(msg, log.String(key, value)) + logger.Info(msg, log.String(key, value)) + logger.Warn(msg, log.String(key, value)) + logger.Error(msg, log.String(key, value)) + // TODO: test fatal + }) + + // TODO: validate log output + t.Run("LoggerFMT "+loggerInput.name+level.String(), func(t *testing.T) { + logger, err := loggerInput.factory(level) + require.NoError(t, err) + require.NotNil(t, logger) + + lfmt := logger.Fmt() + require.NotNil(t, lfmt) + + lfmt.Tracef(msgfmt, msgfmtargs...) + lfmt.Debugf(msgfmt, msgfmtargs...) + lfmt.Infof(msgfmt, msgfmtargs...) + lfmt.Warnf(msgfmt, msgfmtargs...) + lfmt.Errorf(msgfmt, msgfmtargs...) 
+ // TODO: test fatal + }) + } + } +} diff --git a/library/go/core/log/test/ya.make b/library/go/core/log/test/ya.make new file mode 100644 index 0000000000..be231ce558 --- /dev/null +++ b/library/go/core/log/test/ya.make @@ -0,0 +1,8 @@ +GO_TEST() + +GO_TEST_SRCS( + log_bench_test.go + log_test.go +) + +END() diff --git a/library/go/core/log/ya.make b/library/go/core/log/ya.make new file mode 100644 index 0000000000..588e736521 --- /dev/null +++ b/library/go/core/log/ya.make @@ -0,0 +1,22 @@ +GO_LIBRARY() + +SRCS( + fields.go + levels.go + log.go +) + +GO_TEST_SRCS(fields_test.go) + +GO_XTEST_SRCS(levels_test.go) + +END() + +RECURSE( + compat + ctxlog + gotest + nop + test + zap +) diff --git a/library/go/core/log/zap/asynczap/background.go b/library/go/core/log/zap/asynczap/background.go new file mode 100644 index 0000000000..5af635dfe3 --- /dev/null +++ b/library/go/core/log/zap/asynczap/background.go @@ -0,0 +1,155 @@ +package asynczap + +import ( + "bytes" + "errors" + "sync" + "sync/atomic" + "time" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/zapcore" +) + +// background is a single object shared by all clones of core. +type background struct { + options Options + + q queue + out zapcore.WriteSyncer + + // use manual buffering instead of bufio background to preserve write atomicity. + // + // bufio.Writer might split log lines at arbitrary position. + writeBuffer bytes.Buffer + + wg sync.WaitGroup + mu sync.Mutex + cond *sync.Cond + stopped bool + iter int64 + lastErr error + forceFlush chan struct{} + + droppedRecords int64 + writeErrors int64 + reportOverflow int64 +} + +func newBackground(options Options, out zapcore.WriteSyncer) *background { + b := &background{ + options: options, + out: out, + forceFlush: make(chan struct{}), + } + b.cond = sync.NewCond(&b.mu) + return b +} + +func (b *background) flush() { + _, err := b.out.Write(b.writeBuffer.Bytes()) + if err != nil { + b.onError(err) + } + b.writeBuffer.Reset() +} + +func (b *background) onError(err error) { + atomic.AddInt64(&b.writeErrors, 1) + + b.lastErr = err +} + +func (b *background) stop() { + b.mu.Lock() + b.stopped = true + b.mu.Unlock() + + b.wg.Wait() +} + +func (b *background) finishIter() (stop bool) { + b.mu.Lock() + stop = b.stopped + b.mu.Unlock() + + atomic.StoreInt64(&b.reportOverflow, 0) + b.cond.Broadcast() + return +} + +func (b *background) run() { + defer b.wg.Done() + + flush := time.NewTicker(b.options.FlushInterval) + defer flush.Stop() + + var bufs []*buffer.Buffer + for { + bufs = bufs[:0] + b.mu.Lock() + + bufs = b.q.dequeueAll(bufs) + for _, buf := range bufs { + b.writeBuffer.Write(buf.Bytes()) + buf.Free() + + if b.writeBuffer.Len() > b.options.WriteBufferSize { + b.flush() + } + } + + if b.writeBuffer.Len() != 0 { + b.flush() + } + + b.iter++ + b.mu.Unlock() + + if b.finishIter() { + return + } + + select { + case <-flush.C: + case <-b.forceFlush: + flush.Reset(b.options.FlushInterval) + } + + } +} + +func (b *background) checkQueueSize() (size int, ok, shouldReport bool) { + size = int(b.q.loadSize()) + if size >= b.options.MaxMemoryUsage { + atomic.AddInt64(&b.droppedRecords, 1) + + old := atomic.SwapInt64(&b.reportOverflow, 1) + return size, false, old == 0 + } + + return 0, true, false +} + +func (b *background) sync() error { + b.mu.Lock() + defer b.mu.Unlock() + + select { + case b.forceFlush <- struct{}{}: + default: + } + + now := b.iter + for { + if b.iter >= now+1 { + return b.lastErr + } + + if b.stopped { + return errors.New("core has stopped") + } + + b.cond.Wait() + } +} 
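
The background writer above batches queued records into a single buffer and only writes whole records, which is what preserves line atomicity. It is wired into zap through the asynchronous core added in core.go below. The following is a minimal usage sketch modeled on core_test.go later in this commit; the stderr sink and the field values are illustrative placeholders, and the import path assumes the package's in-tree location under github.com/ydb-platform/ydb.

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/ydb-platform/ydb/library/go/core/log/zap/asynczap"
)

func main() {
	// Encode records as JSON; any zapcore.WriteSyncer can serve as the sink.
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	ws := zapcore.AddSync(os.Stderr)

	// NewCore starts the background goroutine implemented in background.go above.
	// A zero-valued Options picks up the defaults from options.go.
	core := asynczap.NewCore(enc, ws, zap.DebugLevel, asynczap.Options{})
	defer core.Stop() // Stop syncs pending records and shuts the goroutine down.

	logger := zap.New(core)
	logger.Info("hello from the async core", zap.String("component", "example"))

	// Sync blocks until the background writer completes a flush iteration.
	_ = logger.Sync()
}
```

Calling Stop (or at least Sync) before process exit matters here: records are buffered in memory, so an abrupt exit can drop whatever the background goroutine has not flushed yet.
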
diff --git a/library/go/core/log/zap/asynczap/core.go b/library/go/core/log/zap/asynczap/core.go new file mode 100644 index 0000000000..11acd24fba --- /dev/null +++ b/library/go/core/log/zap/asynczap/core.go @@ -0,0 +1,113 @@ +// Package asynczap implements asynchronous core for zap. +// +// By default, zap writes every log line synchronously and without buffering. This behaviour +// is completely inadequate for high-rate logging. +// +// This implementation of zap.Core moves file write to background goroutine, while carefully +// monitoring memory consumption. +// +// When background goroutine can't keep up with logging rate, log records are dropped. +package asynczap + +import ( + "fmt" + "sync/atomic" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type ( + Core struct { + zapcore.LevelEnabler + enc zapcore.Encoder + w *background + options Options + } + + Stats struct { + // Number of records dropped during memory overflow. + DroppedRecords int + + // Number of errors returned from underlying writer. + WriteErrors int + } +) + +// NewCore creates a Core that writes logs to a WriteSyncer. +func NewCore(enc zapcore.Encoder, ws zapcore.WriteSyncer, enab zapcore.LevelEnabler, options Options) *Core { + options.setDefault() + + w := newBackground(options, ws) + w.wg.Add(1) + go w.run() + + return &Core{ + LevelEnabler: enab, + enc: enc, + w: w, + } +} + +func (c *Core) Stop() { + _ = c.Sync() + c.w.stop() +} + +func (c *Core) With(fields []zap.Field) zapcore.Core { + clone := c.clone() + for i := range fields { + fields[i].AddTo(clone.enc) + } + return clone +} + +func (c *Core) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *Core) Write(ent zapcore.Entry, fields []zap.Field) error { + if size, ok, shouldReport := c.w.checkQueueSize(); !ok { + if shouldReport { + // Report overflow error only once per background iteration, to avoid spamming error output. + return fmt.Errorf("logger queue overflow: %d >= %d", size, c.options.MaxMemoryUsage) + } else { + return nil + } + } + + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + + c.w.q.enqueue(buf) + if ent.Level > zap.ErrorLevel { + // Since we may be crashing the program, sync the output. 
+ _ = c.Sync() + } + return nil +} + +func (c *Core) Sync() error { + return c.w.sync() +} + +func (c *Core) Stat() Stats { + return Stats{ + DroppedRecords: int(atomic.LoadInt64(&c.w.droppedRecords)), + WriteErrors: int(atomic.LoadInt64(&c.w.writeErrors)), + } +} + +func (c *Core) clone() *Core { + return &Core{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + w: c.w, + options: c.options, + } +} diff --git a/library/go/core/log/zap/asynczap/core_test.go b/library/go/core/log/zap/asynczap/core_test.go new file mode 100644 index 0000000000..35ae245678 --- /dev/null +++ b/library/go/core/log/zap/asynczap/core_test.go @@ -0,0 +1,123 @@ +package asynczap + +import ( + "bytes" + "os" + "runtime" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func TestCompareToDefault(t *testing.T) { + var buf0, buf1 bytes.Buffer + out0 := zapcore.AddSync(&buf0) + out1 := zapcore.AddSync(&buf1) + + format := zap.NewProductionEncoderConfig() + format.EncodeTime = func(t time.Time, e zapcore.PrimitiveArrayEncoder) { + e.AppendString("10:00") + } + + asyncCore := NewCore( + zapcore.NewJSONEncoder(format), + out0, + zap.DebugLevel, + Options{}) + + log0 := zap.New(asyncCore) + log0.Error("foo") + + require.NoError(t, asyncCore.Sync()) + asyncCore.Stop() + + syncCore := zapcore.NewCore( + zapcore.NewJSONEncoder(format), + out1, + zap.DebugLevel) + + log1 := zap.New(syncCore) + log1.Error("foo") + + require.Equal(t, buf0.String(), buf1.String()) +} + +type countWriteSyncer int32 + +func (c *countWriteSyncer) Write(b []byte) (int, error) { + atomic.AddInt32((*int32)(c), 1) + return len(b), nil +} + +func (c *countWriteSyncer) Sync() error { + return nil +} + +func TestSync(t *testing.T) { + var c countWriteSyncer + out0 := &c + + format := zap.NewProductionEncoderConfig() + format.EncodeTime = func(t time.Time, e zapcore.PrimitiveArrayEncoder) { + e.AppendString("10:00") + } + + asyncCore := NewCore( + zapcore.NewJSONEncoder(format), + out0, + zap.DebugLevel, + Options{FlushInterval: 10 * time.Nanosecond}) + + log0 := zap.New(asyncCore) + + for i := 0; i < 100000; i++ { + log0.Error("123") + _ = log0.Sync() + require.EqualValues(t, i+1, atomic.LoadInt32((*int32)(&c))) + } +} + +type lockWriter struct { + c chan struct{} +} + +func (w *lockWriter) Write(b []byte) (int, error) { + <-w.c + return 0, nil +} + +func TestDropsRecordsOnOverflow(t *testing.T) { + go func() { + time.Sleep(time.Second * 15) + + buf := make([]byte, 1024*1024) + n := runtime.Stack(buf, true) + _, _ = os.Stderr.Write(buf[:n]) + }() + + w := &lockWriter{c: make(chan struct{})} + + asyncCore := NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + zapcore.AddSync(w), + zap.DebugLevel, + Options{ + MaxMemoryUsage: 100, + }) + defer asyncCore.Stop() + + log := zap.New(asyncCore) + + for i := 0; i < 1000; i++ { + log.Error("foobar", zap.String("key", strings.Repeat("x", 1000))) + } + + assert.Greater(t, asyncCore.Stat().DroppedRecords, 990) + close(w.c) +} diff --git a/library/go/core/log/zap/asynczap/gotest/ya.make b/library/go/core/log/zap/asynczap/gotest/ya.make new file mode 100644 index 0000000000..30e947f6fc --- /dev/null +++ b/library/go/core/log/zap/asynczap/gotest/ya.make @@ -0,0 +1,3 @@ +GO_TEST_FOR(library/go/core/log/zap/asynczap) + +END() diff --git a/library/go/core/log/zap/asynczap/options.go b/library/go/core/log/zap/asynczap/options.go new file mode 100644 index 
0000000000..9b7f241fe9 --- /dev/null +++ b/library/go/core/log/zap/asynczap/options.go @@ -0,0 +1,34 @@ +package asynczap + +import "time" + +const ( + defaultMaxMemoryUsage = 1 << 26 // 64MB + defaultWriteBufferSize = 1 << 20 // 1MB + defaultFlushInterval = time.Millisecond * 100 +) + +type Options struct { + // MaxMemoryUsage is maximum amount of memory that will be used by in-flight log records. + MaxMemoryUsage int + + // WriteBufferSize specifies size of the buffer used for writes to underlying file. + WriteBufferSize int + + // FlushInterval specifies how often background goroutine would wake up. + FlushInterval time.Duration +} + +func (o *Options) setDefault() { + if o.MaxMemoryUsage == 0 { + o.MaxMemoryUsage = defaultMaxMemoryUsage + } + + if o.WriteBufferSize == 0 { + o.WriteBufferSize = defaultWriteBufferSize + } + + if o.FlushInterval == 0 { + o.FlushInterval = defaultFlushInterval + } +} diff --git a/library/go/core/log/zap/asynczap/queue.go b/library/go/core/log/zap/asynczap/queue.go new file mode 100644 index 0000000000..a37e87ef47 --- /dev/null +++ b/library/go/core/log/zap/asynczap/queue.go @@ -0,0 +1,83 @@ +package asynczap + +import ( + "sync" + "sync/atomic" + "unsafe" + + "go.uber.org/zap/buffer" +) + +var entryPool sync.Pool + +func newEntry() *entry { + pooled := entryPool.Get() + if pooled != nil { + return pooled.(*entry) + } else { + return new(entry) + } +} + +func putEntry(e *entry) { + entryPool.Put(e) +} + +type entry struct { + next *entry + buf *buffer.Buffer +} + +type queue struct { + size int64 + head unsafe.Pointer +} + +func (q *queue) loadHead() *entry { + return (*entry)(atomic.LoadPointer(&q.head)) +} + +func (q *queue) casHead(old, new *entry) (swapped bool) { + return atomic.CompareAndSwapPointer(&q.head, unsafe.Pointer(old), unsafe.Pointer(new)) +} + +func (q *queue) swapHead() *entry { + return (*entry)(atomic.SwapPointer(&q.head, nil)) +} + +func (q *queue) loadSize() int64 { + return atomic.LoadInt64(&q.size) +} + +func (q *queue) enqueue(buf *buffer.Buffer) { + e := newEntry() + e.buf = buf + + atomic.AddInt64(&q.size, int64(buf.Cap())) + for { + e.next = q.loadHead() + if q.casHead(e.next, e) { + break + } + } +} + +func (q *queue) dequeueAll(to []*buffer.Buffer) []*buffer.Buffer { + head := q.swapHead() + + for head != nil { + atomic.AddInt64(&q.size, -int64(head.buf.Cap())) + to = append(to, head.buf) + + next := head.next + putEntry(head) + head = next + } + + for i := 0; i < len(to)/2; i++ { + j := len(to) - i - 1 + to[i], to[j] = to[j], to[i] + } + + return to +} diff --git a/library/go/core/log/zap/asynczap/queue_test.go b/library/go/core/log/zap/asynczap/queue_test.go new file mode 100644 index 0000000000..25e9e62a1e --- /dev/null +++ b/library/go/core/log/zap/asynczap/queue_test.go @@ -0,0 +1,59 @@ +package asynczap + +import ( + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/buffer" +) + +func BenchmarkQueue(b *testing.B) { + var q queue + + go func() { + var buf []*buffer.Buffer + + for range time.Tick(10 * time.Millisecond) { + buf = q.dequeueAll(buf) + buf = buf[:0] + } + }() + + p := &buffer.Buffer{} + + b.ReportAllocs() + b.SetParallelism(runtime.NumCPU() - 1) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + q.enqueue(p) + } + }) +} + +func TestQueue(t *testing.T) { + var b0, b1, b2, b3 *buffer.Buffer + b0 = &buffer.Buffer{} + b0.AppendString("b0") + b1 = &buffer.Buffer{} + b1.AppendString("b1") + b2 = &buffer.Buffer{} + b2.AppendString("b2") + b3 = &buffer.Buffer{} + 
b3.AppendString("b3") + + var q queue + q.enqueue(b0) + q.enqueue(b1) + q.enqueue(b2) + + require.Equal(t, []*buffer.Buffer{b0, b1, b2}, q.dequeueAll(nil)) + + q.enqueue(b0) + q.enqueue(b1) + q.enqueue(b2) + q.enqueue(b3) + + require.Equal(t, []*buffer.Buffer{b0, b1, b2, b3}, q.dequeueAll(nil)) +} diff --git a/library/go/core/log/zap/asynczap/ya.make b/library/go/core/log/zap/asynczap/ya.make new file mode 100644 index 0000000000..30e06b4d1b --- /dev/null +++ b/library/go/core/log/zap/asynczap/ya.make @@ -0,0 +1,17 @@ +GO_LIBRARY() + +SRCS( + background.go + core.go + options.go + queue.go +) + +GO_TEST_SRCS( + core_test.go + queue_test.go +) + +END() + +RECURSE(gotest) diff --git a/library/go/core/log/zap/benchmark_test.go b/library/go/core/log/zap/benchmark_test.go new file mode 100644 index 0000000000..bb3102e3a8 --- /dev/null +++ b/library/go/core/log/zap/benchmark_test.go @@ -0,0 +1,131 @@ +package zap + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/ydb-platform/ydb/library/go/core/log" + "go.uber.org/zap" +) + +func BenchmarkZapLogger(b *testing.B) { + // use config for both loggers + cfg := NewDeployConfig() + cfg.OutputPaths = nil + cfg.ErrorOutputPaths = nil + + b.Run("stock", func(b *testing.B) { + for _, level := range log.Levels() { + b.Run(level.String(), func(b *testing.B) { + cfg.Level = zap.NewAtomicLevelAt(ZapifyLevel(level)) + + logger, err := cfg.Build() + require.NoError(b, err) + + funcs := []func(string, ...zap.Field){ + logger.Debug, + logger.Info, + logger.Warn, + logger.Error, + logger.Fatal, + } + + message := "test" + fields := []zap.Field{ + zap.String("test", "test"), + zap.Bool("test", true), + zap.Int("test", 42), + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + funcs[i%(len(funcs)-1)](message, fields...) + } + }) + } + }) + + b.Run("wrapped", func(b *testing.B) { + for _, level := range log.Levels() { + b.Run(level.String(), func(b *testing.B) { + cfg.Level = zap.NewAtomicLevelAt(ZapifyLevel(level)) + logger, err := New(cfg) + require.NoError(b, err) + + funcs := []func(string, ...log.Field){ + logger.Debug, + logger.Info, + logger.Warn, + logger.Error, + logger.Fatal, + } + + message := "test" + fields := []log.Field{ + log.String("test", "test"), + log.Bool("test", true), + log.Int("test", 42), + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + funcs[i%(len(funcs)-1)](message, fields...) 
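// Note: i%(len(funcs)-1) never selects the last element (Fatal), which would
// terminate the benchmark process.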
+ } + }) + } + }) +} + +func BenchmarkZapifyField(b *testing.B) { + fields := []log.Field{ + log.Nil("test"), + log.String("test", "test"), + log.Binary("test", []byte("test")), + log.Bool("test", true), + log.Int("test", 42), + log.UInt("test", 42), + log.Float64("test", 42), + log.Time("test", time.Now()), + log.Duration("test", time.Second), + log.NamedError("test", errors.New("test")), + log.Strings("test", []string{"test"}), + log.Any("test", "test"), + log.Reflect("test", "test"), + log.ByteString("test", []byte("test")), + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + zapifyField(fields[i%(len(fields)-1)]) + } +} + +func BenchmarkZapifyFields(b *testing.B) { + fields := []log.Field{ + log.Nil("test"), + log.String("test", "test"), + log.Binary("test", []byte("test")), + log.Bool("test", true), + log.Int("test", 42), + log.UInt("test", 42), + log.Float64("test", 42), + log.Time("test", time.Now()), + log.Duration("test", time.Second), + log.NamedError("test", errors.New("test")), + log.Strings("test", []string{"test"}), + log.Any("test", "test"), + log.Reflect("test", "test"), + log.ByteString("test", []byte("test")), + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + zapifyFields(fields...) + } +} diff --git a/library/go/core/log/zap/deploy.go b/library/go/core/log/zap/deploy.go new file mode 100644 index 0000000000..f86cfbab31 --- /dev/null +++ b/library/go/core/log/zap/deploy.go @@ -0,0 +1,132 @@ +package zap + +import ( + "github.com/ydb-platform/ydb/library/go/core/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// NewDeployEncoderConfig returns an opinionated EncoderConfig for +// deploy environment. +func NewDeployEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "levelStr", + StacktraceKey: "stackTrace", + TimeKey: "@timestamp", + CallerKey: "", + NameKey: "loggerName", + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +type cfgOption func(cfg *zap.Config) + +// WithSampling sets sampling settings initial and thereafter +func WithSampling(initial int, thereafter int) cfgOption { + return cfgOption(func(cfg *zap.Config) { + cfg.Sampling = &zap.SamplingConfig{ + Initial: initial, + Thereafter: thereafter, + } + }) +} + +// SetOutputPaths sets OutputPaths (stdout by default) +func SetOutputPaths(paths []string) cfgOption { + return cfgOption(func(cfg *zap.Config) { + cfg.OutputPaths = paths + }) +} + +// WithDevelopment sets Development option of zap.Config +func WithDevelopment(enabled bool) cfgOption { + return cfgOption(func(cfg *zap.Config) { + cfg.Development = enabled + }) +} + +// WithLevel sets level of logging +func WithLevel(level log.Level) cfgOption { + return cfgOption(func(cfg *zap.Config) { + cfg.Level = zap.NewAtomicLevelAt(ZapifyLevel(level)) + }) +} + +// NewDeployConfig returns default configuration (with no sampling). +// Not recommended for production use. 
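//
// A minimal usage sketch, assuming the cfgOption helpers above, NewCustomDeployLogger
// defined below, and the Logger wrapper's exported L field (the "stderr" output path is
// only illustrative):
//
//	cfg := NewDeployConfig(
//		WithLevel(log.DebugLevel),
//		SetOutputPaths([]string{"stderr"}),
//	)
//	logger, err := NewCustomDeployLogger(cfg)
//	if err != nil {
//		// handle the configuration error
//	}
//	logger.L.Info("deploy logger ready")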
+func NewDeployConfig(opts ...cfgOption) zap.Config { + cfg := zap.Config{ + Level: zap.NewAtomicLevelAt(zap.DebugLevel), + Encoding: "json", + OutputPaths: []string{"stdout"}, + ErrorOutputPaths: []string{"stderr"}, + EncoderConfig: NewDeployEncoderConfig(), + } + + for _, opt := range opts { + opt(&cfg) + } + + return cfg +} + +// NewCustomDeployLogger constructs new logger by config cfg +func NewCustomDeployLogger(cfg zap.Config, opts ...zap.Option) (*Logger, error) { + zl, err := cfg.Build(opts...) + if err != nil { + return nil, err + } + + return &Logger{ + L: addDeployContext(zl).(*zap.Logger), + }, nil +} + +// NewDeployLogger constructs fully-fledged Deploy compatible logger +// based on predefined config. See https://deploy.yandex-team.ru/docs/concepts/pod/sidecars/logs/logs#format +// for more information +func NewDeployLogger(level log.Level, opts ...zap.Option) (*Logger, error) { + return NewCustomDeployLogger( + NewDeployConfig( + WithLevel(level), + ), + opts..., + ) +} + +// NewProductionDeployConfig returns configuration, suitable for production use. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. +// Stacktraces are automatically included on logs of ErrorLevel and above. +func NewProductionDeployConfig() zap.Config { + return NewDeployConfig( + WithDevelopment(false), + WithSampling(100, 100), + ) +} + +// Same as NewDeployLogger, but with sampling +func NewProductionDeployLogger(level log.Level, opts ...zap.Option) (*Logger, error) { + return NewCustomDeployLogger( + NewDeployConfig( + WithLevel(level), + WithDevelopment(false), + WithSampling(100, 100), + ), + opts..., + ) +} + +func addDeployContext(i interface{}) interface{} { + switch c := i.(type) { + case *zap.Logger: + return c.With(zap.Namespace("@fields")) + case zapcore.Core: + return c.With([]zapcore.Field{zap.Namespace("@fields")}) + } + return i +} diff --git a/library/go/core/log/zap/encoders/cli.go b/library/go/core/log/zap/encoders/cli.go new file mode 100644 index 0000000000..f19d8527df --- /dev/null +++ b/library/go/core/log/zap/encoders/cli.go @@ -0,0 +1,78 @@ +package encoders + +import ( + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/zapcore" +) + +const ( + // EncoderNameCli is the encoder name to use for zap config + EncoderNameCli = "cli" +) + +var cliPool = sync.Pool{New: func() interface{} { + return &cliEncoder{} +}} + +func getCliEncoder() *cliEncoder { + return cliPool.Get().(*cliEncoder) +} + +type cliEncoder struct { + *kvEncoder +} + +// NewCliEncoder constructs cli encoder +func NewCliEncoder(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) { + return newCliEncoder(cfg), nil +} + +func newCliEncoder(cfg zapcore.EncoderConfig) *cliEncoder { + return &cliEncoder{ + kvEncoder: newKVEncoder(cfg), + } +} + +func (enc *cliEncoder) Clone() zapcore.Encoder { + clone := enc.clone() + _, _ = clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *cliEncoder) clone() *cliEncoder { + clone := getCliEncoder() + clone.kvEncoder = getKVEncoder() + clone.cfg = enc.cfg + clone.openNamespaces = enc.openNamespaces + clone.pool = enc.pool + clone.buf = enc.pool.Get() + return clone +} + +func (enc *cliEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) { + final := enc.clone() + + // Direct write because we do not want to quote message in cli mode + final.buf.AppendString(ent.Message) + + // Add any structured context. 
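// Each field goes through the embedded kvEncoder, so it is rendered as a
// space-separated key=value pair after the raw message.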
+ for _, f := range fields { + f.AddTo(final) + } + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. + if ent.Stack != "" && final.cfg.StacktraceKey != "" { + final.buf.AppendByte('\n') + final.AppendString(ent.Stack) + } + + if final.cfg.LineEnding != "" { + final.AppendString(final.cfg.LineEnding) + } else { + final.AppendString(zapcore.DefaultLineEnding) + } + return final.buf, nil +} diff --git a/library/go/core/log/zap/encoders/gotest/ya.make b/library/go/core/log/zap/encoders/gotest/ya.make new file mode 100644 index 0000000000..8f23accf05 --- /dev/null +++ b/library/go/core/log/zap/encoders/gotest/ya.make @@ -0,0 +1,3 @@ +GO_TEST_FOR(library/go/core/log/zap/encoders) + +END() diff --git a/library/go/core/log/zap/encoders/kv.go b/library/go/core/log/zap/encoders/kv.go new file mode 100644 index 0000000000..8fd6c607c6 --- /dev/null +++ b/library/go/core/log/zap/encoders/kv.go @@ -0,0 +1,386 @@ +package encoders + +import ( + "encoding/base64" + "encoding/json" + "math" + "strings" + "sync" + "time" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/zapcore" +) + +const ( + // EncoderNameKV is the encoder name to use for zap config + EncoderNameKV = "kv" +) + +const ( + // We use ' for quote symbol instead of " so that it doesn't interfere with %q of fmt package + stringQuoteSymbol = '\'' + kvArraySeparator = ',' +) + +var kvPool = sync.Pool{New: func() interface{} { + return &kvEncoder{} +}} + +func getKVEncoder() *kvEncoder { + return kvPool.Get().(*kvEncoder) +} + +type kvEncoder struct { + cfg zapcore.EncoderConfig + pool buffer.Pool + buf *buffer.Buffer + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc *json.Encoder +} + +// NewKVEncoder constructs kv encoder +func NewKVEncoder(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) { + return newKVEncoder(cfg), nil +} + +func newKVEncoder(cfg zapcore.EncoderConfig) *kvEncoder { + pool := buffer.NewPool() + return &kvEncoder{ + cfg: cfg, + pool: pool, + buf: pool.Get(), + } +} + +func (enc *kvEncoder) addElementSeparator() { + if enc.buf.Len() == 0 { + return + } + + enc.buf.AppendByte(' ') +} + +func (enc *kvEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendString(key) + enc.buf.AppendByte('=') +} + +func (enc *kvEncoder) appendFloat(val float64, bitSize int) { + enc.appendArrayItemSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +func (enc *kvEncoder) AddArray(key string, arr zapcore.ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *kvEncoder) AddObject(key string, obj zapcore.ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *kvEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *kvEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *kvEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *kvEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *kvEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *kvEncoder) AddFloat64(key string, 
val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *kvEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *kvEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = enc.pool.Get() + enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +func (enc *kvEncoder) AddReflected(key string, obj interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(obj) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.addKey(key) + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + return err +} + +func (enc *kvEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *kvEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *kvEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *kvEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *kvEncoder) appendArrayItemSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + + switch enc.buf.Bytes()[last] { + case '[', '{', '=': + return + default: + enc.buf.AppendByte(kvArraySeparator) + } +} + +func (enc *kvEncoder) AppendArray(arr zapcore.ArrayMarshaler) error { + enc.appendArrayItemSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *kvEncoder) AppendObject(obj zapcore.ObjectMarshaler) error { + enc.appendArrayItemSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + return err +} + +func (enc *kvEncoder) AppendBool(val bool) { + enc.appendArrayItemSeparator() + enc.buf.AppendBool(val) +} + +func (enc *kvEncoder) AppendByteString(val []byte) { + enc.appendArrayItemSeparator() + _, _ = enc.buf.Write(val) +} + +func (enc *kvEncoder) AppendComplex128(val complex128) { + enc.appendArrayItemSeparator() + r, i := real(val), imag(val) + + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, 64) + enc.buf.AppendByte('+') + enc.buf.AppendFloat(i, 64) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *kvEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + enc.cfg.EncodeDuration(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. 
+ enc.AppendInt64(int64(val)) + } +} + +func (enc *kvEncoder) AppendInt64(val int64) { + enc.appendArrayItemSeparator() + enc.buf.AppendInt(val) +} + +func (enc *kvEncoder) AppendReflected(val interface{}) error { + enc.appendArrayItemSeparator() + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(val) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.addElementSeparator() + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + return err +} + +func (enc *kvEncoder) AppendString(val string) { + enc.appendArrayItemSeparator() + var quotes bool + if strings.ContainsAny(val, " =[]{}") { + quotes = true + } + + if quotes { + enc.buf.AppendByte(stringQuoteSymbol) + } + enc.buf.AppendString(val) + if quotes { + enc.buf.AppendByte(stringQuoteSymbol) + } +} + +func (enc *kvEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + enc.cfg.EncodeTime(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *kvEncoder) AppendUint64(val uint64) { + enc.appendArrayItemSeparator() + enc.buf.AppendUint(val) +} + +func (enc *kvEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } +func (enc *kvEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } +func (enc *kvEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *kvEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *kvEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *kvEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *kvEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *kvEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *kvEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *kvEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *kvEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *kvEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } +func (enc *kvEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *kvEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *kvEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *kvEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *kvEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *kvEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *kvEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *kvEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *kvEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *kvEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *kvEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *kvEncoder) Clone() zapcore.Encoder { + clone := enc.clone() + _, _ = clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *kvEncoder) clone() *kvEncoder { + clone := getKVEncoder() + clone.cfg = enc.cfg + clone.openNamespaces = enc.openNamespaces + clone.pool = enc.pool + clone.buf = enc.pool.Get() + return clone +} + +// nolint: gocyclo +func (enc *kvEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) { + final := enc.clone() + if final.cfg.TimeKey != "" && 
final.cfg.EncodeTime != nil { + final.addElementSeparator() + final.buf.AppendString(final.cfg.TimeKey + "=") + final.cfg.EncodeTime(ent.Time, final) + } + if final.cfg.LevelKey != "" && final.cfg.EncodeLevel != nil { + final.addElementSeparator() + final.buf.AppendString(final.cfg.LevelKey + "=") + final.cfg.EncodeLevel(ent.Level, final) + } + if ent.LoggerName != "" && final.cfg.NameKey != "" { + nameEncoder := final.cfg.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. + nameEncoder = zapcore.FullNameEncoder + } + + final.addElementSeparator() + final.buf.AppendString(final.cfg.NameKey + "=") + nameEncoder(ent.LoggerName, final) + } + if ent.Caller.Defined && final.cfg.CallerKey != "" && final.cfg.EncodeCaller != nil { + final.addElementSeparator() + final.buf.AppendString(final.cfg.CallerKey + "=") + final.cfg.EncodeCaller(ent.Caller, final) + } + + if enc.buf.Len() > 0 { + final.addElementSeparator() + _, _ = final.buf.Write(enc.buf.Bytes()) + } + + // Add the message itself. + if final.cfg.MessageKey != "" { + final.addElementSeparator() + final.buf.AppendString(final.cfg.MessageKey + "=") + final.AppendString(ent.Message) + } + + // Add any structured context. + for _, f := range fields { + f.AddTo(final) + } + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. + if ent.Stack != "" && final.cfg.StacktraceKey != "" { + final.buf.AppendByte('\n') + final.buf.AppendString(ent.Stack) + } + + if final.cfg.LineEnding != "" { + final.buf.AppendString(final.cfg.LineEnding) + } else { + final.buf.AppendString(zapcore.DefaultLineEnding) + } + return final.buf, nil +} diff --git a/library/go/core/log/zap/encoders/kv_test.go b/library/go/core/log/zap/encoders/kv_test.go new file mode 100644 index 0000000000..85778e888f --- /dev/null +++ b/library/go/core/log/zap/encoders/kv_test.go @@ -0,0 +1,121 @@ +package encoders + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func TestKVEncodeEntry(t *testing.T) { + type bar struct { + Key string `json:"key"` + + Val float64 `json:"val"` + } + + type foo struct { + A string `json:"aee"` + B int `json:"bee"` + C float64 `json:"cee"` + D []bar `json:"dee"` + } + + tests := []struct { + desc string + expected string + ent zapcore.Entry + fields []zapcore.Field + }{ + { + desc: "info entry with some fields", + expected: `T=2018-06-19T16:33:42.000Z L=info N=bob M='lob law' so=passes answer=42 common_pie=3.14 ` + + `such={"aee":"lol","bee":123,"cee":0.9999,"dee":[{"key":"pi","val":3.141592653589793},` + + `{"key":"tau","val":6.283185307179586}]} +`, + ent: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Date(2018, 6, 19, 16, 33, 42, 99, time.UTC), + LoggerName: "bob", + Message: "lob law", + }, + fields: []zapcore.Field{ + zap.String("so", "passes"), + zap.Int("answer", 42), + zap.Float64("common_pie", 3.14), + zap.Reflect("such", foo{ + A: "lol", + B: 123, + C: 0.9999, + D: []bar{ + {"pi", 3.141592653589793}, + {"tau", 6.283185307179586}, + }, + }), + }, + }, + { + desc: "info entry with array fields", + expected: `T=2020-06-26T11:13:42.000Z L=info N=alice M='str array' env=test ` + + `intarray=[-5,-7,0,-12] ` + + `uintarray=[1,2,3,4,5] ` + + `strarray=[funny,bunny] ` + + `book=['Alice's Adventures in Wonderland','Lewis Carroll',26-11-1865] ` + + `floatarray=[3.14,-2.17,0.0000000000000000000000000000000000662607]` + "\n", + ent: zapcore.Entry{ + Level: zapcore.InfoLevel, + 
Time: time.Date(2020, 6, 26, 11, 13, 42, 0, time.UTC), + LoggerName: "alice", + Message: "str array", + }, + fields: []zapcore.Field{ + zap.String("env", "test"), + zap.Ints("intarray", []int{-5, -7, 0, -12}), + zap.Uints("uintarray", []uint{1, 2, 3, 4, 5}), + zap.Strings("strarray", []string{"funny", "bunny"}), + zap.Strings("book", []string{"Alice's Adventures in Wonderland", "Lewis Carroll", "26-11-1865"}), + zap.Float32s("floatarray", []float32{3.14, -2.17, 0.662607015e-34}), + }, + }, + { + desc: "corner cases of arrays", + expected: "T=2020-06-26T12:13:42.000Z L=info N=zorg M='str array' cornerequal=['hello=',world] cornerbracket=['is[',jail,']'] cornerbraces=['is{',exit,'}']\n", + ent: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Date(2020, 6, 26, 12, 13, 42, 0, time.UTC), + LoggerName: "zorg", + Message: "str array", + }, + fields: []zapcore.Field{ + zap.Strings("cornerequal", []string{"hello=", "world"}), + zap.Strings("cornerbracket", []string{"is[", "jail", "]"}), + zap.Strings("cornerbraces", []string{"is{", "exit", "}"}), + }, + }, + } + + enc, _ := NewKVEncoder(zapcore.EncoderConfig{ + MessageKey: "M", + LevelKey: "L", + TimeKey: "T", + NameKey: "N", + CallerKey: "C", + StacktraceKey: "S", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + }) + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + buf, err := enc.EncodeEntry(tt.ent, tt.fields) + if assert.NoError(t, err, "Unexpected KV encoding error.") { + assert.Equal(t, tt.expected, buf.String(), "Incorrect encoded KV entry.") + } + buf.Free() + }) + } +} diff --git a/library/go/core/log/zap/encoders/tskv.go b/library/go/core/log/zap/encoders/tskv.go new file mode 100644 index 0000000000..75fafa1e20 --- /dev/null +++ b/library/go/core/log/zap/encoders/tskv.go @@ -0,0 +1,442 @@ +package encoders + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "math" + "strings" + "sync" + "time" + + "github.com/ydb-platform/ydb/library/go/core/xerrors" + "go.uber.org/zap/buffer" + "go.uber.org/zap/zapcore" +) + +const ( + // EncoderNameKV is the encoder name to use for zap config + EncoderNameTSKV = "tskv" +) + +const ( + tskvLineEnding = '\n' + tskvElementSeparator = '\t' + tskvKVSeparator = '=' + tskvMark = "tskv" + tskvArrayStart = '[' + tskvArrayEnd = ']' + tskvArraySeparator = ',' +) + +var tskvKeyEscapeRules = []string{ + `\`, `\\`, + "\t", "\\t", + "\n", "\\n", + "\r", `\r`, + "\x00", `\0`, + "=", `\=`, +} + +var tskvValueEscapeRules = []string{ + `\`, `\\`, + "\t", "\\t", + "\n", `\n`, + "\r", `\r`, + "\x00", `\0`, +} + +type tskvEscaper struct { + keyReplacer *strings.Replacer + valueReplacer *strings.Replacer +} + +func newTSKVEscaper() tskvEscaper { + return tskvEscaper{ + keyReplacer: strings.NewReplacer(tskvKeyEscapeRules...), + valueReplacer: strings.NewReplacer(tskvValueEscapeRules...), + } +} + +func (esc *tskvEscaper) escapeKey(key string) string { + return esc.keyReplacer.Replace(key) +} + +func (esc *tskvEscaper) escapeValue(val string) string { + return esc.valueReplacer.Replace(val) +} + +func hexEncode(val []byte) []byte { + dst := make([]byte, hex.EncodedLen(len(val))) + hex.Encode(dst, val) + return dst +} + +var tskvPool = sync.Pool{New: func() interface{} { + return &tskvEncoder{} +}} + +func getTSKVEncoder() *tskvEncoder { + return tskvPool.Get().(*tskvEncoder) +} + +type tskvEncoder struct { + cfg zapcore.EncoderConfig + pool buffer.Pool + buf 
*buffer.Buffer + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc *json.Encoder + + tskvEscaper tskvEscaper +} + +// NewKVEncoder constructs tskv encoder +func NewTSKVEncoder(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) { + return newTSKVEncoder(cfg), nil +} + +func newTSKVEncoder(cfg zapcore.EncoderConfig) *tskvEncoder { + pool := buffer.NewPool() + return &tskvEncoder{ + cfg: cfg, + pool: pool, + buf: pool.Get(), + tskvEscaper: newTSKVEscaper(), + } +} + +func (enc *tskvEncoder) appendElementSeparator() { + if enc.buf.Len() == 0 { + return + } + + enc.buf.AppendByte(tskvElementSeparator) +} + +func (enc *tskvEncoder) appendArrayItemSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + + switch enc.buf.Bytes()[last] { + case tskvArrayStart, tskvKVSeparator: + return + default: + enc.buf.AppendByte(tskvArraySeparator) + } +} + +func (enc *tskvEncoder) safeAppendKey(key string) { + enc.appendElementSeparator() + enc.buf.AppendString(enc.tskvEscaper.escapeKey(key)) + enc.buf.AppendByte(tskvKVSeparator) +} + +func (enc *tskvEncoder) safeAppendString(val string) { + enc.buf.AppendString(enc.tskvEscaper.escapeValue(val)) +} + +func (enc *tskvEncoder) appendFloat(val float64, bitSize int) { + enc.appendArrayItemSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +func (enc *tskvEncoder) AddArray(key string, arr zapcore.ArrayMarshaler) error { + enc.safeAppendKey(key) + return enc.AppendArray(arr) +} + +func (enc *tskvEncoder) AddObject(key string, obj zapcore.ObjectMarshaler) error { + enc.safeAppendKey(key) + return enc.AppendObject(obj) +} + +func (enc *tskvEncoder) AddBinary(key string, val []byte) { + enc.AddByteString(key, val) +} + +func (enc *tskvEncoder) AddByteString(key string, val []byte) { + enc.safeAppendKey(key) + enc.AppendByteString(val) +} + +func (enc *tskvEncoder) AddBool(key string, val bool) { + enc.safeAppendKey(key) + enc.AppendBool(val) +} + +func (enc *tskvEncoder) AddComplex128(key string, val complex128) { + enc.safeAppendKey(key) + enc.AppendComplex128(val) +} + +func (enc *tskvEncoder) AddDuration(key string, val time.Duration) { + enc.safeAppendKey(key) + enc.AppendDuration(val) +} + +func (enc *tskvEncoder) AddFloat64(key string, val float64) { + enc.safeAppendKey(key) + enc.AppendFloat64(val) +} + +func (enc *tskvEncoder) AddInt64(key string, val int64) { + enc.safeAppendKey(key) + enc.AppendInt64(val) +} + +func (enc *tskvEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = enc.pool.Get() + enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +func (enc *tskvEncoder) AddReflected(key string, obj interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(obj) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.safeAppendKey(key) + enc.safeAppendString(enc.reflectBuf.String()) + return err +} + +// OpenNamespace is not supported due to tskv format design +// See AppendObject() for more details +func (enc *tskvEncoder) OpenNamespace(key string) { + panic("TSKV encoder does not support namespaces") +} + +func (enc *tskvEncoder) AddString(key, val string) { + enc.safeAppendKey(key) + enc.safeAppendString(val) +} + +func (enc *tskvEncoder) AddTime(key string, val time.Time) { + enc.safeAppendKey(key) + 
enc.AppendTime(val) +} + +func (enc *tskvEncoder) AddUint64(key string, val uint64) { + enc.safeAppendKey(key) + enc.AppendUint64(val) +} + +func (enc *tskvEncoder) AppendArray(arr zapcore.ArrayMarshaler) error { + enc.appendArrayItemSeparator() + enc.buf.AppendByte(tskvArrayStart) + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(tskvArrayEnd) + return err +} + +// TSKV format does not support hierarchy data so we can't log Objects here +// The only thing we can do is to implicitly use fmt.Stringer interface +// +// ObjectMarshaler interface requires MarshalLogObject method +// from within MarshalLogObject you only have access to ObjectEncoder methods (AddString, AddBool ...) +// so if you call AddString then object log will be split by \t sign +// but \t is key-value separator and tskv doesn't have another separators +// e.g +// json encoded: objLogFieldName={"innerObjKey1":{"innerObjKey2":"value"}} +// tskv encoded: objLogFieldName={ \tinnerObjKey1={ \tinnerObjKey2=value}} +func (enc *tskvEncoder) AppendObject(obj zapcore.ObjectMarshaler) error { + var err error + + enc.appendArrayItemSeparator() + enc.buf.AppendByte('{') + stringerObj, ok := obj.(fmt.Stringer) + if !ok { + err = xerrors.Errorf("fmt.Stringer implementation required due to marshall into tskv format") + } else { + enc.safeAppendString(stringerObj.String()) + } + enc.buf.AppendByte('}') + + return err +} + +func (enc *tskvEncoder) AppendBool(val bool) { + enc.appendArrayItemSeparator() + enc.buf.AppendBool(val) +} + +func (enc *tskvEncoder) AppendByteString(val []byte) { + enc.appendArrayItemSeparator() + _, _ = enc.buf.Write(hexEncode(val)) +} + +func (enc *tskvEncoder) AppendComplex128(val complex128) { // TODO + enc.appendArrayItemSeparator() + + r, i := real(val), imag(val) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, 64) + enc.buf.AppendByte('+') + enc.buf.AppendFloat(i, 64) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *tskvEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + enc.cfg.EncodeDuration(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds + enc.AppendInt64(int64(val)) + } +} + +func (enc *tskvEncoder) AppendInt64(val int64) { + enc.appendArrayItemSeparator() + enc.buf.AppendInt(val) +} + +func (enc *tskvEncoder) AppendReflected(val interface{}) error { + enc.appendArrayItemSeparator() + + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(val) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.safeAppendString(enc.reflectBuf.String()) + return nil +} + +func (enc *tskvEncoder) AppendString(val string) { + enc.appendArrayItemSeparator() + enc.safeAppendString(val) +} + +func (enc *tskvEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + enc.cfg.EncodeTime(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep output tskv valid. 
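// (val.Unix() yields seconds since the epoch, so the fallback here is second,
// not nanosecond, precision.)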
+ enc.AppendInt64(val.Unix()) + } +} + +func (enc *tskvEncoder) AppendUint64(val uint64) { + enc.appendArrayItemSeparator() + enc.buf.AppendUint(val) +} + +func (enc *tskvEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } +func (enc *tskvEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } +func (enc *tskvEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *tskvEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *tskvEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *tskvEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *tskvEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *tskvEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *tskvEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *tskvEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *tskvEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *tskvEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } +func (enc *tskvEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *tskvEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *tskvEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *tskvEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *tskvEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *tskvEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *tskvEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *tskvEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *tskvEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *tskvEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *tskvEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *tskvEncoder) Clone() zapcore.Encoder { + clone := enc.clone() + _, _ = clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *tskvEncoder) clone() *tskvEncoder { + clone := getTSKVEncoder() + clone.cfg = enc.cfg + clone.pool = enc.pool + clone.buf = enc.pool.Get() + clone.tskvEscaper = enc.tskvEscaper + return clone +} + +// nolint: gocyclo +func (enc *tskvEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) { + final := enc.clone() + final.AppendString(tskvMark) + + if final.cfg.TimeKey != "" && final.cfg.EncodeTime != nil { + final.safeAppendKey(final.cfg.TimeKey) + final.cfg.EncodeTime(ent.Time, final) + } + if final.cfg.LevelKey != "" && final.cfg.EncodeLevel != nil { + final.safeAppendKey(final.cfg.LevelKey) + final.cfg.EncodeLevel(ent.Level, final) + } + if ent.LoggerName != "" && final.cfg.NameKey != "" { + nameEncoder := final.cfg.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. + nameEncoder = zapcore.FullNameEncoder + } + + final.safeAppendKey(final.cfg.NameKey) + nameEncoder(ent.LoggerName, final) + } + if ent.Caller.Defined && final.cfg.CallerKey != "" && final.cfg.EncodeCaller != nil { + final.safeAppendKey(final.cfg.CallerKey) + final.cfg.EncodeCaller(ent.Caller, final) + } + + if enc.buf.Len() > 0 { + final.appendElementSeparator() + _, _ = final.buf.Write(enc.buf.Bytes()) + } + + // Add the message itself. 
+ if final.cfg.MessageKey != "" { + final.safeAppendKey(final.cfg.MessageKey) + final.safeAppendString(ent.Message) + } + + // Add any structured context. + for _, f := range fields { + f.AddTo(final) + } + + if ent.Stack != "" && final.cfg.StacktraceKey != "" { + final.safeAppendKey(final.cfg.StacktraceKey) + final.safeAppendString(ent.Stack) + } + + if final.cfg.LineEnding != "" { + final.buf.AppendString(final.cfg.LineEnding) + } else { + final.buf.AppendByte(tskvLineEnding) + } + + return final.buf, nil +} diff --git a/library/go/core/log/zap/encoders/tskv_test.go b/library/go/core/log/zap/encoders/tskv_test.go new file mode 100644 index 0000000000..44a74111a3 --- /dev/null +++ b/library/go/core/log/zap/encoders/tskv_test.go @@ -0,0 +1,600 @@ +package encoders + +import ( + "errors" + "fmt" + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "go.uber.org/zap/buffer" + "go.uber.org/zap/zapcore" +) + +func TestTSKVEscaper(t *testing.T) { + tests := []struct { + input string + expectedKey string + expectedValue string + desc string + }{ + { + input: "plain text$ no need to escape", + expectedKey: "plain text$ no need to escape", + expectedValue: "plain text$ no need to escape", + desc: "test without escape", + }, { + input: "test escape\tab", + expectedKey: `test escape\tab`, + expectedValue: `test escape\tab`, + desc: "escape tab", + }, + { + input: "\ntest es\\cape\t\t a\rll char\x00s in string=", + expectedKey: `\ntest es\\cape\t\t a\rll char\0s in string\=`, + expectedValue: `\ntest es\\cape\t\t a\rll char\0s in string=`, + desc: "escape all chars", + }, + } + esc := newTSKVEscaper() + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + cur := esc.escapeKey(tt.input) + assert.Equal(t, tt.expectedKey, cur, "Incorrect escaped TSKV key.") + }) + + t.Run(tt.desc, func(t *testing.T) { + cur := esc.escapeValue(tt.input) + assert.Equal(t, tt.expectedValue, cur, "Incorrect escaped TSKV value.") + }) + } +} + +type noJSON struct{} + +func (nj noJSON) MarshalJSON() ([]byte, error) { + return nil, errors.New("no") +} + +type nonloggable struct{} + +func (l nonloggable) MarshalLogObject(enc zapcore.ObjectEncoder) error { + return nil +} + +type loggable struct { + bool bool + spec string +} + +func (l loggable) MarshalLogObject(enc zapcore.ObjectEncoder) error { + return nil +} + +func (l loggable) String() string { + + return fmt.Sprintf("loggable%s=%t%s", l.spec, l.bool, l.spec) +} + +func (l loggable) MarshalLogArray(enc zapcore.ArrayEncoder) error { + if !l.bool { + return errors.New("can't marshal") + } + enc.AppendBool(l.bool) + return nil +} + +type loggables int + +func (ls loggables) MarshalLogArray(enc zapcore.ArrayEncoder) error { + l := loggable{true, ""} + for i := 0; i < int(ls); i++ { + if err := enc.AppendObject(l); err != nil { + return err + } + } + return nil +} + +func getCommonTestConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + MessageKey: "M", + LevelKey: "L", + TimeKey: "T", + NameKey: "N", + CallerKey: "C", + StacktraceKey: "S", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +func getSpecCharsTestConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + MessageKey: "M\t", + LevelKey: "L\n", + TimeKey: "T\r", + NameKey: "N\x00", + CallerKey: "C=", + StacktraceKey: "S\\", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: 
zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +func getRawTskvEncoder() *tskvEncoder { + pool := buffer.NewPool() + cfg := getCommonTestConfig() + return &tskvEncoder{ + cfg: cfg, + pool: pool, + buf: pool.Get(), + tskvEscaper: newTSKVEscaper(), + } + +} + +func assertOutput(t testing.TB, expected string, f func(encoder zapcore.Encoder)) { + enc := getRawTskvEncoder() + f(enc) + assert.Equal(t, expected, enc.buf.String(), "Unexpected encoder output after adding.") + + enc.buf.Reset() + enc.AddString("foo", "bar") + f(enc) + expectedPrefix := `foo=bar` + if expected != "" { + // If we expect output, it should be tab-separated from the previous + // field. + expectedPrefix += "\t" + } + assert.Equal(t, expectedPrefix+expected, enc.buf.String(), "Unexpected encoder output after adding as a second field.") +} + +func TestTSKVEncoderObjectFields(t *testing.T) { + tests := []struct { + desc string + expected string + f func(encoder zapcore.Encoder) + }{ + {"binary", `k=61623132`, func(e zapcore.Encoder) { e.AddBinary("k", []byte("ab12")) }}, + {"binary esc ", `k\n=61623132`, func(e zapcore.Encoder) { e.AddBinary("k\n", []byte("ab12")) }}, + {"bool", `k=true`, func(e zapcore.Encoder) { e.AddBool("k", true) }}, + {"bool", `k\t=false`, func(e zapcore.Encoder) { e.AddBool("k\t", false) }}, + + {"byteString", `k=765c`, func(e zapcore.Encoder) { e.AddByteString(`k`, []byte(`v\`)) }}, + {"byteString esc", `k\t=61623132`, func(e zapcore.Encoder) { e.AddByteString("k\t", []byte("ab12")) }}, + {"byteString empty val", `k=`, func(e zapcore.Encoder) { e.AddByteString("k", []byte{}) }}, + {"byteString nil val", `k=`, func(e zapcore.Encoder) { e.AddByteString("k", nil) }}, + + {"complex128", `k="1+2i"`, func(e zapcore.Encoder) { e.AddComplex128("k", 1+2i) }}, + {"complex128 esc", `k\t="1+2i"`, func(e zapcore.Encoder) { e.AddComplex128("k\t", 1+2i) }}, + {"complex64", `k="1+2i"`, func(e zapcore.Encoder) { e.AddComplex64("k", 1+2i) }}, + {"complex64 esc", `k\t="1+2i"`, func(e zapcore.Encoder) { e.AddComplex64("k\t", 1+2i) }}, + + {"duration", `k$=0.000000001`, func(e zapcore.Encoder) { e.AddDuration("k$", 1) }}, + {"duration esc", `k\t=0.000000001`, func(e zapcore.Encoder) { e.AddDuration("k\t", 1) }}, + + {"float64", `k=1`, func(e zapcore.Encoder) { e.AddFloat64("k", 1.0) }}, + {"float64 esc", `k\t=1`, func(e zapcore.Encoder) { e.AddFloat64("k\t", 1.0) }}, + {"float64", `k=10000000000`, func(e zapcore.Encoder) { e.AddFloat64("k", 1e10) }}, + {"float64", `k="NaN"`, func(e zapcore.Encoder) { e.AddFloat64("k", math.NaN()) }}, + {"float64", `k="+Inf"`, func(e zapcore.Encoder) { e.AddFloat64("k", math.Inf(1)) }}, + {"float64", `k="-Inf"`, func(e zapcore.Encoder) { e.AddFloat64("k", math.Inf(-1)) }}, + + {"float32", `k=1`, func(e zapcore.Encoder) { e.AddFloat32("k", 1.0) }}, + {"float32", `k\t=1`, func(e zapcore.Encoder) { e.AddFloat32("k\t", 1.0) }}, + {"float32", `k=10000000000`, func(e zapcore.Encoder) { e.AddFloat32("k", 1e10) }}, + {"float32", `k="NaN"`, func(e zapcore.Encoder) { e.AddFloat32("k", float32(math.NaN())) }}, + {"float32", `k="+Inf"`, func(e zapcore.Encoder) { e.AddFloat32("k", float32(math.Inf(1))) }}, + {"float32", `k="-Inf"`, func(e zapcore.Encoder) { e.AddFloat32("k", float32(math.Inf(-1))) }}, + + {"int", `k=42`, func(e zapcore.Encoder) { e.AddInt("k", 42) }}, + {"int esc", `k\t=42`, func(e zapcore.Encoder) { e.AddInt("k\t", 42) }}, + {"int64", `k=42`, func(e zapcore.Encoder) { e.AddInt64("k", 42) }}, + 
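// The narrower signed and unsigned integer variants below are widened to 64 bits
// by the corresponding Add* helpers, so they encode identically to int64/uint64.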
{"int32", `k=42`, func(e zapcore.Encoder) { e.AddInt32("k", 42) }}, + {"int16", `k=42`, func(e zapcore.Encoder) { e.AddInt16("k", 42) }}, + {"int8", `k=42`, func(e zapcore.Encoder) { e.AddInt8("k", 42) }}, + + {"string", `k=v$`, func(e zapcore.Encoder) { e.AddString("k", "v$") }}, + {"string esc", `k\t=v\\`, func(e zapcore.Encoder) { e.AddString("k\t", `v\`) }}, + {"string", `k=`, func(e zapcore.Encoder) { e.AddString("k", "") }}, + + {"time", `k=1`, func(e zapcore.Encoder) { e.AddTime("k", time.Unix(1, 0)) }}, + {"time esc", `k\t=1`, func(e zapcore.Encoder) { e.AddTime("k\t", time.Unix(1, 0)) }}, + + {"uint", `k=42`, func(e zapcore.Encoder) { e.AddUint("k", 42) }}, + {"uint esc", `k\t=42`, func(e zapcore.Encoder) { e.AddUint("k\t", 42) }}, + {"uint64", `k=42`, func(e zapcore.Encoder) { e.AddUint64("k", 42) }}, + {"uint32", `k=42`, func(e zapcore.Encoder) { e.AddUint32("k", 42) }}, + {"uint16", `k=42`, func(e zapcore.Encoder) { e.AddUint16("k", 42) }}, + {"uint8", `k=42`, func(e zapcore.Encoder) { e.AddUint8("k", 42) }}, + {"uintptr", `k=42`, func(e zapcore.Encoder) { e.AddUintptr("k", 42) }}, + { + desc: "object (success)", + expected: `k={loggable=true}`, + f: func(e zapcore.Encoder) { + assert.NoError(t, e.AddObject("k", loggable{true, ""}), "Unexpected error calling AddObject.") + }, + }, + { + desc: "object esc (success)", + expected: `k={loggable\t=true\t}`, + f: func(e zapcore.Encoder) { + assert.NoError(t, e.AddObject("k", loggable{true, "\t"}), "Unexpected error calling AddObject.") + }, + }, + { + desc: "object (error)", + expected: `k={}`, + f: func(e zapcore.Encoder) { + assert.Error(t, e.AddObject("k", nonloggable{}), "Expected an error calling AddObject.") + }, + }, + { + desc: "array (with nested object)", + expected: `loggables=[{loggable=true},{loggable=true}]`, + f: func(e zapcore.Encoder) { + assert.NoError( + t, + e.AddArray("loggables", loggables(2)), + "Unexpected error calling AddObject with nested ArrayMarshalers.", + ) + }, + }, + { + desc: "array (success)", + expected: `k=[true]`, + f: func(e zapcore.Encoder) { + assert.NoError(t, e.AddArray(`k`, loggable{true, ""}), "Unexpected error calling MarshalLogArray.") + }, + }, + { + desc: "array esc (success)", + expected: `k\t=[true]`, + f: func(e zapcore.Encoder) { + assert.NoError(t, e.AddArray("k\t", loggable{true, ""}), "Unexpected error calling MarshalLogArray.") + }, + }, + { + desc: "array (error)", + expected: `k=[]`, + f: func(e zapcore.Encoder) { + assert.Error(t, e.AddArray("k", loggable{false, ""}), "Expected an error calling MarshalLogArray.") + }, + }, + { + desc: "reflect enc (success)", + expected: `k\t={"aee":"l=l","bee":123,"cee":0.9999,"dee":[{"key":"p\\ni","val":3.141592653589793},{"key":"tau=","val":6.283185307179586}]}`, + f: func(e zapcore.Encoder) { + type bar struct { + Key string `json:"key"` + Val float64 `json:"val"` + } + + type foo struct { + A string `json:"aee"` + B int `json:"bee"` + C float64 `json:"cee"` + D []bar `json:"dee"` + } + + assert.NoError(t, e.AddReflected("k\t", foo{ + A: "l=l", + B: 123, + C: 0.9999, + D: []bar{ + {"p\ni", 3.141592653589793}, + {"tau=", 6.283185307179586}, + }, + }), "Unexpected error JSON-serializing a map.") + }, + }, + { + desc: "reflect (failure)", + expected: "", + f: func(e zapcore.Encoder) { + assert.Error(t, e.AddReflected("k", noJSON{}), "Unexpected success JSON-serializing a noJSON.") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + assertOutput(t, tt.expected, tt.f) + }) + } +} + +func 
TestTskvEncoderOpenNamespace(t *testing.T) { + enc := getRawTskvEncoder() + assert.PanicsWithValue(t, "TSKV encoder does not support namespaces", func() { enc.OpenNamespace("test") }) +} + +func TestTSKVEncoderArrays(t *testing.T) { + tests := []struct { + desc string + expected string // expect f to be called twice + f func(zapcore.ArrayEncoder) + }{ + {"bool", `[true,true]`, func(e zapcore.ArrayEncoder) { e.AppendBool(true) }}, + {"byteString", `[6b,6b]`, func(e zapcore.ArrayEncoder) { e.AppendByteString([]byte("k")) }}, + {"byteString", `[6b5c,6b5c]`, func(e zapcore.ArrayEncoder) { e.AppendByteString([]byte(`k\`)) }}, + {"complex128", `["1+2i","1+2i"]`, func(e zapcore.ArrayEncoder) { e.AppendComplex128(1 + 2i) }}, + {"complex64", `["1+2i","1+2i"]`, func(e zapcore.ArrayEncoder) { e.AppendComplex64(1 + 2i) }}, + {"durations", `[0.000000002,0.000000002]`, func(e zapcore.ArrayEncoder) { e.AppendDuration(2) }}, + {"float64", `[3.14,3.14]`, func(e zapcore.ArrayEncoder) { e.AppendFloat64(3.14) }}, + {"float32", `[3.14,3.14]`, func(e zapcore.ArrayEncoder) { e.AppendFloat32(3.14) }}, + {"int", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendInt(42) }}, + {"int64", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendInt64(42) }}, + {"int32", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendInt32(42) }}, + {"int16", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendInt16(42) }}, + {"int8", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendInt8(42) }}, + {"string", `[k,k]`, func(e zapcore.ArrayEncoder) { e.AppendString("k") }}, + {"string", `[k\\,k\\]`, func(e zapcore.ArrayEncoder) { e.AppendString(`k\`) }}, + {"times", `[1,1]`, func(e zapcore.ArrayEncoder) { e.AppendTime(time.Unix(1, 0)) }}, + {"uint", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendUint(42) }}, + {"uint64", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendUint64(42) }}, + {"uint32", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendUint32(42) }}, + {"uint16", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendUint16(42) }}, + {"uint8", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendUint8(42) }}, + {"uintptr", `[42,42]`, func(e zapcore.ArrayEncoder) { e.AppendUintptr(42) }}, + { + desc: "arrays (success)", + expected: `[[true],[true]]`, + f: func(arr zapcore.ArrayEncoder) { + assert.NoError(t, arr.AppendArray(zapcore.ArrayMarshalerFunc(func(inner zapcore.ArrayEncoder) error { + inner.AppendBool(true) + return nil + })), "Unexpected error appending an array.") + }, + }, + { + desc: "arrays (error)", + expected: `[[true],[true]]`, + f: func(arr zapcore.ArrayEncoder) { + assert.Error(t, arr.AppendArray(zapcore.ArrayMarshalerFunc(func(inner zapcore.ArrayEncoder) error { + inner.AppendBool(true) + return errors.New("fail") + })), "Expected an error appending an array.") + }, + }, + { + desc: "objects (success)", + expected: `[{loggable=true},{loggable=true}]`, + f: func(arr zapcore.ArrayEncoder) { + assert.NoError(t, arr.AppendObject(loggable{true, ""}), "Unexpected error appending an object.") + }, + }, + { + desc: "objects esc (success)", + expected: `[{loggable\t=true\t},{loggable\t=true\t}]`, + f: func(arr zapcore.ArrayEncoder) { + assert.NoError(t, arr.AppendObject(loggable{true, "\t"}), "Unexpected error appending an object.") + }, + }, + { + desc: "objects (error: fmt.Stringer not implemented)", + expected: `[{},{}]`, + f: func(arr zapcore.ArrayEncoder) { + assert.Error(t, arr.AppendObject(nonloggable{}), "Expected an error appending an object.") + }, + }, + { + desc: "reflect (success)", + expected: 
`[{"foo":5},{"foo":5}]`, + f: func(arr zapcore.ArrayEncoder) { + assert.NoError( + t, + arr.AppendReflected(map[string]int{"foo": 5}), + "Unexpected an error appending an object with reflection.", + ) + }, + }, + { + desc: "reflect esc (success)", + expected: `[{"foo\\t":5},{"foo\\t":5}]`, + f: func(arr zapcore.ArrayEncoder) { + assert.NoError( + t, + arr.AppendReflected(map[string]int{"foo\t": 5}), + "Unexpected an error appending an object with reflection.", + ) + }, + }, + { + desc: "reflect (error)", + expected: `[]`, + f: func(arr zapcore.ArrayEncoder) { + assert.Error( + t, + arr.AppendReflected(noJSON{}), + "Unexpected an error appending an object with reflection.", + ) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + f := func(enc zapcore.Encoder) error { + return enc.AddArray("array", zapcore.ArrayMarshalerFunc(func(arr zapcore.ArrayEncoder) error { + tt.f(arr) + tt.f(arr) + return nil + })) + } + assertOutput(t, `array=`+tt.expected, func(enc zapcore.Encoder) { + err := f(enc) + assert.NoError(t, err, "Unexpected error adding array to JSON encoder.") + }) + }) + } +} + +func TestTSKVEncodeEntry(t *testing.T) { + entryTime := time.Date(2019, 7, 13, 15, 33, 42, 99, time.UTC) + + tests := []struct { + desc string + expected string + cnf zapcore.EncoderConfig + ent zapcore.Entry + fields []zapcore.Field + }{ + { + desc: "entry without escape", + expected: `tskv T=1563032022 L=info M=text here +`, + cnf: getCommonTestConfig(), + ent: zapcore.Entry{ + Time: entryTime, + Message: "text here", + }, + fields: []zapcore.Field{}, + }, + { + desc: "all fields entry without escape", + expected: `tskv T=1563032022 L=debug N=bob C=foo.go:42 M=text here S=fake-stack +`, + cnf: getCommonTestConfig(), + ent: zapcore.Entry{ + Level: zapcore.DebugLevel, + Time: entryTime, + LoggerName: "bob", + Message: "text here", + Caller: zapcore.EntryCaller{Defined: true, File: "foo.go", Line: 42}, + Stack: "fake-stack", + }, + fields: []zapcore.Field{}, + }, + { + desc: "entry with escaped field names", + expected: `tskv T\r=1563032022 L\n=debug N\0=bob C\==foo.go:42 M\t=text here S\\=fake-stack +`, + cnf: getSpecCharsTestConfig(), + ent: zapcore.Entry{ + Level: zapcore.DebugLevel, + Time: entryTime, + LoggerName: "bob", + Message: "text here", + Caller: zapcore.EntryCaller{Defined: true, File: "foo.go", Line: 42}, + Stack: "fake-stack", + }, + fields: []zapcore.Field{}, + }, + { + desc: "entry message escape", + expected: `tskv T=1563032022 L=info M=t\\ex=t\0he\r\tre\n +`, + cnf: getCommonTestConfig(), + ent: zapcore.Entry{ + Time: entryTime, + Message: "t\\ex=t\x00he\r\tre\n", + }, + fields: []zapcore.Field{}, + }, + { + desc: "entry multi-line stack escape", + expected: `tskv T=1563032022 L=info M= S=fake-st\rack\n\tlevel2\n\tlevel1 +`, + cnf: getCommonTestConfig(), + ent: zapcore.Entry{ + Time: entryTime, + Stack: "fake-st\rack\n\tlevel2\n\tlevel1", + }, + fields: []zapcore.Field{}, + }, + { + desc: "entry multi-line caller escape", + expected: `tskv T=1563032022 L=info C=fo\to.go:42 M= +`, + cnf: getCommonTestConfig(), + ent: zapcore.Entry{ + Time: entryTime, + Caller: zapcore.EntryCaller{Defined: true, File: "fo\to.go", Line: 42}, + }, + fields: []zapcore.Field{}, + }, + { + desc: "entry multi-line logger escape", + expected: `tskv T=1563032022 L=info N=b\0b M= +`, + cnf: getCommonTestConfig(), + ent: zapcore.Entry{ + Time: entryTime, + LoggerName: "b\x00b", + }, + fields: []zapcore.Field{}, + }, + { + desc: "entry with additional zap fields", + expected: `tskv 
T=1563032022 L=info M= so=passes answer=42 common_pie=3.14 ` + + `reflect={"loggable":"yes"} bytes_array=0001020309 bool=true complex="0+1i" +`, + cnf: getCommonTestConfig(), + ent: zapcore.Entry{ + Time: entryTime, + }, + fields: []zapcore.Field{ + zap.String("so", "passes"), + zap.Int("answer", 42), + zap.Float64("common_pie", 3.14), + zap.Reflect("reflect", map[string]string{"loggable": "yes"}), + zap.Binary("bytes_array", []byte{0, 1, 2, 3, '\t'}), + zap.Bool("bool", true), + zap.Complex128("complex", 1i)}, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + enc, err := NewTSKVEncoder(tt.cnf) + if err != nil { + panic(err) + } + + buf, err := enc.EncodeEntry(tt.ent, tt.fields) + if assert.NoError(t, err, "Unexpected TSKV encoding error.") { + assert.Equal(t, tt.expected, buf.String(), "Incorrect encoded TSKV entry.") + } + buf.Free() + }) + } +} + +func TestTskvEncoderLoggerWithMethod(t *testing.T) { + entryTime := time.Date(2019, 7, 13, 15, 33, 42, 99, time.UTC) + + enc := getRawTskvEncoder() + enc.AddString("Permanent", "message") + enc.Clone() + tt := struct { + desc string + expected string + ent zapcore.Entry + }{ + desc: "entry without escape", + expected: `tskv T=1563032022 L=info Permanent=message M=text here +`, + ent: zapcore.Entry{ + Time: entryTime, + Message: "text here", + }, + } + + for i := 0; i < 3; i++ { + t.Run(tt.desc, func(t *testing.T) { + buf, err := enc.EncodeEntry(tt.ent, []zapcore.Field{}) + if assert.NoError(t, err, "Unexpected TSKV encoding error.") { + assert.Equal(t, tt.expected, buf.String(), "Incorrect encoded TSKV entry.") + } + }) + } +} diff --git a/library/go/core/log/zap/encoders/ya.make b/library/go/core/log/zap/encoders/ya.make new file mode 100644 index 0000000000..8d71ae0b1b --- /dev/null +++ b/library/go/core/log/zap/encoders/ya.make @@ -0,0 +1,16 @@ +GO_LIBRARY() + +SRCS( + cli.go + kv.go + tskv.go +) + +GO_TEST_SRCS( + kv_test.go + tskv_test.go +) + +END() + +RECURSE(gotest) diff --git a/library/go/core/log/zap/gotest/ya.make b/library/go/core/log/zap/gotest/ya.make new file mode 100644 index 0000000000..f971bead7d --- /dev/null +++ b/library/go/core/log/zap/gotest/ya.make @@ -0,0 +1,3 @@ +GO_TEST_FOR(library/go/core/log/zap) + +END() diff --git a/library/go/core/log/zap/logrotate/error.go b/library/go/core/log/zap/logrotate/error.go new file mode 100644 index 0000000000..f59b322591 --- /dev/null +++ b/library/go/core/log/zap/logrotate/error.go @@ -0,0 +1,5 @@ +package logrotate + +import "errors" + +var ErrNotSupported = errors.New("logrotate sink is not supported on your platform") diff --git a/library/go/core/log/zap/logrotate/example_sink_test.go b/library/go/core/log/zap/logrotate/example_sink_test.go new file mode 100644 index 0000000000..871081439c --- /dev/null +++ b/library/go/core/log/zap/logrotate/example_sink_test.go @@ -0,0 +1,54 @@ +//go:build linux || darwin +// +build linux darwin + +package logrotate_test + +import ( + "net/url" + "path/filepath" + "syscall" + + "github.com/ydb-platform/ydb/library/go/core/log" + "github.com/ydb-platform/ydb/library/go/core/log/zap" + "github.com/ydb-platform/ydb/library/go/core/log/zap/logrotate" + uberzap "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func Example_simpleUsage() { + // Basic usage, when you don't need any custom preferences is quite easy. 
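// The sink is registered under a URL scheme (the default one is `logrotate://`),
// so it can be selected purely through the OutputPaths of the zap config.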
+ // register our logrotate sink and force it to reopen files on sighup(remember to check for errors) + _ = logrotate.RegisterLogrotateSink(syscall.SIGHUP) + // create zap logger as usual, using `logrotate://` instead of omitting it or using `file://` + cfg := zap.JSONConfig(log.DebugLevel) + logPath, _ := filepath.Abs("./example.log") + cfg.OutputPaths = []string{"logrotate://" + logPath} + logger, _ := zap.New(cfg) + // That's all, when your process receives SIGHUP file will be reopened + logger.Debug("this log should be reopened by SIGHUP") +} + +func Example_namedUsage() { + // Note: each scheme can be registered only once and can not be unregistered + // If you want to provide custom unused scheme name(remember to check for errors): + _ = logrotate.RegisterNamedLogrotateSink("rotate-usr1", syscall.SIGUSR1) + // Now we create logger using that cheme + cfg := zap.JSONConfig(log.DebugLevel) + logPath, _ := filepath.Abs("./example.log") + cfg.OutputPaths = []string{"rotate-usr1://" + logPath} + logger, _ := zap.New(cfg) + // Now file will be reopened by SIGUSR1 + logger.Debug("this log should be reopened by SIGHUP") +} + +func Example_standaloneUsage() { + // If you don't want to register scheme, or use custom logging core you can do this(remember to check for errors): + u, _ := url.ParseRequestURI("/tmp/example.log") + sink, _ := logrotate.NewLogrotateSink(u, syscall.SIGHUP) + + encoder := zapcore.NewConsoleEncoder(zapcore.EncoderConfig{MessageKey: "msg"}) + core := zapcore.NewCore(encoder, sink, uberzap.NewAtomicLevel()) + logger := uberzap.New(core) + // Now file will be reopened by SIGHUP + logger.Debug("this log should be reopened by SIGHUP") +} diff --git a/library/go/core/log/zap/logrotate/gotest/ya.make b/library/go/core/log/zap/logrotate/gotest/ya.make new file mode 100644 index 0000000000..03fe7d8484 --- /dev/null +++ b/library/go/core/log/zap/logrotate/gotest/ya.make @@ -0,0 +1,3 @@ +GO_TEST_FOR(library/go/core/log/zap/logrotate) + +END() diff --git a/library/go/core/log/zap/logrotate/sink.go b/library/go/core/log/zap/logrotate/sink.go new file mode 100644 index 0000000000..c8f3f14a3f --- /dev/null +++ b/library/go/core/log/zap/logrotate/sink.go @@ -0,0 +1,121 @@ +//go:build darwin || freebsd || linux +// +build darwin freebsd linux + +package logrotate + +import ( + "fmt" + "net/url" + "os" + "os/signal" + "sync/atomic" + "unsafe" + + "github.com/ydb-platform/ydb/library/go/core/xerrors" + "go.uber.org/zap" +) + +const defaultSchemeName = "logrotate" + +// Register logrotate sink in zap sink registry. +// This sink internally is like file sink, but listens to provided logrotate signal +// and reopens file when that signal is delivered +// This can be called only once. Any future calls will result in an error +func RegisterLogrotateSink(sig ...os.Signal) error { + return RegisterNamedLogrotateSink(defaultSchemeName, sig...) +} + +// Same as RegisterLogrotateSink, but use provided schemeName instead of default `logrotate` +// Can be useful in special cases for registering different types of sinks for different signal +func RegisterNamedLogrotateSink(schemeName string, sig ...os.Signal) error { + factory := func(url *url.URL) (sink zap.Sink, e error) { + return NewLogrotateSink(url, sig...) 
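	// (illustrative note, not part of this commit) zap.RegisterSink below ties
	// schemeName to this factory, so an OutputPaths entry such as
	// "logrotate:///var/log/app.log" (hypothetical path) is resolved to this sink
	// when the zap Config is built. A scheme can be registered only once per process.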
+ } + return zap.RegisterSink(schemeName, factory) +} + +// sink itself, use RegisterLogrotateSink to register it in zap machinery +type sink struct { + path string + notifier chan os.Signal + file unsafe.Pointer +} + +// Factory for logrotate sink, which accepts os.Signals to listen to for reloading +// Generally if you don't build your own core it is used by zap machinery. +// See RegisterLogrotateSink. +func NewLogrotateSink(u *url.URL, sig ...os.Signal) (zap.Sink, error) { + notifier := make(chan os.Signal, 1) + signal.Notify(notifier, sig...) + + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with logrotate file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with logrotate file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. + if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with logrotate file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("logrotate file URLs must leave host empty or use localhost: got %v", u) + } + + sink := &sink{ + path: u.Path, + notifier: notifier, + } + if err := sink.reopen(); err != nil { + return nil, err + } + go sink.listenToSignal() + return sink, nil +} + +// wait for signal delivery or chanel close +func (m *sink) listenToSignal() { + for { + _, ok := <-m.notifier + if !ok { + return + } + if err := m.reopen(); err != nil { + // Last chance to signalize about an error + _, _ = fmt.Fprintf(os.Stderr, "%s", err) + } + } +} + +func (m *sink) reopen() error { + file, err := os.OpenFile(m.path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + if err != nil { + return xerrors.Errorf("failed to open log file on %s: %w", m.path, err) + } + old := (*os.File)(m.file) + atomic.StorePointer(&m.file, unsafe.Pointer(file)) + if old != nil { + if err := old.Close(); err != nil { + return xerrors.Errorf("failed to close old file: %w", err) + } + } + return nil +} + +func (m *sink) getFile() *os.File { + return (*os.File)(atomic.LoadPointer(&m.file)) +} + +func (m *sink) Close() error { + signal.Stop(m.notifier) + close(m.notifier) + return m.getFile().Close() +} + +func (m *sink) Write(p []byte) (n int, err error) { + return m.getFile().Write(p) +} + +func (m *sink) Sync() error { + return m.getFile().Sync() +} diff --git a/library/go/core/log/zap/logrotate/sink_stub.go b/library/go/core/log/zap/logrotate/sink_stub.go new file mode 100644 index 0000000000..389a55b812 --- /dev/null +++ b/library/go/core/log/zap/logrotate/sink_stub.go @@ -0,0 +1,23 @@ +//go:build !darwin && !freebsd && !linux +// +build !darwin,!freebsd,!linux + +package logrotate + +import ( + "net/url" + "os" + + "go.uber.org/zap" +) + +func RegisterLogrotateSink(sig ...os.Signal) error { + return ErrNotSupported +} + +func RegisterNamedLogrotateSink(schemeName string, sig ...os.Signal) error { + return ErrNotSupported +} + +func NewLogrotateSink(u *url.URL, sig ...os.Signal) (zap.Sink, error) { + return nil, ErrNotSupported +} diff --git a/library/go/core/log/zap/logrotate/sink_test.go b/library/go/core/log/zap/logrotate/sink_test.go new file mode 100644 index 0000000000..e434a0ef8a --- /dev/null +++ b/library/go/core/log/zap/logrotate/sink_test.go @@ -0,0 +1,86 @@ +//go:build linux || darwin +// +build linux darwin + +package logrotate + +import ( + "io" + "os" + "path/filepath" + "strings" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" + 
"github.com/ydb-platform/ydb/library/go/core/log" + "github.com/ydb-platform/ydb/library/go/core/log/zap" +) + +func TestLogrotateSink(t *testing.T) { + testLogFilename := "test.log" + testDir := "testLogrotate" + + // use test dir in default temp files location + tempDir, err := os.MkdirTemp("", testDir) + require.NoError(t, err, "failed to create temporary directory %s", testDir) + + testLogPath := filepath.Join(tempDir, testLogFilename) + + defer func() { + _ = os.RemoveAll(tempDir) + }() // clean up + + err = RegisterLogrotateSink(syscall.SIGUSR1) + require.NoError(t, err, "failed to register sink") + + // Double registration is not allowed + err = RegisterLogrotateSink(syscall.SIGUSR1) + require.Error(t, err) + + cfg := zap.JSONConfig(log.DebugLevel) + cfg.OutputPaths = []string{"logrotate://" + testLogPath} + logger, err := zap.New(cfg) + require.NoError(t, err, "failed to create logger") + + testLogFile, err := os.OpenFile(testLogPath, os.O_RDONLY, 0) + require.NoError(t, err, "expected logger to create file: %v", err) + defer func() { + _ = testLogFile.Close() + }() + + // test write to file + logger.Debug("test") + logger.Debug("test") + + err = os.Rename(testLogPath, testLogPath+".rotated") + require.NoError(t, err, "failed to rename file") + + err = syscall.Kill(syscall.Getpid(), syscall.SIGUSR1) + require.NoError(t, err, "failed to send signal to self, %v", err) + + // There is an essential race that we can not control of delivering signal, + // so we just wait enough here + time.Sleep(time.Second) + + logger.Debug("test") + logger.Debug("test") + logger.Debug("test") + + // Reopen file to sync content + err = syscall.Kill(syscall.Getpid(), syscall.SIGUSR1) + require.NoError(t, err, "failed to send signal to self, %v", err) + time.Sleep(time.Second) + + requireLineCount(t, testLogPath, 3) + requireLineCount(t, testLogPath+".rotated", 2) +} + +func requireLineCount(t *testing.T, path string, lines int) { + file, err := os.OpenFile(path, os.O_RDONLY, 0) + require.NoError(t, err, "failed to open log file for reading") + defer func() { _ = file.Close() }() + dataRead, err := io.ReadAll(file) + require.NoError(t, err, "failed to read log file") + require.Equal(t, lines, strings.Count(string(dataRead), "\n")) +} diff --git a/library/go/core/log/zap/logrotate/ya.make b/library/go/core/log/zap/logrotate/ya.make new file mode 100644 index 0000000000..3171836441 --- /dev/null +++ b/library/go/core/log/zap/logrotate/ya.make @@ -0,0 +1,42 @@ +GO_LIBRARY() + +IF (OS_LINUX) + SRCS( + error.go + sink.go + ) + + GO_TEST_SRCS(sink_test.go) + + GO_XTEST_SRCS(example_sink_test.go) +ENDIF() + +IF (OS_DARWIN) + SRCS( + error.go + sink.go + ) + + GO_TEST_SRCS(sink_test.go) + + GO_XTEST_SRCS(example_sink_test.go) +ENDIF() + +IF (OS_WINDOWS) + SRCS( + error.go + sink_stub.go + ) +ENDIF() + +END() + +IF ( + OS_DARWIN + OR + OS_FREEBSD + OR + OS_LINUX +) + RECURSE_FOR_TESTS(gotest) +ENDIF() diff --git a/library/go/core/log/zap/qloud.go b/library/go/core/log/zap/qloud.go new file mode 100644 index 0000000000..1f3c90a964 --- /dev/null +++ b/library/go/core/log/zap/qloud.go @@ -0,0 +1,49 @@ +package zap + +import ( + "github.com/ydb-platform/ydb/library/go/core/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// NewQloudLogger constructs fully-fledged Qloud compatible logger +// based on predefined config. 
See https://wiki.yandex-team.ru/qloud/doc/logs +// for more information +func NewQloudLogger(level log.Level, opts ...zap.Option) (*Logger, error) { + cfg := zap.Config{ + Level: zap.NewAtomicLevelAt(ZapifyLevel(level)), + Encoding: "json", + OutputPaths: []string{"stdout"}, + ErrorOutputPaths: []string{"stderr"}, + EncoderConfig: zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + StacktraceKey: "stackTrace", + TimeKey: "", + CallerKey: "", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + }, + } + + zl, err := cfg.Build(opts...) + if err != nil { + return nil, err + } + + return &Logger{ + L: addQloudContext(zl).(*zap.Logger), + }, nil +} + +func addQloudContext(i interface{}) interface{} { + switch c := i.(type) { + case *zap.Logger: + return c.With(zap.Namespace("@fields")) + case zapcore.Core: + return c.With([]zapcore.Field{zap.Namespace("@fields")}) + } + return i +} diff --git a/library/go/core/log/zap/ya.make b/library/go/core/log/zap/ya.make new file mode 100644 index 0000000000..bc41967e07 --- /dev/null +++ b/library/go/core/log/zap/ya.make @@ -0,0 +1,23 @@ +GO_LIBRARY() + +SRCS( + deploy.go + qloud.go + zap.go + zapify.go +) + +GO_TEST_SRCS( + benchmark_test.go + zap_test.go + zapify_test.go +) + +END() + +RECURSE( + asynczap + encoders + gotest + logrotate +) diff --git a/library/go/core/log/zap/zap.go b/library/go/core/log/zap/zap.go new file mode 100644 index 0000000000..e3274cf372 --- /dev/null +++ b/library/go/core/log/zap/zap.go @@ -0,0 +1,252 @@ +package zap + +import ( + "fmt" + + "github.com/ydb-platform/ydb/library/go/core/log" + "github.com/ydb-platform/ydb/library/go/core/log/zap/encoders" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const ( + // callerSkip is number of stack frames to skip when logging caller + callerSkip = 1 +) + +func init() { + if err := zap.RegisterEncoder(encoders.EncoderNameKV, encoders.NewKVEncoder); err != nil { + panic(err) + } + if err := zap.RegisterEncoder(encoders.EncoderNameCli, encoders.NewCliEncoder); err != nil { + panic(err) + } + if err := zap.RegisterEncoder(encoders.EncoderNameTSKV, encoders.NewTSKVEncoder); err != nil { + panic(err) + } +} + +// Logger implements log.Logger interface +type Logger struct { + L *zap.Logger +} + +var _ log.Logger = &Logger{} +var _ log.Structured = &Logger{} +var _ log.Fmt = &Logger{} +var _ log.LoggerWith = &Logger{} +var _ log.LoggerAddCallerSkip = &Logger{} + +// New constructs zap-based logger from provided config +func New(cfg zap.Config) (*Logger, error) { + zl, err := cfg.Build(zap.AddCallerSkip(callerSkip)) + if err != nil { + return nil, err + } + + return &Logger{ + L: zl, + }, nil +} + +// NewWithCore constructs zap-based logger from provided core +func NewWithCore(core zapcore.Core, options ...zap.Option) *Logger { + options = append(options, zap.AddCallerSkip(callerSkip)) + return &Logger{L: zap.New(core, options...)} +} + +// Must constructs zap-based logger from provided config and panics on error +func Must(cfg zap.Config) *Logger { + l, err := New(cfg) + if err != nil { + panic(fmt.Sprintf("failed to construct zap logger: %v", err)) + } + return l +} + +// JSONConfig returns zap config for structured logging (zap's json encoder) +func JSONConfig(level log.Level) zap.Config { + return StandardConfig("json", level) +} + +// ConsoleConfig returns zap config for logging to console (zap's console encoder) +func ConsoleConfig(level 
log.Level) zap.Config { + return StandardConfig("console", level) +} + +// CLIConfig returns zap config for cli logging (custom cli encoder) +func CLIConfig(level log.Level) zap.Config { + return StandardConfig("cli", level) +} + +// KVConfig returns zap config for logging to kv (custom kv encoder) +func KVConfig(level log.Level) zap.Config { + return StandardConfig("kv", level) +} + +// TSKVConfig returns zap config for logging to tskv (custom tskv encoder) +func TSKVConfig(level log.Level) zap.Config { + return zap.Config{ + Level: zap.NewAtomicLevelAt(ZapifyLevel(level)), + Encoding: "tskv", + OutputPaths: []string{"stdout"}, + ErrorOutputPaths: []string{"stderr"}, + EncoderConfig: zapcore.EncoderConfig{ + MessageKey: "message", + LevelKey: "levelname", + TimeKey: "unixtime", + CallerKey: "caller", + NameKey: "name", + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + }, + } +} + +// StandardConfig returns default zap config with specified encoding and level +func StandardConfig(encoding string, level log.Level) zap.Config { + return zap.Config{ + Level: zap.NewAtomicLevelAt(ZapifyLevel(level)), + Encoding: encoding, + OutputPaths: []string{"stdout"}, + ErrorOutputPaths: []string{"stderr"}, + EncoderConfig: zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + TimeKey: "ts", + CallerKey: "caller", + NameKey: "name", + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + }, + } +} + +// Logger returns general logger +func (l *Logger) Logger() log.Logger { + return l +} + +// Fmt returns fmt logger +func (l *Logger) Fmt() log.Fmt { + return l +} + +// Structured returns structured logger +func (l *Logger) Structured() log.Structured { + return l +} + +// With returns logger that always adds provided key/value to every log entry +func (l *Logger) With(fields ...log.Field) log.Logger { + return &Logger{ + L: l.L.With(zapifyFields(fields...)...), + } +} + +func (l *Logger) AddCallerSkip(skip int) log.Logger { + return &Logger{ + L: l.L.WithOptions(zap.AddCallerSkip(skip)), + } +} + +// Trace logs at Trace log level using fields +func (l *Logger) Trace(msg string, fields ...log.Field) { + if ce := l.L.Check(zap.DebugLevel, msg); ce != nil { + ce.Write(zapifyFields(fields...)...) + } +} + +// Tracef logs at Trace log level using fmt formatter +func (l *Logger) Tracef(msg string, args ...interface{}) { + if ce := l.L.Check(zap.DebugLevel, ""); ce != nil { + ce.Message = fmt.Sprintf(msg, args...) + ce.Write() + } +} + +// Debug logs at Debug log level using fields +func (l *Logger) Debug(msg string, fields ...log.Field) { + if ce := l.L.Check(zap.DebugLevel, msg); ce != nil { + ce.Write(zapifyFields(fields...)...) + } +} + +// Debugf logs at Debug log level using fmt formatter +func (l *Logger) Debugf(msg string, args ...interface{}) { + if ce := l.L.Check(zap.DebugLevel, ""); ce != nil { + ce.Message = fmt.Sprintf(msg, args...) + ce.Write() + } +} + +// Info logs at Info log level using fields +func (l *Logger) Info(msg string, fields ...log.Field) { + if ce := l.L.Check(zap.InfoLevel, msg); ce != nil { + ce.Write(zapifyFields(fields...)...) 
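		// (illustrative note, not part of this commit) l.L.Check returns a non-nil
		// CheckedEntry only when the entry passes the configured level, so the
		// zapifyFields conversion above runs only for log calls that will be written.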
+ } +} + +// Infof logs at Info log level using fmt formatter +func (l *Logger) Infof(msg string, args ...interface{}) { + if ce := l.L.Check(zap.InfoLevel, ""); ce != nil { + ce.Message = fmt.Sprintf(msg, args...) + ce.Write() + } +} + +// Warn logs at Warn log level using fields +func (l *Logger) Warn(msg string, fields ...log.Field) { + if ce := l.L.Check(zap.WarnLevel, msg); ce != nil { + ce.Write(zapifyFields(fields...)...) + } +} + +// Warnf logs at Warn log level using fmt formatter +func (l *Logger) Warnf(msg string, args ...interface{}) { + if ce := l.L.Check(zap.WarnLevel, ""); ce != nil { + ce.Message = fmt.Sprintf(msg, args...) + ce.Write() + } +} + +// Error logs at Error log level using fields +func (l *Logger) Error(msg string, fields ...log.Field) { + if ce := l.L.Check(zap.ErrorLevel, msg); ce != nil { + ce.Write(zapifyFields(fields...)...) + } +} + +// Errorf logs at Error log level using fmt formatter +func (l *Logger) Errorf(msg string, args ...interface{}) { + if ce := l.L.Check(zap.ErrorLevel, ""); ce != nil { + ce.Message = fmt.Sprintf(msg, args...) + ce.Write() + } +} + +// Fatal logs at Fatal log level using fields +func (l *Logger) Fatal(msg string, fields ...log.Field) { + if ce := l.L.Check(zap.FatalLevel, msg); ce != nil { + ce.Write(zapifyFields(fields...)...) + } +} + +// Fatalf logs at Fatal log level using fmt formatter +func (l *Logger) Fatalf(msg string, args ...interface{}) { + if ce := l.L.Check(zap.FatalLevel, ""); ce != nil { + ce.Message = fmt.Sprintf(msg, args...) + ce.Write() + } +} + +// WithName adds name to logger +func (l *Logger) WithName(name string) log.Logger { + return &Logger{ + L: l.L.Named(name), + } +} diff --git a/library/go/core/log/zap/zap_test.go b/library/go/core/log/zap/zap_test.go new file mode 100644 index 0000000000..514a1fb89f --- /dev/null +++ b/library/go/core/log/zap/zap_test.go @@ -0,0 +1,113 @@ +package zap + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/ydb-platform/ydb/library/go/core/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" +) + +func TestNewQloudLogger(t *testing.T) { + logger, err := NewQloudLogger(log.DebugLevel) + assert.NoError(t, err) + + core, logs := observer.New(zap.DebugLevel) + + logger.L = logger.L.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core { + return addQloudContext(core).(zapcore.Core) + })) + + expectedMessage := "test message" + + logger.Info(expectedMessage, log.String("package", "zap")) + assert.Equal(t, 1, logs.Len()) + + loggedEntry := logs.AllUntimed()[0] + assert.Equal(t, zap.InfoLevel, loggedEntry.Level) + assert.Equal(t, expectedMessage, loggedEntry.Message) + assert.Equal(t, + map[string]interface{}{ + "@fields": map[string]interface{}{ + "package": "zap", + }, + }, + loggedEntry.ContextMap(), + ) +} + +func TestLogger_FormattedMethods(t *testing.T) { + testCases := []struct { + lvl log.Level + expectLogged []zapcore.Entry + }{ + {log.TraceLevel, []zapcore.Entry{ + {Level: zap.DebugLevel, Message: "test at trace"}, + {Level: zap.DebugLevel, Message: "test at debug"}, + {Level: zap.InfoLevel, Message: "test at info"}, + {Level: zap.WarnLevel, Message: "test at warn"}, + {Level: zap.ErrorLevel, Message: "test at error"}, + }}, + {log.DebugLevel, []zapcore.Entry{ + {Level: zap.DebugLevel, Message: "test at trace"}, + {Level: zap.DebugLevel, Message: "test at debug"}, + {Level: zap.InfoLevel, Message: "test at info"}, + {Level: zap.WarnLevel, Message: "test at warn"}, + 
{Level: zap.ErrorLevel, Message: "test at error"}, + }}, + {log.InfoLevel, []zapcore.Entry{ + {Level: zap.InfoLevel, Message: "test at info"}, + {Level: zap.WarnLevel, Message: "test at warn"}, + {Level: zap.ErrorLevel, Message: "test at error"}, + }}, + {log.WarnLevel, []zapcore.Entry{ + {Level: zap.WarnLevel, Message: "test at warn"}, + {Level: zap.ErrorLevel, Message: "test at error"}, + }}, + {log.ErrorLevel, []zapcore.Entry{ + {Level: zap.ErrorLevel, Message: "test at error"}, + }}, + } + + for _, tc := range testCases { + t.Run(tc.lvl.String(), func(t *testing.T) { + logger, err := New(ConsoleConfig(tc.lvl)) + assert.NoError(t, err) + + core, logs := observer.New(ZapifyLevel(tc.lvl)) + + logger.L = logger.L.WithOptions(zap.WrapCore(func(_ zapcore.Core) zapcore.Core { + return core + })) + + for _, lvl := range log.Levels() { + switch lvl { + case log.TraceLevel: + logger.Tracef("test at %s", lvl.String()) + case log.DebugLevel: + logger.Debugf("test at %s", lvl.String()) + case log.InfoLevel: + logger.Infof("test at %s", lvl.String()) + case log.WarnLevel: + logger.Warnf("test at %s", lvl.String()) + case log.ErrorLevel: + logger.Errorf("test at %s", lvl.String()) + case log.FatalLevel: + // skipping fatal + } + } + + loggedEntries := logs.AllUntimed() + + assert.Equal(t, len(tc.expectLogged), logs.Len(), cmp.Diff(tc.expectLogged, loggedEntries)) + + for i, le := range loggedEntries { + assert.Equal(t, tc.expectLogged[i].Level, le.Level) + assert.Equal(t, tc.expectLogged[i].Message, le.Message) + } + }) + } +} diff --git a/library/go/core/log/zap/zapify.go b/library/go/core/log/zap/zapify.go new file mode 100644 index 0000000000..5fd6ffb1be --- /dev/null +++ b/library/go/core/log/zap/zapify.go @@ -0,0 +1,95 @@ +package zap + +import ( + "fmt" + + "github.com/ydb-platform/ydb/library/go/core/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// ZapifyLevel turns interface log level to zap log level +func ZapifyLevel(level log.Level) zapcore.Level { + switch level { + case log.TraceLevel: + return zapcore.DebugLevel + case log.DebugLevel: + return zapcore.DebugLevel + case log.InfoLevel: + return zapcore.InfoLevel + case log.WarnLevel: + return zapcore.WarnLevel + case log.ErrorLevel: + return zapcore.ErrorLevel + case log.FatalLevel: + return zapcore.FatalLevel + default: + // For when new log level is not added to this func (most likely never). + panic(fmt.Sprintf("unknown log level: %d", level)) + } +} + +// UnzapifyLevel turns zap log level to interface log level. +func UnzapifyLevel(level zapcore.Level) log.Level { + switch level { + case zapcore.DebugLevel: + return log.DebugLevel + case zapcore.InfoLevel: + return log.InfoLevel + case zapcore.WarnLevel: + return log.WarnLevel + case zapcore.ErrorLevel: + return log.ErrorLevel + case zapcore.FatalLevel, zapcore.DPanicLevel, zapcore.PanicLevel: + return log.FatalLevel + default: + // For when new log level is not added to this func (most likely never). 
+ panic(fmt.Sprintf("unknown log level: %d", level)) + } +} + +// nolint: gocyclo +func zapifyField(field log.Field) zap.Field { + switch field.Type() { + case log.FieldTypeNil: + return zap.Reflect(field.Key(), nil) + case log.FieldTypeString: + return zap.String(field.Key(), field.String()) + case log.FieldTypeBinary: + return zap.Binary(field.Key(), field.Binary()) + case log.FieldTypeBoolean: + return zap.Bool(field.Key(), field.Bool()) + case log.FieldTypeSigned: + return zap.Int64(field.Key(), field.Signed()) + case log.FieldTypeUnsigned: + return zap.Uint64(field.Key(), field.Unsigned()) + case log.FieldTypeFloat: + return zap.Float64(field.Key(), field.Float()) + case log.FieldTypeTime: + return zap.Time(field.Key(), field.Time()) + case log.FieldTypeDuration: + return zap.Duration(field.Key(), field.Duration()) + case log.FieldTypeError: + return zap.NamedError(field.Key(), field.Error()) + case log.FieldTypeArray: + return zap.Any(field.Key(), field.Interface()) + case log.FieldTypeAny: + return zap.Any(field.Key(), field.Interface()) + case log.FieldTypeReflect: + return zap.Reflect(field.Key(), field.Interface()) + case log.FieldTypeByteString: + return zap.ByteString(field.Key(), field.Binary()) + default: + // For when new field type is not added to this func + panic(fmt.Sprintf("unknown field type: %d", field.Type())) + } +} + +func zapifyFields(fields ...log.Field) []zapcore.Field { + zapFields := make([]zapcore.Field, 0, len(fields)) + for _, field := range fields { + zapFields = append(zapFields, zapifyField(field)) + } + + return zapFields +} diff --git a/library/go/core/log/zap/zapify_test.go b/library/go/core/log/zap/zapify_test.go new file mode 100644 index 0000000000..b11b1dc261 --- /dev/null +++ b/library/go/core/log/zap/zapify_test.go @@ -0,0 +1,60 @@ +package zap + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/ydb-platform/ydb/library/go/core/log" + "go.uber.org/zap/zapcore" +) + +// Simple test, that all type of fields are correctly zapified. +// Maybe we also need some test that checks resulting zap.Field type also. 
+func TestZapifyField(t *testing.T) { + fileds := []log.Field{ + log.Nil("test"), + log.String("test", "test"), + log.Binary("test", []byte("test")), + log.Bool("test", true), + log.Int("test", -42), + log.UInt("test", 42), + log.Float64("test", 0.42), + log.Time("test", time.Now()), + log.Duration("test", time.Second), + log.Error(fmt.Errorf("test")), + log.Array("test", []uint32{42}), + log.Any("test", struct{ ID uint32 }{ID: 42}), + log.Reflect("test", struct{ ID uint32 }{ID: 42}), + } + for _, field := range fileds { + assert.NotPanics(t, func() { + zapifyField(field) + }) + } +} + +func TestZapifyAny(t *testing.T) { + f := zapifyField(log.Any("test", struct{ ID uint32 }{ID: 42})) + assert.Equal(t, zapcore.ReflectType, f.Type) +} + +func TestZapifyReflect(t *testing.T) { + f := zapifyField(log.Any("test", struct{ ID uint32 }{ID: 42})) + assert.Equal(t, zapcore.ReflectType, f.Type) +} + +type stringer struct{} + +func (*stringer) String() string { + return "hello" +} + +func TestZapifyStringer(t *testing.T) { + f0 := zapifyField(log.Any("test", &stringer{})) + assert.Equal(t, zapcore.StringerType, f0.Type) + + f1 := zapifyField(log.Reflect("test", &stringer{})) + assert.Equal(t, zapcore.ReflectType, f1.Type) +} diff --git a/library/go/core/xerrors/assertxerrors/assertxerrors.go b/library/go/core/xerrors/assertxerrors/assertxerrors.go new file mode 100644 index 0000000000..593fc1595a --- /dev/null +++ b/library/go/core/xerrors/assertxerrors/assertxerrors.go @@ -0,0 +1,87 @@ +package assertxerrors + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/ydb-platform/ydb/library/go/core/xerrors/internal/modes" + "github.com/ydb-platform/ydb/library/go/test/testhelpers" +) + +func RunTestsPerMode(t *testing.T, expected Expectations, constructor func(t *testing.T) error) { + for _, mode := range modes.KnownStackTraceModes() { + t.Run(fmt.Sprintf("Mode%s", mode), func(t *testing.T) { + modes.SetStackTraceMode(mode) + err := constructor(t) + expected.Assert(t, err) + }) + } +} + +type StackTraceModeExpectation struct { + expectedPlusV string + lines []int +} + +func NewStackTraceModeExpectation(plusv string, lines ...int) StackTraceModeExpectation { + return StackTraceModeExpectation{expectedPlusV: plusv, lines: lines} +} + +type Expectations struct { + ExpectedS string + ExpectedV string + Frames StackTraceModeExpectation + Stacks StackTraceModeExpectation + StackThenFrames StackTraceModeExpectation + StackThenNothing StackTraceModeExpectation + Nothing StackTraceModeExpectation +} + +func (e Expectations) Assert(t *testing.T, err error) { + assert.Equal(t, e.ExpectedS, fmt.Sprintf("%s", err)) + assert.Equal(t, e.ExpectedV, fmt.Sprintf("%v", err)) + + var expected StackTraceModeExpectation + switch modes.GetStackTraceMode() { + case modes.StackTraceModeFrames: + expected = e.Frames + case modes.StackTraceModeStacks: + expected = e.Stacks + case modes.StackTraceModeStackThenFrames: + expected = e.StackThenFrames + case modes.StackTraceModeStackThenNothing: + expected = e.StackThenNothing + case modes.StackTraceModeNothing: + expected = e.Nothing + } + + assertErrorOutput(t, expected, err) +} + +func assertErrorOutput(t *testing.T, expected StackTraceModeExpectation, err error) { + // Cut starting \n's if needed (we use `` notation with newlines for expected error messages) + preparedExpected := strings.TrimPrefix(expected.expectedPlusV, "\n") + actual := fmt.Sprintf("%+v", err) + + var e error + preparedExpected, e = 
testhelpers.RemoveLines(preparedExpected, expected.lines...) + if !assert.NoErrorf(t, e, "lines removal from expected:\n%s", preparedExpected) { + t.Logf("initial expected:\n%s", expected.expectedPlusV) + t.Logf("initial actual:\n%s", actual) + return + } + + preparedActual, e := testhelpers.RemoveLines(actual, expected.lines...) + if !assert.NoErrorf(t, e, "lines removal from actual:\n%s", actual) { + t.Logf("initial expected:\n%s", expected.expectedPlusV) + t.Logf("initial actual:\n%s", actual) + return + } + + if !assert.Equal(t, preparedExpected, preparedActual) { + t.Logf("initial expected:\n%s", expected.expectedPlusV) + t.Logf("initial actual:\n%s", actual) + } +} diff --git a/library/go/core/xerrors/assertxerrors/ya.make b/library/go/core/xerrors/assertxerrors/ya.make new file mode 100644 index 0000000000..c61fc38f34 --- /dev/null +++ b/library/go/core/xerrors/assertxerrors/ya.make @@ -0,0 +1,5 @@ +GO_LIBRARY() + +SRCS(assertxerrors.go) + +END() diff --git a/library/go/core/xerrors/benchmark_test.go b/library/go/core/xerrors/benchmark_test.go new file mode 100644 index 0000000000..c573808eef --- /dev/null +++ b/library/go/core/xerrors/benchmark_test.go @@ -0,0 +1,154 @@ +package xerrors + +import ( + "errors" + "fmt" + "testing" + + pkgerrors "github.com/pkg/errors" + "github.com/ydb-platform/ydb/library/go/core/xerrors/benchxerrors" + "github.com/ydb-platform/ydb/library/go/test/testhelpers" + "golang.org/x/xerrors" +) + +const ( + benchNewMsg = "foo" + benchErrorfMsg = "bar: %w" +) + +func BenchmarkNewStd(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = errors.New(benchNewMsg) + } +} + +func BenchmarkNewPkg(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = pkgerrors.New(benchNewMsg) + } +} + +func BenchmarkNewXerrors(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = xerrors.New(benchNewMsg) + } +} + +func BenchmarkNewV2(b *testing.B) { + benchxerrors.RunPerMode(b, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = New(benchNewMsg) + } + }) +} +func BenchmarkErrorfStd(b *testing.B) { + err := errors.New(benchNewMsg) + for i := 0; i < b.N; i++ { + _ = fmt.Errorf(benchErrorfMsg, err) + } +} + +func BenchmarkErrorfPkg(b *testing.B) { + err := errors.New(benchNewMsg) + for i := 0; i < b.N; i++ { + _ = pkgerrors.Wrap(err, benchErrorfMsg) + } +} + +func BenchmarkErrorfXerrors(b *testing.B) { + err := errors.New(benchNewMsg) + for i := 0; i < b.N; i++ { + _ = xerrors.Errorf(benchErrorfMsg, err) + } +} + +func BenchmarkErrorfV2(b *testing.B) { + err := errors.New(benchNewMsg) + benchxerrors.RunPerMode(b, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = Errorf(benchErrorfMsg, err) + } + }) +} + +func BenchmarkNewErrorfStd(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = fmt.Errorf(benchErrorfMsg, errors.New(benchNewMsg)) + } +} + +func BenchmarkNewErrorfPkg(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = pkgerrors.Wrap(pkgerrors.New(benchNewMsg), benchErrorfMsg) + } +} + +func BenchmarkNewErrorfXerrors(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = xerrors.Errorf(benchErrorfMsg, xerrors.New(benchNewMsg)) + } +} + +func BenchmarkNewErrorfV2(b *testing.B) { + benchxerrors.RunPerMode(b, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = Errorf(benchErrorfMsg, New(benchNewMsg)) + } + }) +} + +func BenchmarkNewErrorfErrorfStd(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = fmt.Errorf(benchErrorfMsg, fmt.Errorf(benchErrorfMsg, errors.New(benchNewMsg))) + } +} + +func BenchmarkNewErrorfErrorfPkg(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = 
pkgerrors.Wrap(pkgerrors.Wrap(pkgerrors.New(benchNewMsg), benchErrorfMsg), benchErrorfMsg) + } +} + +func BenchmarkNewErrorfErrorfXerrors(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = xerrors.Errorf(benchErrorfMsg, xerrors.Errorf(benchErrorfMsg, xerrors.New(benchNewMsg))) + } +} + +func BenchmarkNewErrorfErrorfV2(b *testing.B) { + benchxerrors.RunPerMode(b, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = Errorf(benchErrorfMsg, Errorf(benchErrorfMsg, New(benchNewMsg))) + } + }) +} + +func recurse(f func()) { + testhelpers.Recurse(256, f) +} + +func BenchmarkBigStackNewErrorfErrorfStd(b *testing.B) { + for i := 0; i < b.N; i++ { + recurse(func() { _ = fmt.Errorf(benchErrorfMsg, fmt.Errorf(benchErrorfMsg, errors.New(benchNewMsg))) }) + } +} + +func BenchmarkBigStackNewErrorfErrorfPkg(b *testing.B) { + for i := 0; i < b.N; i++ { + recurse(func() { _ = pkgerrors.Wrap(pkgerrors.Wrap(pkgerrors.New(benchNewMsg), benchErrorfMsg), benchErrorfMsg) }) + } +} + +func BenchmarkBigStackNewErrorfErrorfXerrors(b *testing.B) { + for i := 0; i < b.N; i++ { + recurse(func() { _ = xerrors.Errorf(benchErrorfMsg, xerrors.Errorf(benchErrorfMsg, xerrors.New(benchNewMsg))) }) + } +} + +func BenchmarkBigStackNewErrorfErrorfV2(b *testing.B) { + benchxerrors.RunPerMode(b, func(b *testing.B) { + for i := 0; i < b.N; i++ { + recurse(func() { _ = Errorf(benchErrorfMsg, Errorf(benchErrorfMsg, New(benchNewMsg))) }) + } + }) +} diff --git a/library/go/core/xerrors/benchxerrors/benchxerrors.go b/library/go/core/xerrors/benchxerrors/benchxerrors.go new file mode 100644 index 0000000000..d17286e9fb --- /dev/null +++ b/library/go/core/xerrors/benchxerrors/benchxerrors.go @@ -0,0 +1,17 @@ +package benchxerrors + +import ( + "fmt" + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/internal/modes" +) + +func RunPerMode(b *testing.B, bench func(b *testing.B)) { + for _, mode := range modes.KnownStackTraceModes() { + b.Run(fmt.Sprintf("Mode%s", mode), func(b *testing.B) { + modes.SetStackTraceMode(mode) + bench(b) + }) + } +} diff --git a/library/go/core/xerrors/benchxerrors/ya.make b/library/go/core/xerrors/benchxerrors/ya.make new file mode 100644 index 0000000000..07de188bb3 --- /dev/null +++ b/library/go/core/xerrors/benchxerrors/ya.make @@ -0,0 +1,5 @@ +GO_LIBRARY() + +SRCS(benchxerrors.go) + +END() diff --git a/library/go/core/xerrors/doc.go b/library/go/core/xerrors/doc.go new file mode 100644 index 0000000000..de06dd15d2 --- /dev/null +++ b/library/go/core/xerrors/doc.go @@ -0,0 +1,2 @@ +// package xerrors is a drop in replacement for errors and golang.org/x/xerrors packages and functionally for github.com/pkg/errors. +package xerrors diff --git a/library/go/core/xerrors/errorf.go b/library/go/core/xerrors/errorf.go new file mode 100644 index 0000000000..0ed8541f28 --- /dev/null +++ b/library/go/core/xerrors/errorf.go @@ -0,0 +1,92 @@ +package xerrors + +import ( + "fmt" + "io" + "strings" + + "github.com/ydb-platform/ydb/library/go/x/xruntime" +) + +type wrappedErrorf struct { + err error + stacktrace *xruntime.StackTrace +} + +var _ ErrorStackTrace = &wrappedErrorf{} + +func Errorf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + return &wrappedErrorf{ + err: err, + stacktrace: newStackTrace(1, err), + } +} + +func SkipErrorf(skip int, format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) 
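	// (illustrative note, not part of this commit) the skip+1 below accounts for
	// SkipErrorf's own stack frame, so the captured trace starts at the frame the
	// caller's skip value refers to; Errorf above passes 1 for the same reason.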
+ return &wrappedErrorf{ + err: err, + stacktrace: newStackTrace(skip+1, err), + } +} + +func (e *wrappedErrorf) Format(s fmt.State, v rune) { + switch v { + case 'v': + if s.Flag('+') { + msg := e.err.Error() + inner := Unwrap(e.err) + // If Errorf wrapped another error then it will be our message' suffix. If so, cut it since otherwise we will + // print it again as part of formatting that error. + if inner != nil { + if strings.HasSuffix(msg, inner.Error()) { + msg = msg[:len(msg)-len(inner.Error())] + // Cut last space if needed but only if there is stacktrace present (very likely) + if e.stacktrace != nil && strings.HasSuffix(msg, ": ") { + msg = msg[:len(msg)-1] + } + } + } + + _, _ = io.WriteString(s, msg) + if e.stacktrace != nil { + // New line is useful only when printing frames, otherwise it is better to print next error in the chain + // right after we print this one + _, _ = io.WriteString(s, "\n") + writeStackTrace(s, e.stacktrace) + } + + // Print next error down the chain if there is one + if inner != nil { + _, _ = fmt.Fprintf(s, "%+v", inner) + } + + return + } + fallthrough + case 's': + _, _ = io.WriteString(s, e.err.Error()) + case 'q': + _, _ = fmt.Fprintf(s, "%q", e.err.Error()) + } +} + +func (e *wrappedErrorf) Error() string { + // Wrapped error has correct formatting + return e.err.Error() +} + +func (e *wrappedErrorf) Unwrap() error { + // Skip wrapped error and return whatever it is wrapping if inner error contains single error + // TODO: test for correct unwrap + if _, ok := e.err.(interface{ Unwrap() []error }); ok { + return e.err + } + + return Unwrap(e.err) +} + +func (e *wrappedErrorf) StackTrace() *xruntime.StackTrace { + return e.stacktrace +} diff --git a/library/go/core/xerrors/errorf_formatting_with_error_test.go b/library/go/core/xerrors/errorf_formatting_with_error_test.go new file mode 100644 index 0000000000..c6bf47aac2 --- /dev/null +++ b/library/go/core/xerrors/errorf_formatting_with_error_test.go @@ -0,0 +1,72 @@ +package xerrors + +import ( + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors" +) + +func TestErrorfFormattingWithError(t *testing.T) { + constructor := func(t *testing.T) error { + err := New("new") + return Errorf("errorf: %w", err) + } + expected := assertxerrors.Expectations{ + ExpectedS: "errorf: new", + ExpectedV: "errorf: new", + Frames: assertxerrors.NewStackTraceModeExpectation(` +errorf: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithError.func1 + library/go/core/xerrors/errorf_formatting_with_error_test.go:12 +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithError.func1 + library/go/core/xerrors/errorf_formatting_with_error_test.go:11 +`, + ), + Stacks: assertxerrors.NewStackTraceModeExpectation(` +errorf: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithError.func1 + library/go/core/xerrors/errorf_formatting_with_error_test.go:12 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithError.func1 + library/go/core/xerrors/errorf_formatting_with_error_test.go:11 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + 
/home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, 10, 11, 12, 13, + ), + StackThenFrames: assertxerrors.NewStackTraceModeExpectation(` +errorf: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithError.func1 + library/go/core/xerrors/errorf_formatting_with_error_test.go:12 +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithError.func1 + library/go/core/xerrors/errorf_formatting_with_error_test.go:11 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 6, 7, 8, 9, + ), + StackThenNothing: assertxerrors.NewStackTraceModeExpectation(` +errorf: new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithError.func1 + library/go/core/xerrors/errorf_formatting_with_error_test.go:11 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + Nothing: assertxerrors.NewStackTraceModeExpectation("errorf: new"), + } + assertxerrors.RunTestsPerMode(t, expected, constructor) +} diff --git a/library/go/core/xerrors/errorf_formatting_with_std_error_test.go b/library/go/core/xerrors/errorf_formatting_with_std_error_test.go new file mode 100644 index 0000000000..c8d3153890 --- /dev/null +++ b/library/go/core/xerrors/errorf_formatting_with_std_error_test.go @@ -0,0 +1,60 @@ +package xerrors + +import ( + "errors" + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors" +) + +func TestErrorfFormattingWithStdError(t *testing.T) { + constructor := func(t *testing.T) error { + err := errors.New("new") + return Errorf("errorf: %w", err) + } + expected := assertxerrors.Expectations{ + ExpectedS: "errorf: new", + ExpectedV: "errorf: new", + Frames: assertxerrors.NewStackTraceModeExpectation(` +errorf: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithStdError.func1 + library/go/core/xerrors/errorf_formatting_with_std_error_test.go:13 +new`, + ), + Stacks: assertxerrors.NewStackTraceModeExpectation(` +errorf: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithStdError.func1 + library/go/core/xerrors/errorf_formatting_with_std_error_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +new`, + 3, 4, 5, 6, + ), + StackThenFrames: assertxerrors.NewStackTraceModeExpectation(` +errorf: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithStdError.func1 + library/go/core/xerrors/errorf_formatting_with_std_error_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + 
/home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +new`, + 3, 4, 5, 6, + ), + StackThenNothing: assertxerrors.NewStackTraceModeExpectation(` +errorf: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithStdError.func1 + library/go/core/xerrors/errorf_formatting_with_std_error_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +new`, + 3, 4, 5, 6, + ), + Nothing: assertxerrors.NewStackTraceModeExpectation("errorf: new"), + } + assertxerrors.RunTestsPerMode(t, expected, constructor) +} diff --git a/library/go/core/xerrors/errorf_formatting_without_error_test.go b/library/go/core/xerrors/errorf_formatting_without_error_test.go new file mode 100644 index 0000000000..602804f97f --- /dev/null +++ b/library/go/core/xerrors/errorf_formatting_without_error_test.go @@ -0,0 +1,58 @@ +package xerrors + +import ( + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors" +) + +func TestErrorfFormattingWithoutError(t *testing.T) { + constructor := func(t *testing.T) error { + return Errorf("errorf: %s", "not an error") + } + expected := assertxerrors.Expectations{ + ExpectedS: "errorf: not an error", + ExpectedV: "errorf: not an error", + Frames: assertxerrors.NewStackTraceModeExpectation(` +errorf: not an error + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithoutError.func1 + library/go/core/xerrors/errorf_formatting_without_error_test.go:11 +`, + ), + Stacks: assertxerrors.NewStackTraceModeExpectation(` +errorf: not an error + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithoutError.func1 + library/go/core/xerrors/errorf_formatting_without_error_test.go:11 +github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + StackThenFrames: assertxerrors.NewStackTraceModeExpectation(` +errorf: not an error + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithoutError.func1 + library/go/core/xerrors/errorf_formatting_without_error_test.go:11 +github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + StackThenNothing: assertxerrors.NewStackTraceModeExpectation(` +errorf: not an error + github.com/ydb-platform/ydb/library/go/core/xerrors.TestErrorfFormattingWithoutError.func1 + library/go/core/xerrors/errorf_formatting_without_error_test.go:11 +github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + Nothing: assertxerrors.NewStackTraceModeExpectation("errorf: not an error"), + } + assertxerrors.RunTestsPerMode(t, expected, constructor) +} diff --git 
a/library/go/core/xerrors/errorf_multiple_errors_test.go b/library/go/core/xerrors/errorf_multiple_errors_test.go new file mode 100644 index 0000000000..844b71355e --- /dev/null +++ b/library/go/core/xerrors/errorf_multiple_errors_test.go @@ -0,0 +1,19 @@ +package xerrors + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestErrorfMultipleErrors(t *testing.T) { + err1 := New("error1") + err2 := New("error2") + err3 := New("error3") + + compositeErr := Errorf("errorf: %w, %w", err1, err2) + + require.True(t, Is(compositeErr, err1)) + require.True(t, Is(compositeErr, err2)) + require.False(t, Is(compositeErr, err3)) +} diff --git a/library/go/core/xerrors/forward.go b/library/go/core/xerrors/forward.go new file mode 100644 index 0000000000..aaa900133c --- /dev/null +++ b/library/go/core/xerrors/forward.go @@ -0,0 +1,56 @@ +package xerrors + +import "errors" + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return errors.Unwrap(err) +} + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +// +// An error type might provide an Is method so it can be treated as equivalent +// to an existing error. For example, if MyError defines +// +// func (m MyError) Is(target error) bool { return target == os.ErrExist } +// +// then Is(MyError{}, os.ErrExist) returns true. See syscall.Errno.Is for +// an example in the standard library. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. Otherwise, it returns false. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// An error type might provide an As method so it can be treated as if it were a +// different error type. +// +// As panics if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. +func As(err error, target interface{}) bool { + return errors.As(err, target) +} + +// Wrapper provides context around another error. +type Wrapper interface { + // Unwrap returns the next error in the error chain. + // If there is no next error, Unwrap returns nil. 
+ Unwrap() error +} diff --git a/library/go/core/xerrors/gotest/ya.make b/library/go/core/xerrors/gotest/ya.make new file mode 100644 index 0000000000..1525c5623a --- /dev/null +++ b/library/go/core/xerrors/gotest/ya.make @@ -0,0 +1,5 @@ +GO_TEST_FOR(library/go/core/xerrors) + +ENABLE(GO_DEBUG_PATH_RELATIVE) + +END() diff --git a/library/go/core/xerrors/internal/modes/stack_frames_count.go b/library/go/core/xerrors/internal/modes/stack_frames_count.go new file mode 100644 index 0000000000..c117becf6a --- /dev/null +++ b/library/go/core/xerrors/internal/modes/stack_frames_count.go @@ -0,0 +1,22 @@ +package modes + +import "sync/atomic" + +type StackFramesCount = int32 + +const ( + StackFramesCount16 StackFramesCount = 16 + StackFramesCount32 StackFramesCount = 32 + StackFramesCount64 StackFramesCount = 64 + StackFramesCount128 StackFramesCount = 128 +) + +var StackFramesCountMax = StackFramesCount32 + +func SetStackFramesCountMax(count StackFramesCount) { + atomic.StoreInt32(&StackFramesCountMax, count) +} + +func GetStackFramesCountMax() StackFramesCount { + return atomic.LoadInt32(&StackFramesCountMax) +} diff --git a/library/go/core/xerrors/internal/modes/stack_trace_mode.go b/library/go/core/xerrors/internal/modes/stack_trace_mode.go new file mode 100644 index 0000000000..04f78ffd3d --- /dev/null +++ b/library/go/core/xerrors/internal/modes/stack_trace_mode.go @@ -0,0 +1,48 @@ +package modes + +import "sync/atomic" + +type StackTraceMode int32 + +const ( + StackTraceModeFrames StackTraceMode = iota + StackTraceModeStacks + StackTraceModeStackThenFrames + StackTraceModeStackThenNothing + StackTraceModeNothing +) + +func (m StackTraceMode) String() string { + return []string{"Frames", "Stacks", "StackThenFrames", "StackThenNothing", "Nothing"}[m] +} + +const defaultStackTraceMode = StackTraceModeFrames + +var ( + // Default mode + stackTraceMode = defaultStackTraceMode + // Known modes (used in tests) + knownStackTraceModes = []StackTraceMode{ + StackTraceModeFrames, + StackTraceModeStacks, + StackTraceModeStackThenFrames, + StackTraceModeStackThenNothing, + StackTraceModeNothing, + } +) + +func SetStackTraceMode(v StackTraceMode) { + atomic.StoreInt32((*int32)(&stackTraceMode), int32(v)) +} + +func GetStackTraceMode() StackTraceMode { + return StackTraceMode(atomic.LoadInt32((*int32)(&stackTraceMode))) +} + +func DefaultStackTraceMode() { + SetStackTraceMode(defaultStackTraceMode) +} + +func KnownStackTraceModes() []StackTraceMode { + return knownStackTraceModes +} diff --git a/library/go/core/xerrors/internal/modes/ya.make b/library/go/core/xerrors/internal/modes/ya.make new file mode 100644 index 0000000000..51342e3b12 --- /dev/null +++ b/library/go/core/xerrors/internal/modes/ya.make @@ -0,0 +1,8 @@ +GO_LIBRARY() + +SRCS( + stack_frames_count.go + stack_trace_mode.go +) + +END() diff --git a/library/go/core/xerrors/internal/ya.make b/library/go/core/xerrors/internal/ya.make new file mode 100644 index 0000000000..49349fb87b --- /dev/null +++ b/library/go/core/xerrors/internal/ya.make @@ -0,0 +1 @@ +RECURSE(modes) diff --git a/library/go/core/xerrors/mode.go b/library/go/core/xerrors/mode.go new file mode 100644 index 0000000000..c6b16bed76 --- /dev/null +++ b/library/go/core/xerrors/mode.go @@ -0,0 +1,93 @@ +package xerrors + +import ( + "fmt" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/internal/modes" + "github.com/ydb-platform/ydb/library/go/x/xruntime" +) + +func DefaultStackTraceMode() { + modes.DefaultStackTraceMode() +} + +func EnableFrames() { + 
modes.SetStackTraceMode(modes.StackTraceModeFrames) +} + +func EnableStacks() { + modes.SetStackTraceMode(modes.StackTraceModeStacks) +} + +func EnableStackThenFrames() { + modes.SetStackTraceMode(modes.StackTraceModeStackThenFrames) +} + +func EnableStackThenNothing() { + modes.SetStackTraceMode(modes.StackTraceModeStackThenNothing) +} + +func DisableStackTraces() { + modes.SetStackTraceMode(modes.StackTraceModeNothing) +} + +// newStackTrace returns stacktrace based on current mode and frames count +func newStackTrace(skip int, err error) *xruntime.StackTrace { + skip++ + m := modes.GetStackTraceMode() + switch m { + case modes.StackTraceModeFrames: + return xruntime.NewFrame(skip) + case modes.StackTraceModeStackThenFrames: + if err != nil && StackTraceOfEffect(err) != nil { + return xruntime.NewFrame(skip) + } + + return _newStackTrace(skip) + case modes.StackTraceModeStackThenNothing: + if err != nil && StackTraceOfEffect(err) != nil { + return nil + } + + return _newStackTrace(skip) + case modes.StackTraceModeStacks: + return _newStackTrace(skip) + case modes.StackTraceModeNothing: + return nil + } + + panic(fmt.Sprintf("unknown stack trace mode %d", m)) +} + +func MaxStackFrames16() { + modes.SetStackFramesCountMax(modes.StackFramesCount16) +} + +func MaxStackFrames32() { + modes.SetStackFramesCountMax(modes.StackFramesCount32) +} + +func MaxStackFrames64() { + modes.SetStackFramesCountMax(modes.StackFramesCount64) +} + +func MaxStackFrames128() { + modes.SetStackFramesCountMax(modes.StackFramesCount128) +} + +func _newStackTrace(skip int) *xruntime.StackTrace { + skip++ + count := modes.GetStackFramesCountMax() + switch count { + case 16: + return xruntime.NewStackTrace16(skip) + case 32: + return xruntime.NewStackTrace32(skip) + case 64: + return xruntime.NewStackTrace64(skip) + case 128: + return xruntime.NewStackTrace128(skip) + } + + panic(fmt.Sprintf("unknown stack frames count %d", count)) +} diff --git a/library/go/core/xerrors/multierr/error.go b/library/go/core/xerrors/multierr/error.go new file mode 100644 index 0000000000..443fa177bc --- /dev/null +++ b/library/go/core/xerrors/multierr/error.go @@ -0,0 +1,277 @@ +package multierr + +import ( + "bytes" + "fmt" + "io" + "strings" + "sync/atomic" + + "github.com/ydb-platform/ydb/library/go/core/xerrors" +) + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors the supplied +// error is composed of. If the error is nil, a nil slice is returned. +// +// If the error is not composed of the errors (do not implement errorGroup +// interface with Errors method), the returned slice contains just the error +// that was padded in. +// +// Callers of this function are free to modify the returned slice. +func Errors(err error) []error { + if err == nil { + return nil + } + + eg, ok := err.(errorGroup) + if !ok { + return []error{err} + } + + errs := eg.Errors() + + result := make([]error, len(errs)) + copy(result, errs) + + return result +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. 
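// A hypothetical usage sketch (not part of this commit), assuming this package is
// imported as multierr and that stepA/stepB are caller-defined helpers returning error:
//
//	var errs error
//	errs = multierr.Append(errs, stepA())
//	errs = multierr.Append(errs, stepB())
//	for _, e := range multierr.Errors(errs) { // a nil errs yields a nil slice
//		fmt.Printf("sub-error: %v\n", e)
//	}
//
// Combine(stepA(), stepB()) gives the same result in a single call, skipping nils.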
+func Append(left, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && atomic.SwapUint32(&l.copyNeeded, 1) == 0 { + errors := append(l.Errors(), right) + + return &multiError{errors: errors} + } else if !ok { + return &multiError{errors: []error{left, right}} + } + } + + return fromSlice([]error{left, right}) +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, +// a nil error is returned. +// +// If only a single error was passed, it is returned as-is. +// +// Combine skips over nil arguments so this function may be +// used to combine errors from operations that fail independently +// of each other. +// +// If any of the passed errors is an errorGroup error, it will be +// flattened along with the other errors. +func Combine(errors ...error) error { + return fromSlice(errors) +} + +func fromSlice(errors []error) error { + inspection := inspect(errors) + + switch inspection.topLevelErrorsCount { + case 0: + return nil + case 1: + return errors[inspection.firstErrorIdx] + case len(errors): + if !inspection.containsErrorGroup { + return &multiError{errors: errors} + } + } + + nonNilErrs := make([]error, 0, inspection.errorsCapacity) + + for _, err := range errors[inspection.firstErrorIdx:] { + if err == nil { + continue + } + + if eg, ok := err.(errorGroup); ok { + nonNilErrs = append(nonNilErrs, eg.Errors()...) + + continue + } + + nonNilErrs = append(nonNilErrs, err) + } + + return &multiError{errors: nonNilErrs} +} + +type errorsInspection struct { + topLevelErrorsCount int + errorsCapacity int + firstErrorIdx int + containsErrorGroup bool +} + +func inspect(errors []error) errorsInspection { + var inspection errorsInspection + + first := true + + for i, err := range errors { + if err == nil { + continue + } + + inspection.topLevelErrorsCount++ + if first { + first = false + inspection.firstErrorIdx = i + } + + if eg, ok := err.(errorGroup); ok { + inspection.containsErrorGroup = true + inspection.errorsCapacity += len(eg.Errors()) + + continue + } + + inspection.errorsCapacity++ + } + + return inspection +} + +type multiError struct { + copyNeeded uint32 + errors []error +} + +// As attempts to find the first error in the error list +// that matched the type of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multiError error. +func (e *multiError) As(target interface{}) bool { + for _, err := range e.Errors() { + if xerrors.As(err, target) { + return true + } + } + + return false +} + +// Is attempts to match the provided error against +// errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multiError error. +func (e *multiError) Is(target error) bool { + for _, err := range e.Errors() { + if xerrors.Is(err, target) { + return true + } + } + + return false +} + +func (e *multiError) Error() string { + if e == nil { + return "" + } + + var buff bytes.Buffer + + e.writeSingleLine(&buff) + + return buff.String() +} + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. 
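// (illustrative note, not part of this commit) the method below returns the internal
// slice without copying for performance; mutating it would change the multiError
// itself, which is why the package-level Errors helper earlier in this file hands
// callers a copy they are free to modify.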
+func (e *multiError) Errors() []error { + if e == nil { + return nil + } + + return e.errors +} + +var ( + singleLineSeparator = []byte("; ") + + multiLineSeparator = []byte("\n") + multiLineIndent = []byte(" ") +) + +func (e *multiError) writeSingleLine(w io.Writer) { + first := true + + for _, err := range e.Errors() { + if first { + first = false + } else { + _, _ = w.Write(singleLineSeparator) + } + + _, _ = io.WriteString(w, err.Error()) + } +} + +func (e *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + e.writeMultiLine(f) + + return + } + + e.writeSingleLine(f) +} + +func (e *multiError) writeMultiLine(w io.Writer) { + var ( + errors = e.Errors() + lastIdx = len(errors) - 1 + ) + + for _, err := range errors[:lastIdx] { + writePrefixLine(w, multiLineIndent, fmt.Sprintf("%+v", err)) + + _, _ = w.Write(multiLineSeparator) + } + + writePrefixLine(w, multiLineIndent, fmt.Sprintf("%+v", errors[lastIdx])) +} + +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + + for len(s) > 0 { + if first { + first = false + } else { + _, _ = w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + _, _ = io.WriteString(w, s[:idx+1]) + + s = s[idx+1:] + } +} diff --git a/library/go/core/xerrors/multierr/error_test.go b/library/go/core/xerrors/multierr/error_test.go new file mode 100644 index 0000000000..ac58434948 --- /dev/null +++ b/library/go/core/xerrors/multierr/error_test.go @@ -0,0 +1,647 @@ +package multierr + +import ( + "errors" + "fmt" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/ydb-platform/ydb/library/go/core/xerrors" +) + +func TestCombine(t *testing.T) { + t.Parallel() + + testCases := []struct { + GivenErrors []error + ExpectedError error + }{ + { + GivenErrors: nil, + ExpectedError: nil, + }, + { + GivenErrors: []error{}, + ExpectedError: nil, + }, + { + GivenErrors: []error{ + errors.New("foo"), + nil, + errors.New("bar"), + nil, + }, + ExpectedError: newMultiError( + errors.New("foo"), + errors.New("bar"), + ), + }, + { + GivenErrors: []error{ + errors.New("foo"), + newMultiError(errors.New("bar")), + }, + ExpectedError: newMultiError( + errors.New("foo"), + errors.New("bar"), + ), + }, + { + GivenErrors: []error{errors.New("something wrong")}, + ExpectedError: errors.New("something wrong"), + }, + { + GivenErrors: []error{ + errors.New("foo"), + errors.New("bar"), + }, + ExpectedError: newMultiError( + errors.New("foo"), + errors.New("bar"), + ), + }, + { + GivenErrors: []error{ + errors.New("something"), + errors.New("O\n O\n P\n S\n"), + errors.New("wrong"), + }, + ExpectedError: newMultiError( + errors.New("something"), + errors.New("O\n O\n P\n S\n"), + errors.New("wrong"), + ), + }, + { + GivenErrors: []error{ + errors.New("foo"), + newMultiError( + errors.New("bar"), + errors.New("baz"), + ), + errors.New("qyz"), + }, + ExpectedError: newMultiError( + errors.New("foo"), + errors.New("bar"), + errors.New("baz"), + errors.New("qyz"), + ), + }, + { + GivenErrors: []error{ + errors.New("foo"), + nil, + newMultiError( + errors.New("bar"), + ), + nil, + }, + ExpectedError: newMultiError( + errors.New("foo"), + errors.New("bar"), + ), + }, + { + GivenErrors: []error{ + errors.New("foo"), + newMultiError( + errors.New("bar"), + ), + }, + ExpectedError: newMultiError( + errors.New("foo"), + errors.New("bar"), + ), + }, + } + + for i, c := range testCases { + c := c + + t.Run(fmt.Sprint(i), func(t *testing.T) { + 
t.Parallel() + + require.Equal(t, c.ExpectedError, Combine(c.GivenErrors...)) + }) + } +} + +func TestFormatWithoutTraces(t *testing.T) { + t.Parallel() + + testCases := []struct { + GivenError error + ExpectedSingleLineError string + ExpectedMultiLineError string + }{ + { + GivenError: Combine(errors.New("foo")), + ExpectedSingleLineError: "foo", + ExpectedMultiLineError: "foo", + }, + { + GivenError: Combine( + errors.New("foo"), + errors.New("bar"), + ), + ExpectedSingleLineError: "foo; bar", + ExpectedMultiLineError: "" + + "foo\n" + + "bar", + }, + { + GivenError: Combine( + errors.New("foo"), + errors.New("bar"), + errors.New("baz"), + errors.New("qyz"), + ), + ExpectedSingleLineError: "foo; bar; baz; qyz", + ExpectedMultiLineError: "" + + "foo\n" + + "bar\n" + + "baz\n" + + "qyz", + }, + { + GivenError: Combine( + errors.New("something"), + errors.New("O\n O\n P\n S\n"), + errors.New("wrong"), + ), + ExpectedSingleLineError: "something; O\n O\n P\n S\n; wrong", + ExpectedMultiLineError: "" + + "something\n" + + "O\n" + + " O\n" + + " P\n" + + " S\n\n" + + "wrong", + }, + } + + for i, c := range testCases { + c := c + + t.Run(fmt.Sprint(i), func(t *testing.T) { + t.Parallel() + + t.Run("sprint/single_line", func(t *testing.T) { + assert.Equal(t, c.ExpectedSingleLineError, fmt.Sprintf("%v", c.GivenError)) + }) + + t.Run("error", func(t *testing.T) { + assert.Equal(t, c.ExpectedSingleLineError, c.GivenError.Error()) + }) + + t.Run("sprintf/multi_line", func(t *testing.T) { + assert.Equal(t, c.ExpectedMultiLineError, fmt.Sprintf("%+v", c.GivenError)) + }) + }) + } +} + +func TestCombineDoesNotModifySlice(t *testing.T) { + t.Parallel() + + errs := []error{ + errors.New("foo"), + nil, + errors.New("bar"), + } + + assert.Error(t, Combine(errs...)) + assert.Len(t, errs, 3) + assert.NoError(t, errs[1]) +} + +func TestAppend(t *testing.T) { + t.Parallel() + + testCases := []struct { + GivenLeftError error + GivenRightError error + ExpectedError error + }{ + { + GivenLeftError: nil, + GivenRightError: nil, + ExpectedError: nil, + }, + { + GivenLeftError: nil, + GivenRightError: errors.New("something wrong"), + ExpectedError: errors.New("something wrong"), + }, + { + GivenLeftError: errors.New("something wrong"), + GivenRightError: nil, + ExpectedError: errors.New("something wrong"), + }, + { + GivenLeftError: errors.New("foo"), + GivenRightError: errors.New("bar"), + ExpectedError: newMultiError( + errors.New("foo"), + errors.New("bar"), + ), + }, + { + GivenLeftError: newMultiError( + errors.New("foo"), + errors.New("bar"), + ), + GivenRightError: errors.New("baz"), + ExpectedError: newMultiError( + errors.New("foo"), + errors.New("bar"), + errors.New("baz"), + ), + }, + { + GivenLeftError: errors.New("baz"), + GivenRightError: newMultiError( + errors.New("foo"), + errors.New("bar"), + ), + ExpectedError: newMultiError( + errors.New("baz"), + errors.New("foo"), + errors.New("bar"), + ), + }, + { + GivenLeftError: newMultiError( + errors.New("foo"), + ), + GivenRightError: newMultiError( + errors.New("bar"), + ), + ExpectedError: newMultiError( + errors.New("foo"), + errors.New("bar"), + ), + }, + } + + for i, c := range testCases { + c := c + t.Run(fmt.Sprint(i), func(t *testing.T) { + t.Parallel() + + err := Append(c.GivenLeftError, c.GivenRightError) + + require.Equal(t, c.ExpectedError, err) + }) + } +} + +func TestAppendDoesNotModify(t *testing.T) { + t.Parallel() + + var ( + initial = newMultiErrorWithCapacity() + foo = Append(initial, errors.New("foo")) + bar = Append(initial, 
errors.New("bar")) + ) + + t.Run("initial_not_modified", func(t *testing.T) { + t.Parallel() + + assert.EqualError(t, initial, newMultiErrorWithCapacity().Error()) + }) + + t.Run("errors_appended", func(t *testing.T) { + t.Parallel() + + assert.EqualError(t, bar, Append(newMultiErrorWithCapacity(), errors.New("bar")).Error()) + assert.EqualError(t, foo, Append(newMultiErrorWithCapacity(), errors.New("foo")).Error()) + }) + + t.Run("errors_len_equal", func(t *testing.T) { + t.Parallel() + + assert.Len(t, Errors(foo), len(Errors(bar))) + assert.Len(t, Errors(foo), len(Errors(initial))+1) + }) +} + +func TestErrors(t *testing.T) { + t.Parallel() + + testCases := []struct { + Given error + Expected []error + Cast bool + }{ + { + Given: nil, + Expected: nil, + }, + { + Given: errors.New("go"), + Expected: []error{errors.New("go")}, + }, + { + Given: groupNotMultiError{}, + Expected: groupNotMultiError{}.Errors(), + }, + { + Given: Combine( + errors.New("foo"), + errors.New("bar"), + ), + Expected: []error{ + errors.New("foo"), + errors.New("bar"), + }, + Cast: true, + }, + { + Given: Append( + errors.New("foo"), + errors.New("bar"), + ), + Expected: []error{ + errors.New("foo"), + errors.New("bar"), + }, + Cast: true, + }, + { + Given: Append( + errors.New("foo"), + Combine( + errors.New("bar"), + ), + ), + Expected: []error{ + errors.New("foo"), + errors.New("bar"), + }, + }, + { + Given: Combine( + errors.New("foo"), + Append( + errors.New("bar"), + errors.New("baz"), + ), + errors.New("qux"), + ), + Expected: []error{ + errors.New("foo"), + errors.New("bar"), + errors.New("baz"), + errors.New("qux"), + }, + }, + } + + for i, c := range testCases { + c := c + + t.Run(fmt.Sprint(i), func(t *testing.T) { + t.Parallel() + + t.Run("errors", func(t *testing.T) { + require.Equal(t, c.Expected, Errors(c.Given)) + }) + + if !c.Cast { + return + } + + t.Run("multiError/errors", func(t *testing.T) { + require.Equal(t, c.Expected, c.Given.(*multiError).Errors()) + }) + + t.Run("errorGroup/errors", func(t *testing.T) { + require.Equal(t, c.Expected, c.Given.(errorGroup).Errors()) + }) + }) + } +} + +func TestAppendRace(t *testing.T) { + t.Parallel() + + initial := newMultiErrorWithCapacity() + + require.NotPanics(t, func() { + var wg sync.WaitGroup + + for i := 0; i < 10; i++ { + wg.Add(1) + + go func() { + defer wg.Done() + + err := initial + for j := 0; j < 10; j++ { + err = Append(err, errors.New("foo")) + } + }() + } + + wg.Wait() + }) +} + +func TestErrorsSliceIsImmutable(t *testing.T) { + t.Parallel() + + var ( + foo = errors.New("foo") + bar = errors.New("bar") + ) + + err := Append(foo, bar) + actualErrors := Errors(err) + require.Equal(t, []error{foo, bar}, actualErrors) + + actualErrors[0] = nil + actualErrors[1] = errors.New("bax") + + require.Equal(t, []error{foo, bar}, Errors(err)) +} + +func TestNilMultiError(t *testing.T) { + t.Parallel() + + var err *multiError + + require.Empty(t, err.Error()) + require.Empty(t, err.Errors()) +} + +var ( + errFoo = errors.New("foo") + errBar = errors.New("bar") + errAbsent = errors.New("absent") +) + +func TestIsMultiError(t *testing.T) { + t.Parallel() + + testCases := []struct { + GivenError error + GivenTarget error + ExpectedIs bool + }{ + { + GivenError: nil, + GivenTarget: nil, + ExpectedIs: true, + }, + { + GivenError: nil, + GivenTarget: errFoo, + ExpectedIs: false, + }, + { + GivenError: Combine(errFoo), + GivenTarget: nil, + ExpectedIs: false, + }, + { + GivenError: Combine(errFoo), + GivenTarget: errFoo, + ExpectedIs: true, + }, + { + 
GivenError: Append(errFoo, errBar), + GivenTarget: errFoo, + ExpectedIs: true, + }, + { + GivenError: Append(errFoo, errBar), + GivenTarget: errBar, + ExpectedIs: true, + }, + { + GivenError: Append(errFoo, errBar), + GivenTarget: errAbsent, + ExpectedIs: false, + }, + } + + for i, c := range testCases { + c := c + + t.Run(fmt.Sprint(i), func(t *testing.T) { + t.Parallel() + + if err, ok := c.GivenError.(*multiError); ok { + t.Run("is", func(t *testing.T) { + assert.Equal(t, c.ExpectedIs, err.Is(c.GivenTarget)) + }) + } + + t.Run("errors", func(t *testing.T) { + assert.Equal(t, c.ExpectedIs, errors.Is(c.GivenError, c.GivenTarget)) + }) + + t.Run("xerrors", func(t *testing.T) { + assert.Equal(t, c.ExpectedIs, xerrors.Is(c.GivenError, c.GivenTarget)) + }) + }) + } +} + +func TestAsMultiError(t *testing.T) { + t.Parallel() + + testCases := []struct { + GivenError error + ExpectedAs bool + }{ + { + GivenError: nil, + ExpectedAs: false, + }, + { + GivenError: Combine(targetError{}), + ExpectedAs: true, + }, + { + GivenError: Combine(mockedError{}), + ExpectedAs: false, + }, + { + GivenError: Append(mockedError{}, targetError{}), + ExpectedAs: true, + }, + { + GivenError: Append(mockedError{}, groupNotMultiError{}), + ExpectedAs: false, + }, + } + + for i, c := range testCases { + c := c + + t.Run(fmt.Sprint(i), func(t *testing.T) { + t.Parallel() + + var target targetError + + if err, ok := c.GivenError.(*multiError); ok { + t.Run("as", func(t *testing.T) { + assert.Equal(t, c.ExpectedAs, err.As(&target)) + }) + } + + t.Run("errors", func(t *testing.T) { + assert.Equal(t, c.ExpectedAs, errors.As(c.GivenError, &target)) + }) + + t.Run("xerrors", func(t *testing.T) { + assert.Equal(t, c.ExpectedAs, xerrors.As(c.GivenError, &target)) + }) + }) + } +} + +func newMultiError(errors ...error) error { + return &multiError{errors: errors} +} + +func newMultiErrorWithCapacity() error { + return appendN(nil, errors.New("append"), 50) +} + +func appendN(initial, err error, repeat int) error { + errs := initial + + for i := 0; i < repeat; i++ { + errs = Append(errs, err) + } + + return errs +} + +type groupNotMultiError struct{} + +func (e groupNotMultiError) Error() string { + return "something wrong" +} + +func (e groupNotMultiError) Errors() []error { + return []error{errors.New("something wrong")} +} + +type mockedError struct{} + +func (e mockedError) Error() string { + return "mocked" +} + +type targetError struct{} + +func (e targetError) Error() string { + return "target" +} diff --git a/library/go/core/xerrors/multierr/gotest/ya.make b/library/go/core/xerrors/multierr/gotest/ya.make new file mode 100644 index 0000000000..f1881bfbe6 --- /dev/null +++ b/library/go/core/xerrors/multierr/gotest/ya.make @@ -0,0 +1,5 @@ +GO_TEST_FOR(library/go/core/xerrors/multierr) + +ENABLE(GO_DEBUG_PATH_RELATIVE) + +END() diff --git a/library/go/core/xerrors/multierr/ya.make b/library/go/core/xerrors/multierr/ya.make new file mode 100644 index 0000000000..b3741d43f6 --- /dev/null +++ b/library/go/core/xerrors/multierr/ya.make @@ -0,0 +1,15 @@ +GO_LIBRARY() + +SRCS( + error.go +) + +GO_TEST_SRCS( + error_test.go +) + +END() + +RECURSE( + gotest +) diff --git a/library/go/core/xerrors/new.go b/library/go/core/xerrors/new.go new file mode 100644 index 0000000000..0749f125a7 --- /dev/null +++ b/library/go/core/xerrors/new.go @@ -0,0 +1,48 @@ +package xerrors + +import ( + "fmt" + "io" + + "github.com/ydb-platform/ydb/library/go/x/xruntime" +) + +type newError struct { + msg string + stacktrace *xruntime.StackTrace +} + 
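The newError type above backs the package's New constructor (defined just below). A hedged usage sketch; whether %+v actually carries frames depends on the stack trace mode configured via the switches shown earlier:

package main

import (
	"fmt"

	"github.com/ydb-platform/ydb/library/go/core/xerrors"
)

func main() {
	err := xerrors.New("connection reset")

	fmt.Printf("%s\n", err)  // connection reset
	fmt.Printf("%q\n", err)  // "connection reset"
	fmt.Printf("%+v\n", err) // message plus captured stack trace, if the mode captures one
}
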
+var _ ErrorStackTrace = &newError{} + +func New(text string) error { + return &newError{ + msg: text, + stacktrace: newStackTrace(1, nil), + } +} + +func (e *newError) Error() string { + return e.msg +} + +func (e *newError) Format(s fmt.State, v rune) { + switch v { + case 'v': + if s.Flag('+') && e.stacktrace != nil { + _, _ = io.WriteString(s, e.msg) + _, _ = io.WriteString(s, "\n") + writeStackTrace(s, e.stacktrace) + return + } + + fallthrough + case 's': + _, _ = io.WriteString(s, e.msg) + case 'q': + _, _ = fmt.Fprintf(s, "%q", e.msg) + } +} + +func (e *newError) StackTrace() *xruntime.StackTrace { + return e.stacktrace +} diff --git a/library/go/core/xerrors/new_formatting_test.go b/library/go/core/xerrors/new_formatting_test.go new file mode 100644 index 0000000000..30daa409f8 --- /dev/null +++ b/library/go/core/xerrors/new_formatting_test.go @@ -0,0 +1,58 @@ +package xerrors + +import ( + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors" +) + +func TestNewFormatting(t *testing.T) { + constructor := func(t *testing.T) error { + return New("new") + } + expected := assertxerrors.Expectations{ + ExpectedS: "new", + ExpectedV: "new", + Frames: assertxerrors.NewStackTraceModeExpectation(` +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestNewFormatting.func1 + library/go/core/xerrors/new_formatting_test.go:11 +`, + ), + Stacks: assertxerrors.NewStackTraceModeExpectation(` +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestNewFormatting.func1 + library/go/core/xerrors/new_formatting_test.go:11 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:83 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + StackThenFrames: assertxerrors.NewStackTraceModeExpectation(` +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestNewFormatting.func1 + library/go/core/xerrors/new_formatting_test.go:11 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:83 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + StackThenNothing: assertxerrors.NewStackTraceModeExpectation(` +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestNewFormatting.func1 + library/go/core/xerrors/new_formatting_test.go:11 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:83 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + Nothing: assertxerrors.NewStackTraceModeExpectation("new"), + } + assertxerrors.RunTestsPerMode(t, expected, constructor) +} diff --git a/library/go/core/xerrors/sentinel.go b/library/go/core/xerrors/sentinel.go new file mode 100644 index 0000000000..6651588619 --- /dev/null +++ b/library/go/core/xerrors/sentinel.go @@ -0,0 +1,150 @@ +package xerrors + +import ( + "errors" + "fmt" + "io" + "strings" + + "github.com/ydb-platform/ydb/library/go/x/xreflect" + "github.com/ydb-platform/ydb/library/go/x/xruntime" +) + +// NewSentinel acts as New but does not add stack frame +func NewSentinel(text string) *Sentinel { + return &Sentinel{error: 
errors.New(text)} +} + +// Sentinel error +type Sentinel struct { + error +} + +// WithFrame adds stack frame to sentinel error (DEPRECATED) +func (s *Sentinel) WithFrame() error { + return &sentinelWithStackTrace{ + err: s, + stacktrace: newStackTrace(1, nil), + } +} + +func (s *Sentinel) WithStackTrace() error { + return &sentinelWithStackTrace{ + err: s, + stacktrace: newStackTrace(1, nil), + } +} + +// Wrap error with this sentinel error. Adds stack frame. +func (s *Sentinel) Wrap(err error) error { + if err == nil { + panic("tried to wrap a nil error") + } + + return &sentinelWrapper{ + err: s, + wrapped: err, + stacktrace: newStackTrace(1, err), + } +} + +type sentinelWithStackTrace struct { + err error + stacktrace *xruntime.StackTrace +} + +func (e *sentinelWithStackTrace) Error() string { + return e.err.Error() +} + +func (e *sentinelWithStackTrace) Format(s fmt.State, v rune) { + switch v { + case 'v': + if s.Flag('+') && e.stacktrace != nil { + msg := e.err.Error() + _, _ = io.WriteString(s, msg) + writeMsgAndStackTraceSeparator(s, msg) + writeStackTrace(s, e.stacktrace) + return + } + fallthrough + case 's': + _, _ = io.WriteString(s, e.err.Error()) + case 'q': + _, _ = fmt.Fprintf(s, "%q", e.err.Error()) + } +} + +func writeMsgAndStackTraceSeparator(w io.Writer, msg string) { + separator := "\n" + if !strings.HasSuffix(msg, ":") { + separator = ":\n" + } + + _, _ = io.WriteString(w, separator) +} + +// Is checks if e holds the specified error. Checks only immediate error. +func (e *sentinelWithStackTrace) Is(target error) bool { + return e.err == target +} + +// As checks if ew holds the specified error type. Checks only immediate error. +// It does NOT perform target checks as it relies on errors.As to do it +func (e *sentinelWithStackTrace) As(target interface{}) bool { + return xreflect.Assign(e.err, target) +} + +type sentinelWrapper struct { + err error + wrapped error + stacktrace *xruntime.StackTrace +} + +func (e *sentinelWrapper) Error() string { + return fmt.Sprintf("%s", e) +} + +func (e *sentinelWrapper) Format(s fmt.State, v rune) { + switch v { + case 'v': + if s.Flag('+') { + if e.stacktrace != nil { + msg := e.err.Error() + _, _ = io.WriteString(s, msg) + writeMsgAndStackTraceSeparator(s, msg) + writeStackTrace(s, e.stacktrace) + _, _ = fmt.Fprintf(s, "%+v", e.wrapped) + } else { + _, _ = io.WriteString(s, e.err.Error()) + _, _ = io.WriteString(s, ": ") + _, _ = fmt.Fprintf(s, "%+v", e.wrapped) + } + + return + } + fallthrough + case 's': + _, _ = io.WriteString(s, e.err.Error()) + _, _ = io.WriteString(s, ": ") + _, _ = io.WriteString(s, e.wrapped.Error()) + case 'q': + _, _ = fmt.Fprintf(s, "%q", fmt.Sprintf("%s: %s", e.err.Error(), e.wrapped.Error())) + } +} + +// Unwrap implements Wrapper interface +func (e *sentinelWrapper) Unwrap() error { + return e.wrapped +} + +// Is checks if ew holds the specified error. Checks only immediate error. +func (e *sentinelWrapper) Is(target error) bool { + return e.err == target +} + +// As checks if error holds the specified error type. Checks only immediate error. 
+// It does NOT perform target checks as it relies on errors.As to do it +func (e *sentinelWrapper) As(target interface{}) bool { + return xreflect.Assign(e.err, target) +} diff --git a/library/go/core/xerrors/sentinel_test.go b/library/go/core/xerrors/sentinel_test.go new file mode 100644 index 0000000000..dd566ea366 --- /dev/null +++ b/library/go/core/xerrors/sentinel_test.go @@ -0,0 +1,92 @@ +package xerrors + +import ( + "fmt" + "io" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSentinelWrapNil(t *testing.T) { + sentinel := NewSentinel("sentinel") + assert.Panics(t, func() { assert.NoError(t, sentinel.Wrap(nil)) }) +} + +func TestSentinelWrap(t *testing.T) { + sentinel := NewSentinel("sentinel") + assert.EqualError(t, sentinel.Wrap(New("err")), "sentinel: err") +} + +func TestSentinelMultiWrap(t *testing.T) { + top := NewSentinel("top") + middle := NewSentinel("middle") + err := top.Wrap(middle.Wrap(New("bottom"))) + assert.EqualError(t, err, "top: middle: bottom") +} + +func TestSentinelIs(t *testing.T) { + sentinel := NewSentinel("sentinel") + assert.True(t, Is(sentinel, sentinel)) + assert.True(t, Is(sentinel.Wrap(New("err")), sentinel)) + assert.True(t, Is(NewSentinel("err").Wrap(sentinel), sentinel)) + assert.True(t, Is(Errorf("wrapper: %w", sentinel), sentinel)) + assert.True(t, Is(sentinel.WithStackTrace(), sentinel)) + assert.True(t, Is(Errorf("wrapper: %w", sentinel.WithStackTrace()), sentinel)) +} + +func TestSentinelMultiWrapIs(t *testing.T) { + top := NewSentinel("top") + middle := NewSentinel("middle") + err := top.Wrap(middle.Wrap(io.EOF)) + assert.True(t, Is(err, top)) + assert.True(t, Is(err, middle)) + assert.True(t, Is(err, io.EOF)) + assert.False(t, Is(err, New("random"))) +} + +func TestSentinelAs(t *testing.T) { + sentinel := NewSentinel("sentinel") + var target *Sentinel + + assert.True(t, As(sentinel, &target)) + assert.NotNil(t, target) + target = nil + + assert.True(t, As(sentinel.Wrap(New("err")), &target)) + assert.NotNil(t, target) + target = nil + + assert.True(t, As(NewSentinel("err").Wrap(sentinel), &target)) + assert.NotNil(t, target) + target = nil + + assert.True(t, As(Errorf("wrapper: %w", sentinel), &target)) + assert.NotNil(t, target) + target = nil + + assert.True(t, As(sentinel.WithStackTrace(), &target)) + assert.NotNil(t, target) + target = nil + + assert.True(t, As(Errorf("wrapper: %w", sentinel.WithStackTrace()), &target)) + assert.NotNil(t, target) + target = nil +} + +func TestSentinelMultiWrapAs(t *testing.T) { + top := NewSentinel("top") + middle := NewSentinel("middle") + err := top.Wrap(middle.Wrap(io.EOF)) + + var target *Sentinel + assert.True(t, As(err, &target)) + assert.NotNil(t, target) +} + +func TestSentinelFormatting(t *testing.T) { + sentinel := NewSentinel("sentinel") + assert.Equal(t, "sentinel", fmt.Sprintf("%s", sentinel)) + assert.Equal(t, "sentinel", fmt.Sprintf("%v", sentinel)) + assert.Equal(t, "sentinel", fmt.Sprintf("%+v", sentinel)) +} diff --git a/library/go/core/xerrors/sentinel_with_stack_formatting_with_colon_test.go b/library/go/core/xerrors/sentinel_with_stack_formatting_with_colon_test.go new file mode 100644 index 0000000000..4ad87ff91b --- /dev/null +++ b/library/go/core/xerrors/sentinel_with_stack_formatting_with_colon_test.go @@ -0,0 +1,59 @@ +package xerrors + +import ( + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors" +) + +func TestSentinelWithStackTraceFormattingWithColon(t *testing.T) { + constructor := func(t *testing.T) error { + err := 
NewSentinel("sentinel:") + return err.WithStackTrace() + } + expected := assertxerrors.Expectations{ + ExpectedS: "sentinel:", + ExpectedV: "sentinel:", + Frames: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWithStackTraceFormattingWithColon.func1 + library/go/core/xerrors/sentinel_with_stack_formatting_with_colon_test.go:12 +`, + ), + Stacks: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWithStackTraceFormattingWithColon.func1 + library/go/core/xerrors/sentinel_with_stack_formatting_with_colon_test.go:12 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + StackThenFrames: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWithStackTraceFormattingWithColon.func1 + library/go/core/xerrors/sentinel_with_stack_formatting_with_colon_test.go:12 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + StackThenNothing: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWithStackTraceFormattingWithColon.func1 + library/go/core/xerrors/sentinel_with_stack_formatting_with_colon_test.go:12 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + Nothing: assertxerrors.NewStackTraceModeExpectation("sentinel:"), + } + assertxerrors.RunTestsPerMode(t, expected, constructor) +} diff --git a/library/go/core/xerrors/sentinel_with_stack_formatting_without_colon_test.go b/library/go/core/xerrors/sentinel_with_stack_formatting_without_colon_test.go new file mode 100644 index 0000000000..f5d057a2ef --- /dev/null +++ b/library/go/core/xerrors/sentinel_with_stack_formatting_without_colon_test.go @@ -0,0 +1,59 @@ +package xerrors + +import ( + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors" +) + +func TestSentinelWithStackTraceFormattingWithoutColon(t *testing.T) { + constructor := func(t *testing.T) error { + err := NewSentinel("sentinel") + return err.WithStackTrace() + } + expected := assertxerrors.Expectations{ + ExpectedS: "sentinel", + ExpectedV: "sentinel", + Frames: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWithStackTraceFormattingWithoutColon.func1 + library/go/core/xerrors/sentinel_with_stack_formatting_without_colon_test.go:12 +`, + ), + Stacks: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWithStackTraceFormattingWithoutColon.func1 + library/go/core/xerrors/sentinel_with_stack_formatting_without_colon_test.go:12 + 
github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + StackThenFrames: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWithStackTraceFormattingWithoutColon.func1 + library/go/core/xerrors/sentinel_with_stack_formatting_without_colon_test.go:12 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + StackThenNothing: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWithStackTraceFormattingWithoutColon.func1 + library/go/core/xerrors/sentinel_with_stack_formatting_without_colon_test.go:12 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + Nothing: assertxerrors.NewStackTraceModeExpectation("sentinel"), + } + assertxerrors.RunTestsPerMode(t, expected, constructor) +} diff --git a/library/go/core/xerrors/sentinel_wrap_formatting_with_colon_test.go b/library/go/core/xerrors/sentinel_wrap_formatting_with_colon_test.go new file mode 100644 index 0000000000..d5d96ecab0 --- /dev/null +++ b/library/go/core/xerrors/sentinel_wrap_formatting_with_colon_test.go @@ -0,0 +1,60 @@ +package xerrors + +import ( + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors" +) + +func TestSentinelWrapFormattingWithColon(t *testing.T) { + constructor := func(t *testing.T) error { + sentinel := NewSentinel("sntnl_wrapper:") + err := NewSentinel("sentinel") + return sentinel.Wrap(err) + } + expected := assertxerrors.Expectations{ + ExpectedS: "sntnl_wrapper:: sentinel", + ExpectedV: "sntnl_wrapper:: sentinel", + Frames: assertxerrors.NewStackTraceModeExpectation(` +sntnl_wrapper: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapFormattingWithColon.func1 + library/go/core/xerrors/sentinel_wrap_formatting_with_colon_test.go:13 +sentinel`, + ), + Stacks: assertxerrors.NewStackTraceModeExpectation(` +sntnl_wrapper: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapFormattingWithColon.func1 + library/go/core/xerrors/sentinel_wrap_formatting_with_colon_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +sentinel`, + 3, 4, 5, 6, + ), + StackThenFrames: assertxerrors.NewStackTraceModeExpectation(` +sntnl_wrapper: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapFormattingWithColon.func1 + library/go/core/xerrors/sentinel_wrap_formatting_with_colon_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + 
/home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +sentinel`, + 3, 4, 5, 6, + ), + StackThenNothing: assertxerrors.NewStackTraceModeExpectation(` +sntnl_wrapper: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapFormattingWithColon.func1 + library/go/core/xerrors/sentinel_wrap_formatting_with_colon_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +sentinel`, + 3, 4, 5, 6, + ), + Nothing: assertxerrors.NewStackTraceModeExpectation("sntnl_wrapper:: sentinel"), + } + assertxerrors.RunTestsPerMode(t, expected, constructor) +} diff --git a/library/go/core/xerrors/sentinel_wrap_formatting_without_colon_test.go b/library/go/core/xerrors/sentinel_wrap_formatting_without_colon_test.go new file mode 100644 index 0000000000..f5936b1835 --- /dev/null +++ b/library/go/core/xerrors/sentinel_wrap_formatting_without_colon_test.go @@ -0,0 +1,60 @@ +package xerrors + +import ( + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors" +) + +func TestSentinelWrapFormattingWithoutColon(t *testing.T) { + constructor := func(t *testing.T) error { + sentinel := NewSentinel("sntnl_wrapper") + err := NewSentinel("sentinel") + return sentinel.Wrap(err) + } + expected := assertxerrors.Expectations{ + ExpectedS: "sntnl_wrapper: sentinel", + ExpectedV: "sntnl_wrapper: sentinel", + Frames: assertxerrors.NewStackTraceModeExpectation(` +sntnl_wrapper: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapFormattingWithoutColon.func1 + library/go/core/xerrors/sentinel_wrap_formatting_without_colon_test.go:13 +sentinel`, + ), + Stacks: assertxerrors.NewStackTraceModeExpectation(` +sntnl_wrapper: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapFormattingWithoutColon.func1 + library/go/core/xerrors/sentinel_wrap_formatting_without_colon_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +sentinel`, + 3, 4, 5, 6, + ), + StackThenFrames: assertxerrors.NewStackTraceModeExpectation(` +sntnl_wrapper: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapFormattingWithoutColon.func1 + library/go/core/xerrors/sentinel_wrap_formatting_without_colon_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +sentinel`, + 3, 4, 5, 6, + ), + StackThenNothing: assertxerrors.NewStackTraceModeExpectation(` +sntnl_wrapper: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapFormattingWithoutColon.func1 + library/go/core/xerrors/sentinel_wrap_formatting_without_colon_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + 
/home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +sentinel`, + 3, 4, 5, 6, + ), + Nothing: assertxerrors.NewStackTraceModeExpectation("sntnl_wrapper: sentinel"), + } + assertxerrors.RunTestsPerMode(t, expected, constructor) +} diff --git a/library/go/core/xerrors/sentinel_wrap_new_formatting_test.go b/library/go/core/xerrors/sentinel_wrap_new_formatting_test.go new file mode 100644 index 0000000000..5514f0c27f --- /dev/null +++ b/library/go/core/xerrors/sentinel_wrap_new_formatting_test.go @@ -0,0 +1,73 @@ +package xerrors + +import ( + "testing" + + "github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors" +) + +func TestSentinelWrapNewFormatting(t *testing.T) { + constructor := func(t *testing.T) error { + err := New("new") + sentinel := NewSentinel("sentinel") + return sentinel.Wrap(err) + } + expected := assertxerrors.Expectations{ + ExpectedS: "sentinel: new", + ExpectedV: "sentinel: new", + Frames: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapNewFormatting.func1 + library/go/core/xerrors/sentinel_wrap_new_formatting_test.go:13 +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapNewFormatting.func1 + library/go/core/xerrors/sentinel_wrap_new_formatting_test.go:11 +`, + ), + Stacks: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapNewFormatting.func1 + library/go/core/xerrors/sentinel_wrap_new_formatting_test.go:13 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapNewFormatting.func1 + library/go/core/xerrors/sentinel_wrap_new_formatting_test.go:11 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, 10, 11, 12, 13, + ), + StackThenFrames: assertxerrors.NewStackTraceModeExpectation(` +sentinel: + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapNewFormatting.func1 + library/go/core/xerrors/sentinel_wrap_new_formatting_test.go:13 +new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapNewFormatting.func1 + library/go/core/xerrors/sentinel_wrap_new_formatting_test.go:11 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + /home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 6, 7, 8, 9, + ), + StackThenNothing: assertxerrors.NewStackTraceModeExpectation(` +sentinel: new + github.com/ydb-platform/ydb/library/go/core/xerrors.TestSentinelWrapNewFormatting.func1 + library/go/core/xerrors/sentinel_wrap_new_formatting_test.go:11 + github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors.RunTestsPerMode.func1 + 
/home/sidh/devel/go/src/github.com/ydb-platform/ydb/library/go/core/xerrors/assertxerrors/assertxerrors.go:18 + testing.tRunner + /home/sidh/.ya/tools/v4/774223543/src/testing/testing.go:1127 +`, + 3, 4, 5, 6, + ), + Nothing: assertxerrors.NewStackTraceModeExpectation("sentinel: new"), + } + assertxerrors.RunTestsPerMode(t, expected, constructor) +} diff --git a/library/go/core/xerrors/stacktrace.go b/library/go/core/xerrors/stacktrace.go new file mode 100644 index 0000000000..84a00b14f4 --- /dev/null +++ b/library/go/core/xerrors/stacktrace.go @@ -0,0 +1,80 @@ +package xerrors + +import ( + "errors" + "fmt" + "io" + + "github.com/ydb-platform/ydb/library/go/x/xruntime" +) + +func writeStackTrace(w io.Writer, stacktrace *xruntime.StackTrace) { + for _, frame := range stacktrace.Frames() { + if frame.Function != "" { + _, _ = fmt.Fprintf(w, " %s\n ", frame.Function) + } + + if frame.File != "" { + _, _ = fmt.Fprintf(w, " %s:%d\n", frame.File, frame.Line) + } + } +} + +type ErrorStackTrace interface { + StackTrace() *xruntime.StackTrace +} + +// StackTraceOfEffect returns last stacktrace that was added to error chain (furthest from the root error). +// Guarantees that returned value has valid StackTrace object (but not that there are any frames). +func StackTraceOfEffect(err error) ErrorStackTrace { + var st ErrorStackTrace + for { + if !As(err, &st) { + return nil + } + + if st.StackTrace() != nil { + return st + } + + err = st.(error) + err = errors.Unwrap(err) + } +} + +// StackTraceOfCause returns first stacktrace that was added to error chain (closest to the root error). +// Guarantees that returned value has valid StackTrace object (but not that there are any frames). +func StackTraceOfCause(err error) ErrorStackTrace { + var res ErrorStackTrace + var st ErrorStackTrace + for { + if !As(err, &st) { + return res + } + + if st.StackTrace() != nil { + res = st + } + + err = st.(error) + err = errors.Unwrap(err) + } +} + +// NextStackTracer returns next error with stack trace. +// Guarantees that returned value has valid StackTrace object (but not that there are any frames). +func NextStackTrace(st ErrorStackTrace) ErrorStackTrace { + var res ErrorStackTrace + for { + err := st.(error) + err = errors.Unwrap(err) + + if !As(err, &res) { + return nil + } + + if res.StackTrace() != nil { + return res + } + } +} diff --git a/library/go/core/xerrors/ya.make b/library/go/core/xerrors/ya.make new file mode 100644 index 0000000000..48ded42a2f --- /dev/null +++ b/library/go/core/xerrors/ya.make @@ -0,0 +1,36 @@ +GO_LIBRARY() + +SRCS( + doc.go + errorf.go + forward.go + mode.go + new.go + sentinel.go + stacktrace.go +) + +GO_TEST_SRCS( + benchmark_test.go + errorf_formatting_with_error_test.go + errorf_formatting_with_std_error_test.go + errorf_formatting_without_error_test.go + errorf_multiple_errors_test.go + new_formatting_test.go + sentinel_test.go + sentinel_with_stack_formatting_with_colon_test.go + sentinel_with_stack_formatting_without_colon_test.go + sentinel_wrap_formatting_with_colon_test.go + sentinel_wrap_formatting_without_colon_test.go + sentinel_wrap_new_formatting_test.go +) + +END() + +RECURSE( + assertxerrors + benchxerrors + gotest + internal + multierr +) diff --git a/library/go/test/testhelpers/recurse.go b/library/go/test/testhelpers/recurse.go new file mode 100644 index 0000000000..1239d39e01 --- /dev/null +++ b/library/go/test/testhelpers/recurse.go @@ -0,0 +1,12 @@ +package testhelpers + +// Recurse calls itself 'depth' times then executes 'f'. 
Useful for testing things where stack size matters. +func Recurse(depth int, f func()) { + if depth > 0 { + depth-- + Recurse(depth, f) + return + } + + f() +} diff --git a/library/go/test/testhelpers/remove_lines.go b/library/go/test/testhelpers/remove_lines.go new file mode 100644 index 0000000000..214a2627a8 --- /dev/null +++ b/library/go/test/testhelpers/remove_lines.go @@ -0,0 +1,47 @@ +package testhelpers + +import ( + "fmt" + "sort" + "strings" +) + +func RemoveLines(str string, lines ...int) (string, error) { + if len(lines) == 0 { + return str, nil + } + + sort.Ints(lines) + + var b strings.Builder + b.Grow(len(str)) + + var count int + var start int + var lineID int + for i, s := range str { + if s != '\n' { + continue + } + + if lines[lineID] != count { + b.WriteString(str[start:i]) + b.WriteString("\n") + } else { + lineID++ + if len(lines) <= lineID { + b.WriteString(str[i+1:]) + break + } + } + + count++ + start = i + 1 + } + + if len(lines) > lineID { + return str, fmt.Errorf("not all lines were removed: processed line ids before %d for lines %d", lineID, lines) + } + + return b.String(), nil +} diff --git a/library/go/test/testhelpers/ya.make b/library/go/test/testhelpers/ya.make new file mode 100644 index 0000000000..267b73b0dc --- /dev/null +++ b/library/go/test/testhelpers/ya.make @@ -0,0 +1,12 @@ +GO_LIBRARY() + +SRCS( + recurse.go + remove_lines.go +) + +GO_TEST_SRCS(remove_lines_test.go) + +END() + +RECURSE(gotest) diff --git a/library/go/x/xreflect/assign.go b/library/go/x/xreflect/assign.go new file mode 100644 index 0000000000..624612575c --- /dev/null +++ b/library/go/x/xreflect/assign.go @@ -0,0 +1,17 @@ +package xreflect + +import "reflect" + +// Assign source's value to target's value it points to. Source must be value, target must be pointer to existing value. +// Source must be assignable to target's value it points to. 
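A hedged sketch of the intended call pattern for Assign (the implementation follows immediately below); the target is passed as a pointer so that an interface variable can be filled in:

package xreflect_test

import (
	"bytes"
	"fmt"

	"github.com/ydb-platform/ydb/library/go/x/xreflect"
)

func ExampleAssign() {
	var target fmt.Stringer

	// *bytes.Buffer satisfies fmt.Stringer, so the assignment succeeds.
	ok := xreflect.Assign(bytes.NewBufferString("hello"), &target)

	fmt.Println(ok, target.String())
	// Output: true hello
}
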
+func Assign(source interface{}, target interface{}) bool { + val := reflect.ValueOf(target) + typ := val.Type() + targetType := typ.Elem() + if reflect.TypeOf(source).AssignableTo(targetType) { + val.Elem().Set(reflect.ValueOf(source)) + return true + } + + return false +} diff --git a/library/go/x/xreflect/assign_test.go b/library/go/x/xreflect/assign_test.go new file mode 100644 index 0000000000..45fd7186a7 --- /dev/null +++ b/library/go/x/xreflect/assign_test.go @@ -0,0 +1,83 @@ +package xreflect_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/ydb-platform/ydb/library/go/x/xreflect" +) + +type Foo struct { + S string +} + +var _ InterfaceFoo = &Foo{} + +func (f *Foo) FuncFoo() {} + +type InterfaceFoo interface { + FuncFoo() +} + +type Bar struct { + I int +} + +var _ InterfaceBar = &Bar{} + +func (f *Bar) FuncBar() {} + +type InterfaceBar interface { + FuncBar() +} + +func TestAssign(t *testing.T) { + t.Run("ValueToValue", func(t *testing.T) { + src := Foo{S: "S"} + var dst Foo + require.True(t, xreflect.Assign(src, &dst)) + require.Equal(t, src, dst) + }) + + t.Run("ValueToValueInvalid", func(t *testing.T) { + src := Foo{S: "S"} + var dst Bar + require.False(t, xreflect.Assign(src, &dst)) + require.Equal(t, Bar{}, dst) + }) + + t.Run("ValueToInterface", func(t *testing.T) { + src := Foo{S: "S"} + var dst InterfaceFoo + require.True(t, xreflect.Assign(&src, &dst)) + require.NotNil(t, dst) + v, ok := dst.(*Foo) + require.True(t, ok) + require.Equal(t, &src, v) + }) + + t.Run("ValueToInterfaceInvalid", func(t *testing.T) { + src := Bar{I: 42} + var dst InterfaceFoo + require.False(t, xreflect.Assign(&src, &dst)) + require.Nil(t, dst) + }) + + t.Run("InterfaceToInterface", func(t *testing.T) { + src := InterfaceFoo(&Foo{S: "S"}) + var dst InterfaceFoo + require.True(t, xreflect.Assign(src, &dst)) + require.NotNil(t, dst) + require.Equal(t, src, dst) + v, ok := dst.(*Foo) + require.True(t, ok) + require.Equal(t, src, v) + }) + + t.Run("InterfaceToInterfaceInvalid", func(t *testing.T) { + src := InterfaceFoo(&Foo{S: "S"}) + var dst InterfaceBar + require.False(t, xreflect.Assign(src, &dst)) + require.Nil(t, dst) + }) +} diff --git a/library/go/x/xreflect/gotest/ya.make b/library/go/x/xreflect/gotest/ya.make new file mode 100644 index 0000000000..ff492d3cc2 --- /dev/null +++ b/library/go/x/xreflect/gotest/ya.make @@ -0,0 +1,3 @@ +GO_TEST_FOR(library/go/x/xreflect) + +END() diff --git a/library/go/x/xreflect/ya.make b/library/go/x/xreflect/ya.make new file mode 100644 index 0000000000..e2e2632ac7 --- /dev/null +++ b/library/go/x/xreflect/ya.make @@ -0,0 +1,9 @@ +GO_LIBRARY() + +SRCS(assign.go) + +GO_XTEST_SRCS(assign_test.go) + +END() + +RECURSE(gotest) diff --git a/library/go/x/xruntime/gotest/ya.make b/library/go/x/xruntime/gotest/ya.make new file mode 100644 index 0000000000..86def649c9 --- /dev/null +++ b/library/go/x/xruntime/gotest/ya.make @@ -0,0 +1,3 @@ +GO_TEST_FOR(library/go/x/xruntime) + +END() diff --git a/library/go/x/xruntime/stacktrace.go b/library/go/x/xruntime/stacktrace.go new file mode 100644 index 0000000000..5c5e661188 --- /dev/null +++ b/library/go/x/xruntime/stacktrace.go @@ -0,0 +1,69 @@ +package xruntime + +import ( + "runtime" +) + +type StackTrace struct { + frames []uintptr + full bool +} + +func NewStackTrace16(skip int) *StackTrace { + var pcs [16]uintptr + return newStackTrace(skip+2, pcs[:]) +} + +func NewStackTrace32(skip int) *StackTrace { + var pcs [32]uintptr + return newStackTrace(skip+2, pcs[:]) +} + +func 
NewStackTrace64(skip int) *StackTrace { + var pcs [64]uintptr + return newStackTrace(skip+2, pcs[:]) +} + +func NewStackTrace128(skip int) *StackTrace { + var pcs [128]uintptr + return newStackTrace(skip+2, pcs[:]) +} + +func newStackTrace(skip int, pcs []uintptr) *StackTrace { + n := runtime.Callers(skip+1, pcs) + return &StackTrace{frames: pcs[:n], full: true} +} + +func NewFrame(skip int) *StackTrace { + var pcs [3]uintptr + n := runtime.Callers(skip+1, pcs[:]) + return &StackTrace{frames: pcs[:n]} +} + +func (st *StackTrace) Frames() []runtime.Frame { + frames := runtime.CallersFrames(st.frames[:]) + if !st.full { + if _, ok := frames.Next(); !ok { + return nil + } + + fr, ok := frames.Next() + if !ok { + return nil + } + + return []runtime.Frame{fr} + } + + var res []runtime.Frame + for { + frame, more := frames.Next() + if !more { + break + } + + res = append(res, frame) + } + + return res +} diff --git a/library/go/x/xruntime/stacktrace_benchmark_test.go b/library/go/x/xruntime/stacktrace_benchmark_test.go new file mode 100644 index 0000000000..557f0148bf --- /dev/null +++ b/library/go/x/xruntime/stacktrace_benchmark_test.go @@ -0,0 +1,46 @@ +package xruntime + +import ( + "fmt" + "testing" + + "github.com/ydb-platform/ydb/library/go/test/testhelpers" +) + +func BenchmarkNew(b *testing.B) { + inputs := []struct { + Name string + Func func(skip int) *StackTrace + }{ + { + Name: "Frame", + Func: NewFrame, + }, + { + Name: "StackTrace16", + Func: NewStackTrace16, + }, + { + Name: "StackTrace32", + Func: NewStackTrace32, + }, + { + Name: "StackTrace64", + Func: NewStackTrace64, + }, + { + Name: "StackTrace128", + Func: NewStackTrace128, + }, + } + + for _, depth := range []int{1, 16, 32, 64, 128, 256} { + for _, input := range inputs { + b.Run(fmt.Sprintf("Depth%d_%s", depth, input.Name), func(b *testing.B) { + for i := 0; i < b.N; i++ { + testhelpers.Recurse(depth, func() { input.Func(0) }) + } + }) + } + } +} diff --git a/library/go/x/xruntime/ya.make b/library/go/x/xruntime/ya.make new file mode 100644 index 0000000000..7060a9781e --- /dev/null +++ b/library/go/x/xruntime/ya.make @@ -0,0 +1,9 @@ +GO_LIBRARY() + +SRCS(stacktrace.go) + +GO_TEST_SRCS(stacktrace_benchmark_test.go) + +END() + +RECURSE(gotest) diff --git a/ydb/library/yql/providers/generic/connector/api/service/connector.proto b/ydb/library/yql/providers/generic/connector/api/service/connector.proto index 056d104478..ec1b855ff3 100644 --- a/ydb/library/yql/providers/generic/connector/api/service/connector.proto +++ b/ydb/library/yql/providers/generic/connector/api/service/connector.proto @@ -4,8 +4,7 @@ package NYql.NConnector.NApi; import "ydb/library/yql/providers/generic/connector/api/service/protos/connector.proto"; -// NOTE: Protobuf-generated code for Go will appear in yql (non-public) path -option go_package = "github.com/ydb-platform/ydb/yql/providers/connector/api/service"; +option go_package = "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service"; // Connector provides unified interface for various data sources that can be used to extend capabilities // of YQ and YQL services. 
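Stepping back to the xruntime package added above: NewFrame records essentially a single calling frame, while the NewStackTraceN constructors capture up to N program counters; Frames() expands either form into runtime.Frame values. A minimal sketch:

package main

import (
	"fmt"

	"github.com/ydb-platform/ydb/library/go/x/xruntime"
)

func main() {
	// Capture up to 32 frames starting from this call site.
	st := xruntime.NewStackTrace32(0)

	for _, fr := range st.Frames() {
		fmt.Printf("%s\n  %s:%d\n", fr.Function, fr.File, fr.Line)
	}
}
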
diff --git a/ydb/library/yql/providers/generic/connector/api/service/protos/connector.proto b/ydb/library/yql/providers/generic/connector/api/service/protos/connector.proto index e93e2abb77..7fb225f81e 100644 --- a/ydb/library/yql/providers/generic/connector/api/service/protos/connector.proto +++ b/ydb/library/yql/providers/generic/connector/api/service/protos/connector.proto @@ -7,8 +7,7 @@ import "ydb/public/api/protos/ydb_status_codes.proto"; import "ydb/public/api/protos/ydb_issue_message.proto"; import "ydb/library/yql/providers/generic/connector/api/common/data_source.proto"; -// NOTE: Protobuf-generated code for Go will appear in yql (non-public) path -option go_package = "github.com/ydb-platform/ydb/yql/providers/connector/api/service/protos"; +option go_package = "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos"; // ---------- API Requests ---------- diff --git a/ydb/library/yql/providers/generic/connector/app/client/client.go b/ydb/library/yql/providers/generic/connector/app/client/client.go new file mode 100644 index 0000000000..d029302efe --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/client/client.go @@ -0,0 +1,293 @@ +package client + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/apache/arrow/go/v13/arrow/ipc" + "github.com/spf13/cobra" + "github.com/ydb-platform/ydb/library/go/core/log" + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/config" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils" + api_service "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/protobuf/encoding/prototext" +) + +const ( + tableName = "primitives" + outputFormat = api_service_protos.TReadSplitsRequest_ARROW_IPC_STREAMING +) + +func newConfigFromPath(configPath string) (*config.ClientConfig, error) { + data, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, fmt.Errorf("read file %v: %w", configPath, err) + } + + var cfg config.ClientConfig + + if err := prototext.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("prototext unmarshal `%v`: %w", string(data), err) + } + + return &cfg, nil +} + +func runClient(_ *cobra.Command, args []string) error { + logger, err := utils.NewDevelopmentLogger() + if err != nil { + return fmt.Errorf("zap new: %w", err) + } + + configPath := args[0] + + cfg, err := newConfigFromPath(configPath) + if err != nil { + return fmt.Errorf("unknown instance: %w", err) + } + + if err := callServer(logger, cfg); err != nil { + return fmt.Errorf("call server: %w", err) + } + + return nil +} + +func makeConnection(logger log.Logger, cfg *config.ClientConfig) (*grpc.ClientConn, error) { + var opts []grpc.DialOption + + if cfg.Tls != nil { + logger.Info("client will use TLS connections") + + caCrt, err := os.ReadFile(cfg.Tls.Ca) + if err != nil { + return nil, err + } + + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(caCrt) { + return nil, fmt.Errorf("failed to add server CA's certificate") + } + + tlsCfg := &tls.Config{ + RootCAs: certPool, + } + + opts 
= append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlsCfg))) + } else { + logger.Info("client will use insecure connections") + + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + conn, err := grpc.Dial(utils.EndpointToString(cfg.Endpoint), opts...) + if err != nil { + return nil, fmt.Errorf("grpc dial: %w", err) + } + + return conn, nil +} + +func callServer(logger log.Logger, cfg *config.ClientConfig) error { + conn, err := makeConnection(logger, cfg) + if err != nil { + return fmt.Errorf("grpc dial: %w", err) + } + + defer utils.LogCloserError(logger, conn, "connection close") + + connectorClient := api_service.NewConnectorClient(conn) + + // DescribeTable + schema, err := describeTable(logger, connectorClient, cfg.DataSourceInstance) + if err != nil { + return fmt.Errorf("describe table: %w", err) + } + + // ListSplits - we want to SELECT * + splits, err := listSplits(logger, schema, connectorClient, cfg.DataSourceInstance) + if err != nil { + return fmt.Errorf("list splits: %w", err) + } + + // ReadSplits + if err := readSplits(logger, splits, outputFormat, connectorClient, cfg.DataSourceInstance); err != nil { + return fmt.Errorf("read splits: %w", err) + } + + return nil +} + +func describeTable( + logger log.Logger, + connectorClient api_service.ConnectorClient, + dsi *api_common.TDataSourceInstance, +) (*api_service_protos.TSchema, error) { + req := &api_service_protos.TDescribeTableRequest{Table: tableName, DataSourceInstance: dsi} + logger.Debug("DescribeTable", log.String("request", req.String())) + + resp, err := connectorClient.DescribeTable(context.TODO(), req) + if err != nil { + return nil, fmt.Errorf("describe table: %w", err) + } + + if utils.IsSuccess(resp.Error) { + logger.Debug("DescribeTable", log.String("response", resp.String())) + return resp.Schema, nil + } + logger.Error("DescribeTable", log.String("response", resp.String())) + + return nil, utils.NewSTDErrorFromAPIError(resp.Error) +} + +func listSplits( + logger log.Logger, + schema *api_service_protos.TSchema, + connectorClient api_service.ConnectorClient, + dsi *api_common.TDataSourceInstance, +) ([]*api_service_protos.TSplit, error) { + items := []*api_service_protos.TSelect_TWhat_TItem{} + + for _, column := range schema.Columns { + items = append(items, &api_service_protos.TSelect_TWhat_TItem{ + Payload: &api_service_protos.TSelect_TWhat_TItem_Column{Column: column}, + }) + } + + req := &api_service_protos.TListSplitsRequest{ + Selects: []*api_service_protos.TSelect{ + { + DataSourceInstance: nil, + What: &api_service_protos.TSelect_TWhat{Items: items}, + From: &api_service_protos.TSelect_TFrom{Table: tableName}, + }, + }, + DataSourceInstance: dsi, + } + logger.Debug("ListSplits", log.String("request", req.String())) + + streamListSplits, err := connectorClient.ListSplits(context.TODO(), req) + if err != nil { + return nil, fmt.Errorf("list splits: %w", err) + } + + var splits []*api_service_protos.TSplit + + for { + resp, err := streamListSplits.Recv() + if err != nil { + if err == io.EOF { + break + } + + return nil, fmt.Errorf("stream list splits: %w", err) + } + + if !utils.IsSuccess(resp.Error) { + logger.Error("ListSplits", log.String("response", resp.String())) + return splits, utils.NewSTDErrorFromAPIError(resp.Error) + } + + logger.Debug("ListSplits", log.String("response", resp.String())) + splits = append(splits, resp.Splits...) 
+ } + + if len(splits) != 1 { + return nil, fmt.Errorf("too many splits") + } + + return splits, nil +} + +func readSplits( + logger log.Logger, + splits []*api_service_protos.TSplit, + format api_service_protos.TReadSplitsRequest_EFormat, + connectorClient api_service.ConnectorClient, + dsi *api_common.TDataSourceInstance, +) error { + req := &api_service_protos.TReadSplitsRequest{Splits: splits, Format: format, DataSourceInstance: dsi} + logger.Debug("ReadSplits", log.String("request", req.String())) + + streamReadSplits, err := connectorClient.ReadSplits(context.Background(), req) + if err != nil { + return fmt.Errorf("list splits: %w", err) + } + + var responses []*api_service_protos.TReadSplitsResponse + + for { + resp, err := streamReadSplits.Recv() + if err != nil { + if err == io.EOF { + break + } + + return fmt.Errorf("stream list splits: %w", err) + } + + if !utils.IsSuccess(resp.Error) { + return utils.NewSTDErrorFromAPIError(resp.Error) + } + + responses = append(responses, resp) + } + + if err := dumpReadResponses(logger, format, responses); err != nil { + return fmt.Errorf("dump read responses: %w", err) + } + + return nil +} + +func dumpReadResponses( + logger log.Logger, + format api_service_protos.TReadSplitsRequest_EFormat, + responses []*api_service_protos.TReadSplitsResponse, +) error { + if format == api_service_protos.TReadSplitsRequest_ARROW_IPC_STREAMING { + for _, resp := range responses { + buf := bytes.NewBuffer(resp.GetArrowIpcStreaming()) + + reader, err := ipc.NewReader(buf) + if err != nil { + return fmt.Errorf("new reader: %w", err) + } + + for reader.Next() { + record := reader.Record() + logger.Debug("schema", log.String("schema", record.Schema().String())) + + for i, column := range record.Columns() { + logger.Debug("column", log.Int("id", i), log.String("data", column.String())) + } + } + + reader.Release() + } + } + + return fmt.Errorf("unknown format: %v", format) +} + +var Cmd = &cobra.Command{ + Use: "client", + Short: "client for Connector testing and debugging purposes", + Run: func(cmd *cobra.Command, args []string) { + if err := runClient(cmd, args); err != nil { + fmt.Println(err) + os.Exit(1) + } + }, +} diff --git a/ydb/library/yql/providers/generic/connector/app/client/ya.make b/ydb/library/yql/providers/generic/connector/app/client/ya.make new file mode 100644 index 0000000000..c3855e7300 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/client/ya.make @@ -0,0 +1,5 @@ +GO_LIBRARY() + +SRCS(client.go) + +END() diff --git a/ydb/library/yql/providers/generic/connector/app/config/client.pb.go b/ydb/library/yql/providers/generic/connector/app/config/client.pb.go new file mode 100644 index 0000000000..c57d939b68 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/config/client.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.19.0 +// source: ydb/library/yql/providers/generic/connector/app/config/client.proto + +package config + +import ( + common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
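The debug client above reads its configuration with prototext.Unmarshal into the ClientConfig message generated below. A minimal sketch of such a config file, with placeholder host, port and paths; the data_source_instance block is left out because its fields come from api/common/data_source.proto, which is outside this hunk:

endpoint {
  host: "localhost"
  port: 50051
}
tls {
  ca: "/etc/connector/ca.crt"
}
# data_source_instance { ... }  (fields defined in api/common/data_source.proto)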
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Connector client configuration +type ClientConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Connector service instance network address we connect to + Endpoint *common.TEndpoint `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // TLS credentials for Connector + Tls *ClientTLSConfig `protobuf:"bytes,2,opt,name=tls,proto3" json:"tls,omitempty"` + // Data source instance we read data from + DataSourceInstance *common.TDataSourceInstance `protobuf:"bytes,3,opt,name=data_source_instance,json=dataSourceInstance,proto3" json:"data_source_instance,omitempty"` +} + +func (x *ClientConfig) Reset() { + *x = ClientConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientConfig) ProtoMessage() {} + +func (x *ClientConfig) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientConfig.ProtoReflect.Descriptor instead. +func (*ClientConfig) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDescGZIP(), []int{0} +} + +func (x *ClientConfig) GetEndpoint() *common.TEndpoint { + if x != nil { + return x.Endpoint + } + return nil +} + +func (x *ClientConfig) GetTls() *ClientTLSConfig { + if x != nil { + return x.Tls + } + return nil +} + +func (x *ClientConfig) GetDataSourceInstance() *common.TDataSourceInstance { + if x != nil { + return x.DataSourceInstance + } + return nil +} + +type ClientTLSConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // CA certificate path + Ca string `protobuf:"bytes,1,opt,name=ca,proto3" json:"ca,omitempty"` +} + +func (x *ClientTLSConfig) Reset() { + *x = ClientTLSConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientTLSConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientTLSConfig) ProtoMessage() {} + +func (x *ClientTLSConfig) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientTLSConfig.ProtoReflect.Descriptor instead. 
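The generated accessors are nil-safe, so optional sections can be read without explicit checks. A short sketch of building the same configuration directly in Go, with hypothetical values and the config / api_common import aliases used in client.go above:

cfg := &config.ClientConfig{
	Endpoint: &api_common.TEndpoint{Host: "localhost", Port: 50051},
	Tls:      &config.ClientTLSConfig{Ca: "/etc/connector/ca.crt"},
}
_ = cfg.GetTls().GetCa()                      // "/etc/connector/ca.crt"
_ = (&config.ClientConfig{}).GetTls().GetCa() // "" rather than a panic when tls is absent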
+func (*ClientTLSConfig) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientTLSConfig) GetCa() string { + if x != nil { + return x.Ca + } + return "" +} + +var File_ydb_library_yql_providers_generic_connector_app_config_client_proto protoreflect.FileDescriptor + +var file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDesc = []byte{ + 0x0a, 0x43, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, + 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0x48, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x45, 0x79, 0x64, 0x62, 0x2f, + 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x43, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, + 0x71, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, + 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x4e, 0x59, 0x71, 0x6c, + 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, + 0x2e, 0x54, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x74, + 0x6c, 0x73, 0x12, 0x5b, 0x0a, 0x14, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 
0x54, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x12, 0x64, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, + 0x21, 0x0a, 0x0f, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x63, 0x61, 0x42, 0x49, 0x5a, 0x47, 0x61, 0x2e, 0x79, 0x61, 0x6e, 0x64, 0x65, 0x78, 0x2d, 0x74, + 0x65, 0x61, 0x6d, 0x2e, 0x72, 0x75, 0x2f, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x2f, 0x79, 0x71, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, + 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDescOnce sync.Once + file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDescData = file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDesc +) + +func file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDescGZIP() []byte { + file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDescOnce.Do(func() { + file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDescData) + }) + return file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDescData +} + +var file_ydb_library_yql_providers_generic_connector_app_config_client_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_ydb_library_yql_providers_generic_connector_app_config_client_proto_goTypes = []interface{}{ + (*ClientConfig)(nil), // 0: NYql.Connector.App.Config.ClientConfig + (*ClientTLSConfig)(nil), // 1: NYql.Connector.App.Config.ClientTLSConfig + (*common.TEndpoint)(nil), // 2: NYql.NConnector.NApi.TEndpoint + (*common.TDataSourceInstance)(nil), // 3: NYql.NConnector.NApi.TDataSourceInstance +} +var file_ydb_library_yql_providers_generic_connector_app_config_client_proto_depIdxs = []int32{ + 2, // 0: NYql.Connector.App.Config.ClientConfig.endpoint:type_name -> NYql.NConnector.NApi.TEndpoint + 1, // 1: NYql.Connector.App.Config.ClientConfig.tls:type_name -> NYql.Connector.App.Config.ClientTLSConfig + 3, // 2: NYql.Connector.App.Config.ClientConfig.data_source_instance:type_name -> NYql.NConnector.NApi.TDataSourceInstance + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_ydb_library_yql_providers_generic_connector_app_config_client_proto_init() } +func file_ydb_library_yql_providers_generic_connector_app_config_client_proto_init() { + if File_ydb_library_yql_providers_generic_connector_app_config_client_proto != nil { + return + } + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_init() + if !protoimpl.UnsafeEnabled { + file_ydb_library_yql_providers_generic_connector_app_config_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientConfig); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_app_config_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientTLSConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_ydb_library_yql_providers_generic_connector_app_config_client_proto_goTypes, + DependencyIndexes: file_ydb_library_yql_providers_generic_connector_app_config_client_proto_depIdxs, + MessageInfos: file_ydb_library_yql_providers_generic_connector_app_config_client_proto_msgTypes, + }.Build() + File_ydb_library_yql_providers_generic_connector_app_config_client_proto = out.File + file_ydb_library_yql_providers_generic_connector_app_config_client_proto_rawDesc = nil + file_ydb_library_yql_providers_generic_connector_app_config_client_proto_goTypes = nil + file_ydb_library_yql_providers_generic_connector_app_config_client_proto_depIdxs = nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/config/client.proto b/ydb/library/yql/providers/generic/connector/app/config/client.proto new file mode 100644 index 0000000000..60aa720508 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/config/client.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; +package NYql.Connector.App.Config; + +import "ydb/library/yql/providers/generic/connector/api/common/data_source.proto"; +import "ydb/library/yql/providers/generic/connector/api/common/endpoint.proto"; +import "ydb/library/yql/providers/generic/connector/app/config/server.proto"; + +option go_package = "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/config"; + +// Connector client configuration +message ClientConfig { + // Connector service instance network address we connect to + NYql.NConnector.NApi.TEndpoint endpoint = 1; + // TLS credentials for Connector + ClientTLSConfig tls = 2; + // Data source instance we read data from + NYql.NConnector.NApi.TDataSourceInstance data_source_instance = 3; +} + +message ClientTLSConfig { + // CA certificate path + string ca = 1; +} diff --git a/ydb/library/yql/providers/generic/connector/app/config/server.pb.go b/ydb/library/yql/providers/generic/connector/app/config/server.pb.go new file mode 100644 index 0000000000..4dd7a97d2b --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/config/server.pb.go @@ -0,0 +1,332 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.19.0 +// source: ydb/library/yql/providers/generic/connector/app/config/server.proto + +package config + +import ( + common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Connector server configuration +type ServerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Network address server will be listening on + Endpoint *common.TEndpoint `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // TLS settings. + // Leave it empty for insecure connections. + Tls *ServerTLSConfig `protobuf:"bytes,2,opt,name=tls,proto3" json:"tls,omitempty"` + // This is a rough restriction for YQ memory consumption until + // https://st.yandex-team.ru/YQ-2057 is implemented. + // Leave it empty if you want to avoid any memory limits. + ReadLimit *ServerReadLimit `protobuf:"bytes,3,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"` +} + +func (x *ServerConfig) Reset() { + *x = ServerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerConfig) ProtoMessage() {} + +func (x *ServerConfig) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerConfig.ProtoReflect.Descriptor instead. +func (*ServerConfig) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerConfig) GetEndpoint() *common.TEndpoint { + if x != nil { + return x.Endpoint + } + return nil +} + +func (x *ServerConfig) GetTls() *ServerTLSConfig { + if x != nil { + return x.Tls + } + return nil +} + +func (x *ServerConfig) GetReadLimit() *ServerReadLimit { + if x != nil { + return x.ReadLimit + } + return nil +} + +type ServerTLSConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // TLS private key path + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // TLS public cert path + Cert string `protobuf:"bytes,3,opt,name=cert,proto3" json:"cert,omitempty"` +} + +func (x *ServerTLSConfig) Reset() { + *x = ServerTLSConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerTLSConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerTLSConfig) ProtoMessage() {} + +func (x *ServerTLSConfig) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerTLSConfig.ProtoReflect.Descriptor instead. 
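The server reads an analogous prototext file (see config.go further down in this commit, which unmarshals and validates it). A minimal sketch with placeholder values; the tls and read_limit sections can be omitted entirely for an insecure server without a row limit:

endpoint {
  host: "0.0.0.0"
  port: 50051
}
tls {
  key: "/etc/connector/server.key"
  cert: "/etc/connector/server.crt"
}
read_limit {
  rows: 100000
}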
+func (*ServerTLSConfig) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescGZIP(), []int{1} +} + +func (x *ServerTLSConfig) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *ServerTLSConfig) GetCert() string { + if x != nil { + return x.Cert + } + return "" +} + +// ServerReadLimit limitates the amount of data extracted from the data source on every read request. +type ServerReadLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of rows extracted from the data source + Rows uint64 `protobuf:"varint,1,opt,name=rows,proto3" json:"rows,omitempty"` +} + +func (x *ServerReadLimit) Reset() { + *x = ServerReadLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReadLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReadLimit) ProtoMessage() {} + +func (x *ServerReadLimit) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReadLimit.ProtoReflect.Descriptor instead. +func (*ServerReadLimit) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReadLimit) GetRows() uint64 { + if x != nil { + return x.Rows + } + return 0 +} + +var File_ydb_library_yql_providers_generic_connector_app_config_server_proto protoreflect.FileDescriptor + +var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDesc = []byte{ + 0x0a, 0x43, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, + 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0x45, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd4, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x4e, 0x59, 0x71, + 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, + 0x69, 0x2e, 0x54, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x08, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 
0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, + 0x74, 0x6c, 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x61, 0x64, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x3d, + 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x63, 0x65, 0x72, 0x74, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x25, 0x0a, + 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, + 0x72, 0x6f, 0x77, 0x73, 0x42, 0x49, 0x5a, 0x47, 0x61, 0x2e, 0x79, 0x61, 0x6e, 0x64, 0x65, 0x78, + 0x2d, 0x74, 0x65, 0x61, 0x6d, 0x2e, 0x72, 0x75, 0x2f, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, + 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescOnce sync.Once + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescData = file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDesc +) + +func file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescGZIP() []byte { + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescOnce.Do(func() { + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescData) + }) + return file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescData +} + +var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_goTypes = []interface{}{ + (*ServerConfig)(nil), // 0: NYql.Connector.App.Config.ServerConfig + (*ServerTLSConfig)(nil), // 1: NYql.Connector.App.Config.ServerTLSConfig + (*ServerReadLimit)(nil), // 2: NYql.Connector.App.Config.ServerReadLimit + (*common.TEndpoint)(nil), // 3: NYql.NConnector.NApi.TEndpoint +} +var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_depIdxs = []int32{ + 3, // 0: NYql.Connector.App.Config.ServerConfig.endpoint:type_name -> NYql.NConnector.NApi.TEndpoint + 1, // 1: NYql.Connector.App.Config.ServerConfig.tls:type_name -> 
NYql.Connector.App.Config.ServerTLSConfig + 2, // 2: NYql.Connector.App.Config.ServerConfig.read_limit:type_name -> NYql.Connector.App.Config.ServerReadLimit + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_ydb_library_yql_providers_generic_connector_app_config_server_proto_init() } +func file_ydb_library_yql_providers_generic_connector_app_config_server_proto_init() { + if File_ydb_library_yql_providers_generic_connector_app_config_server_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerTLSConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReadLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_ydb_library_yql_providers_generic_connector_app_config_server_proto_goTypes, + DependencyIndexes: file_ydb_library_yql_providers_generic_connector_app_config_server_proto_depIdxs, + MessageInfos: file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes, + }.Build() + File_ydb_library_yql_providers_generic_connector_app_config_server_proto = out.File + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDesc = nil + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_goTypes = nil + file_ydb_library_yql_providers_generic_connector_app_config_server_proto_depIdxs = nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/config/server.proto b/ydb/library/yql/providers/generic/connector/app/config/server.proto new file mode 100644 index 0000000000..b7e64593e2 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/config/server.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; +package NYql.Connector.App.Config; + +import "ydb/library/yql/providers/generic/connector/api/common/endpoint.proto"; + +option go_package = "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/config"; + +// Connector server configuration +message ServerConfig { + // Network address server will be listening on + NYql.NConnector.NApi.TEndpoint endpoint = 1; + // TLS settings. + // Leave it empty for insecure connections. + ServerTLSConfig tls = 2; + // This is a rough restriction for YQ memory consumption until + // https://st.yandex-team.ru/YQ-2057 is implemented. 
+ // Leave it empty if you want to avoid any memory limits. + ServerReadLimit read_limit = 3; +} + +message ServerTLSConfig { + // TLS private key path + string key = 2; + // TLS public cert path + string cert = 3; + + reserved 1; +} + +// ServerReadLimit limitates the amount of data extracted from the data source on every read request. +message ServerReadLimit { + // The number of rows extracted from the data source + uint64 rows = 1; +} diff --git a/ydb/library/yql/providers/generic/connector/app/config/ya.make b/ydb/library/yql/providers/generic/connector/app/config/ya.make new file mode 100644 index 0000000000..df2e39997b --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/config/ya.make @@ -0,0 +1,14 @@ +PROTO_LIBRARY() + +ONLY_TAGS(GO_PROTO) + +SRCS( + client.proto + server.proto +) + +PEERDIR( + ydb/library/yql/providers/generic/connector/api/common +) + +END() diff --git a/ydb/library/yql/providers/generic/connector/app/main.go b/ydb/library/yql/providers/generic/connector/app/main.go new file mode 100644 index 0000000000..0361f44688 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/main.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/client" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server" +) + +var rootCmd = &cobra.Command{ + Use: "connector", + Short: "Connector for external data sources", + // Run: func(cmd *cobra.Command, args []string) {}, +} + +func init() { + rootCmd.AddCommand(server.Cmd) + rootCmd.AddCommand(client.Cmd) +} + +func main() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/clickhouse/connection_manager.go b/ydb/library/yql/providers/generic/connector/app/server/clickhouse/connection_manager.go new file mode 100644 index 0000000000..007ab9c275 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/clickhouse/connection_manager.go @@ -0,0 +1,100 @@ +package clickhouse + +import ( + "context" + "crypto/tls" + "database/sql" + "fmt" + "net" + "time" + + "github.com/ClickHouse/clickhouse-go/v2" + "github.com/ydb-platform/ydb/library/go/core/log" + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/rdbms" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils" +) + +var _ rdbms.Connection = (*connection)(nil) + +type connection struct { + *sql.DB +} + +func (c connection) Query(ctx context.Context, query string, args ...any) (rdbms.Rows, error) { + return c.DB.QueryContext(ctx, query, args...) 
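// The rdbms package referenced here is not part of this hunk. Judging from the
// methods implemented by the clickhouse and postgresql connections, the
// Connection contract is roughly the following sketch (an inference, not the
// actual definition):
//
//	type Connection interface {
//	    io.Closer
//	    Query(ctx context.Context, query string, args ...any) (Rows, error)
//	}
//
// *sql.Rows appears to satisfy Rows directly on the ClickHouse side, while the
// PostgreSQL side further below wraps pgx.Rows to add a Close() error method.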
+} + +var _ rdbms.ConnectionManager = (*connectionManager)(nil) + +type connectionManager struct { + // TODO: cache of connections, remove unused connections with TTL +} + +func (c *connectionManager) Make( + ctx context.Context, + logger log.Logger, + dsi *api_common.TDataSourceInstance, +) (rdbms.Connection, error) { + if dsi.GetCredentials().GetBasic() == nil { + return nil, fmt.Errorf("currently only basic auth is supported") + } + + opts := &clickhouse.Options{ + Addr: []string{utils.EndpointToString(dsi.GetEndpoint())}, + Auth: clickhouse.Auth{ + Database: dsi.Database, + Username: dsi.Credentials.GetBasic().Username, + Password: dsi.Credentials.GetBasic().Password, + }, + DialContext: func(ctx context.Context, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, "tcp", addr) + }, + Debug: true, + Debugf: func(format string, v ...any) { + logger.Debugf(format, v...) + }, + Compression: &clickhouse.Compression{ + Method: clickhouse.CompressionLZ4, + }, + Protocol: clickhouse.HTTP, + } + + if dsi.UseTls { + opts.TLS = &tls.Config{ + InsecureSkipVerify: false, + } + } + + // FIXME: uncomment after YQ-2286 + // conn, err := clickhouse.Open(opts) + // if err != nil { + // return nil, fmt.Errorf("open connection: %w", err) + // } + + conn := clickhouse.OpenDB(opts) + + if err := conn.Ping(); err != nil { + return nil, fmt.Errorf("conn ping: %w", err) + } + + const ( + maxIdleConns = 5 + maxOpenConns = 10 + ) + + conn.SetMaxIdleConns(maxIdleConns) + conn.SetMaxOpenConns(maxOpenConns) + conn.SetConnMaxLifetime(time.Hour) + + return &connection{DB: conn}, nil +} + +func (c *connectionManager) Release(logger log.Logger, conn rdbms.Connection) { + utils.LogCloserError(logger, conn, "close clickhouse connection") +} + +func NewConnectionManager() rdbms.ConnectionManager { + return &connectionManager{} +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/clickhouse/query_builder.go b/ydb/library/yql/providers/generic/connector/app/server/clickhouse/query_builder.go new file mode 100644 index 0000000000..c55d705836 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/clickhouse/query_builder.go @@ -0,0 +1,22 @@ +package clickhouse + +import ( + "fmt" + + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/rdbms" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +var _ rdbms.QueryBuilder = (*queryBuilder)(nil) + +type queryBuilder struct { +} + +func (qb queryBuilder) DescribeTable(request *api_service_protos.TDescribeTableRequest) string { + return fmt.Sprintf( + "SELECT name, type FROM system.columns WHERE table = '%s' and database ='%s'", + request.Table, + request.DataSourceInstance.Database) +} + +func NewQueryBuilder() rdbms.QueryBuilder { return queryBuilder{} } diff --git a/ydb/library/yql/providers/generic/connector/app/server/clickhouse/type_mapper.go b/ydb/library/yql/providers/generic/connector/app/server/clickhouse/type_mapper.go new file mode 100644 index 0000000000..93988edbd7 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/clickhouse/type_mapper.go @@ -0,0 +1,268 @@ +package clickhouse + +import ( + "fmt" + "regexp" + "time" + + "github.com/apache/arrow/go/v13/arrow/array" + "github.com/ydb-platform/ydb-go-genproto/protos/Ydb" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils" +) + +var _ utils.TypeMapper = typeMapper{} + +type typeMapper struct { 
+ isFixedString *regexp.Regexp + isDateTime64 *regexp.Regexp + isNullable *regexp.Regexp +} + +func (tm typeMapper) SQLTypeToYDBColumn(columnName, typeName string) (*Ydb.Column, error) { + var ( + ydbType *Ydb.Type + nullable bool + ) + + if matches := tm.isNullable.FindStringSubmatch(typeName); len(matches) > 0 { + nullable = true + typeName = matches[1] + } + + // Reference table: https://wiki.yandex-team.ru/rtmapreduce/yql-streams-corner/connectors/lld-02-tipy-dannyx/ + switch { + case typeName == "Bool": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_BOOL}} + case typeName == "Int8": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_INT8}} + case typeName == "UInt8": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_UINT8}} + case typeName == "Int16": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_INT16}} + case typeName == "UInt16": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_UINT16}} + case typeName == "Int32": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_INT32}} + case typeName == "UInt32": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_UINT32}} + case typeName == "Int64": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_INT64}} + case typeName == "UInt64": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_UINT64}} + case typeName == "Float32": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_FLOAT}} + case typeName == "Float64": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_DOUBLE}} + // String/FixedString are binary in ClickHouse, so we map it to YDB's String instead of UTF8: + // https://ydb.tech/en/docs/yql/reference/types/primitive#string + // https://clickhouse.com/docs/en/sql-reference/data-types/string#encodings + case typeName == "String": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_STRING}} + case tm.isFixedString.MatchString(typeName): + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_STRING}} + case typeName == "Date": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_DATE}} + // FIXME: https://st.yandex-team.ru/YQ-2295 + // Date32 is not displayed correctly. 
+ // case typeName == "Date32": + // ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_DATE}} + case typeName == "DateTime": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_DATETIME}} + case tm.isDateTime64.MatchString(typeName): + // NOTE: ClickHouse's DateTime64 value range is much more wide than YDB's Timestamp value range + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_TIMESTAMP}} + default: + return nil, fmt.Errorf("convert type '%s': %w", typeName, utils.ErrDataTypeNotSupported) + } + + // If the column is nullable, wrap it into YQL's optional + if nullable { + ydbType = &Ydb.Type{Type: &Ydb.Type_OptionalType{OptionalType: &Ydb.OptionalType{Item: ydbType}}} + } + + return &Ydb.Column{ + Name: columnName, + Type: ydbType, + }, nil +} + +func (tm typeMapper) YDBTypeToAcceptor(ydbType *Ydb.Type) (any, error) { + var ( + acceptor any + err error + ) + + switch t := ydbType.Type.(type) { + // Primitive types + case *Ydb.Type_TypeId: + acceptor, err = acceptorFromPrimitiveYDBType(t.TypeId, false) + if err != nil { + return nil, fmt.Errorf("make acceptor from primitive YDB type: %w", err) + } + case *Ydb.Type_OptionalType: + acceptor, err = acceptorFromPrimitiveYDBType(t.OptionalType.Item.GetTypeId(), true) + if err != nil { + return nil, fmt.Errorf("make acceptor from optional YDB type: %w", err) + } + default: + return nil, fmt.Errorf("only primitive types are supported, got '%v' instead", ydbType) + } + + return acceptor, nil +} + +func allocatePrimitiveAcceptor[VT utils.ValueType](optional bool) any { + if !optional { + return new(VT) + } else { + return new(*VT) + } +} + +func acceptorFromPrimitiveYDBType(typeID Ydb.Type_PrimitiveTypeId, optional bool) (any, error) { + switch typeID { + case Ydb.Type_BOOL: + return allocatePrimitiveAcceptor[bool](optional), nil + case Ydb.Type_INT8: + return allocatePrimitiveAcceptor[int8](optional), nil + case Ydb.Type_INT16: + return allocatePrimitiveAcceptor[int16](optional), nil + case Ydb.Type_INT32: + return allocatePrimitiveAcceptor[int32](optional), nil + case Ydb.Type_INT64: + return allocatePrimitiveAcceptor[int64](optional), nil + case Ydb.Type_UINT8: + return allocatePrimitiveAcceptor[uint8](optional), nil + case Ydb.Type_UINT16: + return allocatePrimitiveAcceptor[uint16](optional), nil + case Ydb.Type_UINT32: + return allocatePrimitiveAcceptor[uint32](optional), nil + case Ydb.Type_UINT64: + return allocatePrimitiveAcceptor[uint64](optional), nil + case Ydb.Type_FLOAT: + return allocatePrimitiveAcceptor[float32](optional), nil + case Ydb.Type_DOUBLE: + return allocatePrimitiveAcceptor[float64](optional), nil + case Ydb.Type_STRING: + // Looks like []byte would be a better choice here, but clickhouse driver prefers string + return allocatePrimitiveAcceptor[string](optional), nil + case Ydb.Type_DATE, Ydb.Type_DATETIME, Ydb.Type_TIMESTAMP: + return allocatePrimitiveAcceptor[time.Time](optional), nil + default: + return nil, fmt.Errorf("unknown type '%v'", typeID) + } +} + +// AddRow saves a row obtained from the datasource into the buffer +func (tm typeMapper) AddRowToArrowIPCStreaming(ydbTypes []*Ydb.Type, acceptors []any, builders []array.Builder) error { + if len(builders) != len(acceptors) { + return fmt.Errorf("builders vs acceptors mismatch: %v %v", len(builders), len(acceptors)) + } + + if len(ydbTypes) != len(acceptors) { + return fmt.Errorf("ydbtypes vs acceptors mismatch: %v %v", len(ydbTypes), len(acceptors)) + } + + for i, ydbType := range ydbTypes { + + switch t := ydbType.Type.(type) { + 
case *Ydb.Type_TypeId: + if err := tm.appendValueToBuilder(t.TypeId, acceptors[i], builders[i], false); err != nil { + return fmt.Errorf("add primitive value: %w", err) + } + case *Ydb.Type_OptionalType: + switch t.OptionalType.Item.Type.(type) { + case *Ydb.Type_TypeId: + if err := tm.appendValueToBuilder(t.OptionalType.Item.GetTypeId(), acceptors[i], builders[i], true); err != nil { + return fmt.Errorf("add optional primitive value: %w", err) + } + default: + return fmt.Errorf("unexpected type %v: %w", t.OptionalType.Item, utils.ErrDataTypeNotSupported) + } + default: + return fmt.Errorf("unexpected type %v: %w", t, utils.ErrDataTypeNotSupported) + } + } + + return nil +} + +func appendValueToArrowBuilder[IN utils.ValueType, OUT utils.ValueType, AB utils.ArrowBuilder[OUT], CONV utils.ValueConverter[IN, OUT]]( + acceptor any, + builder array.Builder, + optional bool, +) error { + var value IN + if optional { + cast := acceptor.(**IN) + if *cast == nil { + builder.AppendNull() + return nil + } + value = **cast + } else { + value = *acceptor.(*IN) + } + + var converter CONV + out, err := converter.Convert(value) + if err != nil { + return fmt.Errorf("convert value %v: %w", value, err) + } + + builder.(AB).Append(out) + return nil +} + +func (typeMapper) appendValueToBuilder( + typeID Ydb.Type_PrimitiveTypeId, + acceptor any, + builder array.Builder, + optional bool, +) error { + var err error + switch typeID { + case Ydb.Type_BOOL: + err = appendValueToArrowBuilder[bool, uint8, *array.Uint8Builder, utils.BoolConverter](acceptor, builder, optional) + case Ydb.Type_INT8: + err = appendValueToArrowBuilder[int8, int8, *array.Int8Builder, utils.Int8Converter](acceptor, builder, optional) + case Ydb.Type_INT16: + err = appendValueToArrowBuilder[int16, int16, *array.Int16Builder, utils.Int16Converter](acceptor, builder, optional) + case Ydb.Type_INT32: + err = appendValueToArrowBuilder[int32, int32, *array.Int32Builder, utils.Int32Converter](acceptor, builder, optional) + case Ydb.Type_INT64: + err = appendValueToArrowBuilder[int64, int64, *array.Int64Builder, utils.Int64Converter](acceptor, builder, optional) + case Ydb.Type_UINT8: + err = appendValueToArrowBuilder[uint8, uint8, *array.Uint8Builder, utils.Uint8Converter](acceptor, builder, optional) + case Ydb.Type_UINT16: + err = appendValueToArrowBuilder[uint16, uint16, *array.Uint16Builder, utils.Uint16Converter](acceptor, builder, optional) + case Ydb.Type_UINT32: + err = appendValueToArrowBuilder[uint32, uint32, *array.Uint32Builder, utils.Uint32Converter](acceptor, builder, optional) + case Ydb.Type_UINT64: + err = appendValueToArrowBuilder[uint64, uint64, *array.Uint64Builder, utils.Uint64Converter](acceptor, builder, optional) + case Ydb.Type_FLOAT: + err = appendValueToArrowBuilder[float32, float32, *array.Float32Builder, utils.Float32Converter](acceptor, builder, optional) + case Ydb.Type_DOUBLE: + err = appendValueToArrowBuilder[float64, float64, *array.Float64Builder, utils.Float64Converter](acceptor, builder, optional) + case Ydb.Type_STRING: + err = appendValueToArrowBuilder[string, []byte, *array.BinaryBuilder, utils.StringToBytesConverter](acceptor, builder, optional) + case Ydb.Type_DATE: + err = appendValueToArrowBuilder[time.Time, uint16, *array.Uint16Builder, utils.DateConverter](acceptor, builder, optional) + case Ydb.Type_DATETIME: + err = appendValueToArrowBuilder[time.Time, uint32, *array.Uint32Builder, utils.DatetimeConverter](acceptor, builder, optional) + case Ydb.Type_TIMESTAMP: + err = 
appendValueToArrowBuilder[time.Time, uint64, *array.Uint64Builder, utils.TimestampConverter](acceptor, builder, optional) + default: + return fmt.Errorf("unexpected type %v: %w", typeID, utils.ErrDataTypeNotSupported) + } + + return err +} + +func NewTypeMapper() utils.TypeMapper { + return typeMapper{ + isFixedString: regexp.MustCompile(`FixedString\([0-9]+\)`), + isDateTime64: regexp.MustCompile(`DateTime64\(\d\)`), + isNullable: regexp.MustCompile(`Nullable\((?P<Internal>\w+)\)`), + } +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/clickhouse/ya.make b/ydb/library/yql/providers/generic/connector/app/server/clickhouse/ya.make new file mode 100644 index 0000000000..314fa142dd --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/clickhouse/ya.make @@ -0,0 +1,9 @@ +GO_LIBRARY() + +SRCS( + connection_manager.go + query_builder.go + type_mapper.go +) + +END() diff --git a/ydb/library/yql/providers/generic/connector/app/server/cmd.go b/ydb/library/yql/providers/generic/connector/app/server/cmd.go new file mode 100644 index 0000000000..c6c603474a --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/cmd.go @@ -0,0 +1,30 @@ +package server + +import ( + "fmt" + "log" + "os" + + "github.com/spf13/cobra" +) + +var Cmd = &cobra.Command{ + Use: "server", + Short: "Connector server", + Run: func(cmd *cobra.Command, args []string) { + if err := runServer(cmd, args); err != nil { + fmt.Println(err) + os.Exit(1) + } + }, +} + +const configFlag = "config" + +func init() { + Cmd.Flags().StringP(configFlag, "c", "", "path to server config file") + + if err := Cmd.MarkFlagRequired(configFlag); err != nil { + log.Fatal(err) + } +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/config.go b/ydb/library/yql/providers/generic/connector/app/server/config.go new file mode 100644 index 0000000000..79098addc7 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/config.go @@ -0,0 +1,107 @@ +package server + +import ( + "fmt" + "io/ioutil" + "math" + "os" + + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/config" + "google.golang.org/protobuf/encoding/prototext" +) + +func validateServerConfig(c *config.ServerConfig) error { + if err := validateEndpoint(c.Endpoint); err != nil { + return fmt.Errorf("validate `Server`: %w", err) + } + + if err := validateServerTLSConfig(c.Tls); err != nil { + return fmt.Errorf("validate `TLS`: %w", err) + } + + if err := validateServerReadLimit(c.ReadLimit); err != nil { + return fmt.Errorf("validate `ReadLimit`: %w", err) + } + + return nil +} + +func validateEndpoint(c *api_common.TEndpoint) error { + if c == nil { + return fmt.Errorf("missing required field `Server`") + } + + if c.Host == "" { + return fmt.Errorf("invalid value of field `Server.Host`: %v", c.Host) + } + + if c.Port == 0 || c.Port > math.MaxUint16 { + return fmt.Errorf("invalid value of field `Server.Port`: %v", c.Port) + } + + return nil +} + +func validateServerTLSConfig(c *config.ServerTLSConfig) error { + if c == nil { + // It's OK not to have TLS config section + return nil + } + + if err := fileMustExist(c.Key); err != nil { + return fmt.Errorf("invalid value of field `TLS.Key`: %w", err) + } + + if err := fileMustExist(c.Cert); err != nil { + return fmt.Errorf("invalid value of field `TLS.Cert`: %w", err) + } + + return nil +} + +func validateServerReadLimit(c 
*config.ServerReadLimit) error { + if c == nil { + // It's OK not to have read request memory limitation + return nil + } + + // but if it's not nil, one must set limits explicitly + if c.GetRows() == 0 { + return fmt.Errorf("invalid value of field `ServerReadLimit.Rows`") + } + + return nil +} + +func fileMustExist(path string) error { + info, err := os.Stat(path) + if os.IsNotExist(err) { + return fmt.Errorf("path '%s' does not exist", path) + } + + if info.IsDir() { + return fmt.Errorf("path '%s' is a directory", path) + } + + return nil +} + +func newConfigFromPath(configPath string) (*config.ServerConfig, error) { + data, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, fmt.Errorf("read file %v: %w", configPath, err) + } + + var cfg config.ServerConfig + + if err := prototext.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("prototext unmarshal `%v`: %w", string(data), err) + } + + if err := validateServerConfig(&cfg); err != nil { + return nil, fmt.Errorf("validate config: %w", err) + } + + return &cfg, nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/postgresql/connection_manager.go b/ydb/library/yql/providers/generic/connector/app/server/postgresql/connection_manager.go new file mode 100644 index 0000000000..3f3376f569 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/postgresql/connection_manager.go @@ -0,0 +1,78 @@ +package postgresql + +import ( + "context" + "fmt" + + "github.com/jackc/pgx/v5" + _ "github.com/jackc/pgx/v5/stdlib" + "github.com/ydb-platform/ydb/library/go/core/log" + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/rdbms" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils" +) + +var _ rdbms.Connection = (*connection)(nil) + +type rows struct { + pgx.Rows +} + +func (r rows) Close() error { + r.Rows.Close() + return nil +} + +type connection struct { + *pgx.Conn +} + +func (c connection) Close() error { + return c.Conn.Close(context.TODO()) +} + +func (c connection) Query(ctx context.Context, query string, args ...any) (rdbms.Rows, error) { + out, err := c.Conn.Query(ctx, query, args...) 
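// For reference, Make below composes a libpq-style connection string; with
// hypothetical values (database "mydb", user "reader" on pg.example.com:5432,
// TLS enabled) it comes out as:
//
//	dbname=mydb user=reader password=secret host=pg.example.com port=5432 sslmode=verify-full
//
// with " sslmode=disable" appended instead when UseTls is not set on the data
// source instance.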
+ return rows{Rows: out}, err +} + +var _ rdbms.ConnectionManager = (*connectionManager)(nil) + +type connectionManager struct { + // TODO: cache of connections, remove unused connections with TTL +} + +func (c *connectionManager) Make( + ctx context.Context, + _ log.Logger, + dsi *api_common.TDataSourceInstance, +) (rdbms.Connection, error) { + connStr := fmt.Sprintf("dbname=%s user=%s password=%s host=%s port=%d", + dsi.Database, + dsi.Credentials.GetBasic().Username, + dsi.Credentials.GetBasic().Password, + dsi.GetEndpoint().GetHost(), + dsi.GetEndpoint().GetPort(), + ) + + if dsi.UseTls { + connStr += " sslmode=verify-full" + } else { + connStr += " sslmode=disable" + } + + conn, err := pgx.Connect(ctx, connStr) + if err != nil { + return nil, fmt.Errorf("open connection: %w", err) + } + + return &connection{Conn: conn}, nil +} + +func (c *connectionManager) Release(logger log.Logger, conn rdbms.Connection) { + utils.LogCloserError(logger, conn, "close connection to PostgreSQL") +} + +func NewConnectionManager() rdbms.ConnectionManager { + return &connectionManager{} +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/postgresql/query_builder.go b/ydb/library/yql/providers/generic/connector/app/server/postgresql/query_builder.go new file mode 100644 index 0000000000..c7dbdfdcda --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/postgresql/query_builder.go @@ -0,0 +1,22 @@ +package postgresql + +import ( + "fmt" + + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/rdbms" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +var _ rdbms.QueryBuilder = (*queryBuilder)(nil) + +type queryBuilder struct { +} + +func (qb queryBuilder) DescribeTable(request *api_service_protos.TDescribeTableRequest) string { + return fmt.Sprintf( + // TODO: is hardconing schema correct? 
+ "SELECT column_name, data_type FROM information_schema.columns WHERE table_name = '%s' AND table_schema ='public'", + request.Table) +} + +func NewQueryBuilder() rdbms.QueryBuilder { return queryBuilder{} } diff --git a/ydb/library/yql/providers/generic/connector/app/server/postgresql/type_mapper.go b/ydb/library/yql/providers/generic/connector/app/server/postgresql/type_mapper.go new file mode 100644 index 0000000000..263816b1a1 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/postgresql/type_mapper.go @@ -0,0 +1,183 @@ +package postgresql + +import ( + "fmt" + "time" + + "github.com/apache/arrow/go/v13/arrow/array" + "github.com/jackc/pgx/v5/pgtype" + "github.com/ydb-platform/ydb-go-genproto/protos/Ydb" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils" +) + +var _ utils.TypeMapper = typeMapper{} + +type typeMapper struct{} + +func (tm typeMapper) SQLTypeToYDBColumn(columnName, typeName string) (*Ydb.Column, error) { + var ydbType *Ydb.Type + + // Reference table: https://wiki.yandex-team.ru/rtmapreduce/yql-streams-corner/connectors/lld-02-tipy-dannyx/ + switch typeName { + case "boolean", "bool": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_BOOL}} + case "smallint", "int2", "smallserial", "serial2": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_INT16}} + case "integer", "int", "int4", "serial", "serial4": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_INT32}} + case "bigint", "int8", "bigserial", "serial8": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_INT64}} + case "real", "float4": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_FLOAT}} + case "double precision", "float8": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_DOUBLE}} + case "bytea": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_STRING}} + case "character", "character varying", "text": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_UTF8}} + case "date": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_DATE}} + // TODO: PostgreSQL `time` data type has no direct counterparts in the YDB's type system; + // but it can be supported when the PG-compatible types is added to YDB: + // https://st.yandex-team.ru/YQ-2285 + // case "time": + case "timestamp without time zone": + ydbType = &Ydb.Type{Type: &Ydb.Type_TypeId{TypeId: Ydb.Type_TIMESTAMP}} + default: + return nil, fmt.Errorf("convert type '%s': %w", typeName, utils.ErrDataTypeNotSupported) + } + + // In PostgreSQL all columns are actually nullable, hence we wrap every T in Optional<T>. 
+ // See this issue for details: https://st.yandex-team.ru/YQ-2256 + ydbType = &Ydb.Type{Type: &Ydb.Type_OptionalType{OptionalType: &Ydb.OptionalType{Item: ydbType}}} + + return &Ydb.Column{ + Name: columnName, + Type: ydbType, + }, nil +} + +func (tm typeMapper) YDBTypeToAcceptor(ydbType *Ydb.Type) (any, error) { + var ( + acceptor any + err error + ) + + switch t := ydbType.Type.(type) { + // Primitive types + case *Ydb.Type_TypeId: + acceptor, err = acceptorFromPrimitiveYDBType(t.TypeId) + if err != nil { + return nil, fmt.Errorf("make acceptor from primitive YDB type: %w", err) + } + case *Ydb.Type_OptionalType: + acceptor, err = acceptorFromPrimitiveYDBType(t.OptionalType.Item.GetTypeId()) + if err != nil { + return nil, fmt.Errorf("make acceptor from optional YDB type: %w", err) + } + default: + return nil, fmt.Errorf("only primitive types are supported, got '%v' instead", ydbType) + } + + return acceptor, nil +} + +func acceptorFromPrimitiveYDBType(typeID Ydb.Type_PrimitiveTypeId) (any, error) { + switch typeID { + case Ydb.Type_BOOL: + return new(pgtype.Bool), nil + case Ydb.Type_INT16: + return new(pgtype.Int2), nil + case Ydb.Type_INT32: + return new(pgtype.Int4), nil + case Ydb.Type_INT64: + return new(pgtype.Int8), nil + case Ydb.Type_FLOAT: + return new(pgtype.Float4), nil + case Ydb.Type_DOUBLE: + return new(pgtype.Float8), nil + case Ydb.Type_STRING: + return new(*[]byte), nil + case Ydb.Type_UTF8: + return new(pgtype.Text), nil + case Ydb.Type_DATE: + return new(pgtype.Date), nil + case Ydb.Type_TIMESTAMP: + return new(pgtype.Timestamp), nil + default: + return nil, fmt.Errorf("make acceptor for type '%v': %w", typeID, utils.ErrDataTypeNotSupported) + } +} + +func appendValueToArrowBuilder[IN utils.ValueType, OUT utils.ValueType, AB utils.ArrowBuilder[OUT], CONV utils.ValueConverter[IN, OUT]]( + value IN, + builder array.Builder, + valid bool, +) error { + if valid { + var converter CONV + out, err := converter.Convert(value) + if err != nil { + return fmt.Errorf("convert value: %w", err) + } + builder.(AB).Append(out) + } else { + builder.AppendNull() + } + return nil +} + +// AddRow saves a row obtained from the datasource into the buffer +func (tm typeMapper) AddRowToArrowIPCStreaming( + _ []*Ydb.Type, // TODO: use detailed YDB type information when acceptor type is not enough + acceptors []any, + builders []array.Builder, +) error { + if len(builders) != len(acceptors) { + return fmt.Errorf("expected row %v values, got %v", len(builders), len(acceptors)) + } + + for i, acceptor := range acceptors { + var err error + switch t := acceptor.(type) { + case *pgtype.Bool: + err = appendValueToArrowBuilder[bool, uint8, *array.Uint8Builder, utils.BoolConverter](t.Bool, builders[i], t.Valid) + case *pgtype.Int2: + err = appendValueToArrowBuilder[int16, int16, *array.Int16Builder, utils.Int16Converter](t.Int16, builders[i], t.Valid) + case *pgtype.Int4: + err = appendValueToArrowBuilder[int32, int32, *array.Int32Builder, utils.Int32Converter](t.Int32, builders[i], t.Valid) + case *pgtype.Int8: + err = appendValueToArrowBuilder[int64, int64, *array.Int64Builder, utils.Int64Converter](t.Int64, builders[i], t.Valid) + case *pgtype.Float4: + err = appendValueToArrowBuilder[float32, float32, *array.Float32Builder, utils.Float32Converter](t.Float32, builders[i], t.Valid) + case *pgtype.Float8: + err = appendValueToArrowBuilder[float64, float64, *array.Float64Builder, utils.Float64Converter](t.Float64, builders[i], t.Valid) + case *pgtype.Text: + err = appendValueToArrowBuilder[string, 
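// This mirrors, for a single cell, what appendValueToArrowBuilder does above:
// pgx scans a value into a pgtype acceptor, and the Valid flag decides whether
// the Arrow builder receives a value or a NULL. It is written against pgtype and
// Arrow directly, so it stays independent of the unexported helpers in this package.
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v13/arrow/array"
	"github.com/apache/arrow/go/v13/arrow/memory"
	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	builder := array.NewInt32Builder(memory.DefaultAllocator)
	defer builder.Release()

	// Two "scanned" cells: a present value and a NULL.
	cells := []pgtype.Int4{
		{Int32: 42, Valid: true},
		{Valid: false},
	}

	for _, cell := range cells {
		if cell.Valid {
			builder.Append(cell.Int32)
		} else {
			builder.AppendNull()
		}
	}

	arr := builder.NewInt32Array()
	defer arr.Release()

	fmt.Println(arr) // [42 (null)]
}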
string, *array.StringBuilder, utils.StringConverter](t.String, builders[i], t.Valid) + case **[]byte: + // TODO: Bytea exists in the upstream library, but missing in jackx/pgx: + // https://github.com/jackc/pgtype/blob/v1.14.0/bytea.go + // https://github.com/jackc/pgx/blob/v5.3.1/pgtype/bytea.go + // https://github.com/jackc/pgx/issues/1714 + if *t != nil { + builders[i].(*array.BinaryBuilder).Append(**t) + } else { + builders[i].(*array.BinaryBuilder).AppendNull() + } + case *pgtype.Date: + err = appendValueToArrowBuilder[time.Time, uint16, *array.Uint16Builder, utils.DateConverter](t.Time, builders[i], t.Valid) + case *pgtype.Timestamp: + err = appendValueToArrowBuilder[time.Time, uint64, *array.Uint64Builder, utils.TimestampConverter](t.Time, builders[i], t.Valid) + default: + return fmt.Errorf("item #%d of a type '%T': %w", i, t, utils.ErrDataTypeNotSupported) + } + + if err != nil { + return fmt.Errorf("append value to arrow builder: %w", err) + } + } + + return nil +} + +func NewTypeMapper() utils.TypeMapper { return typeMapper{} } diff --git a/ydb/library/yql/providers/generic/connector/app/server/postgresql/ya.make b/ydb/library/yql/providers/generic/connector/app/server/postgresql/ya.make new file mode 100644 index 0000000000..314fa142dd --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/postgresql/ya.make @@ -0,0 +1,9 @@ +GO_LIBRARY() + +SRCS( + connection_manager.go + query_builder.go + type_mapper.go +) + +END() diff --git a/ydb/library/yql/providers/generic/connector/app/server/rdbms/connection_manager.go b/ydb/library/yql/providers/generic/connector/app/server/rdbms/connection_manager.go new file mode 100644 index 0000000000..7327322f0f --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/rdbms/connection_manager.go @@ -0,0 +1,25 @@ +package rdbms + +import ( + "context" + + "github.com/ydb-platform/ydb/library/go/core/log" + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" +) + +type Connection interface { + Query(ctx context.Context, query string, args ...any) (Rows, error) + Close() error +} + +type Rows interface { + Close() error + Err() error + Next() bool + Scan(dest ...any) error +} + +type ConnectionManager interface { + Make(ctx context.Context, logger log.Logger, dataSourceInstance *api_common.TDataSourceInstance) (Connection, error) + Release(logger log.Logger, conn Connection) +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/rdbms/handler.go b/ydb/library/yql/providers/generic/connector/app/server/rdbms/handler.go new file mode 100644 index 0000000000..1792cfbe85 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/rdbms/handler.go @@ -0,0 +1,163 @@ +package rdbms + +import ( + "context" + "fmt" + "strings" + + "github.com/ydb-platform/ydb/library/go/core/log" + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +type Handler struct { + typeMapper utils.TypeMapper + queryBuilder QueryBuilder + connectionManager ConnectionManager + logger log.Logger +} + +func (h *Handler) DescribeTable( + ctx context.Context, + logger log.Logger, + request *api_service_protos.TDescribeTableRequest, +) (*api_service_protos.TDescribeTableResponse, error) { + query := 
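// Because Handler depends only on the small Connection/Rows/ConnectionManager
// interfaces above, it can be exercised without a real database. The code below is
// a sketch of possible test scaffolding (names and canned data are assumptions):
// it replays fixed (column_name, data_type) rows, which is enough for DescribeTable.
package fakes

import (
	"context"

	"github.com/ydb-platform/ydb/library/go/core/log"
	api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common"
	"github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/rdbms"
)

type fakeRows struct {
	data [][]string // each row: column name, type name
	pos  int
}

func (r *fakeRows) Close() error { return nil }
func (r *fakeRows) Err() error   { return nil }

func (r *fakeRows) Next() bool {
	r.pos++
	return r.pos <= len(r.data)
}

func (r *fakeRows) Scan(dest ...any) error {
	row := r.data[r.pos-1]
	for i := range dest {
		*dest[i].(*string) = row[i]
	}
	return nil
}

type fakeConnection struct{ rows *fakeRows }

func (c *fakeConnection) Query(_ context.Context, _ string, _ ...any) (rdbms.Rows, error) {
	return c.rows, nil
}

func (c *fakeConnection) Close() error { return nil }

// FakeConnectionManager hands out fakeConnection instances instead of real drivers.
type FakeConnectionManager struct{ Rows [][]string }

func (m FakeConnectionManager) Make(_ context.Context, _ log.Logger, _ *api_common.TDataSourceInstance) (rdbms.Connection, error) {
	return &fakeConnection{rows: &fakeRows{data: m.Rows}}, nil
}

func (m FakeConnectionManager) Release(_ log.Logger, _ rdbms.Connection) {}

// Wiring sketch:
//   rdbms.NewHandler(logger, postgresql.NewQueryBuilder(), FakeConnectionManager{...}, postgresql.NewTypeMapper())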
h.queryBuilder.DescribeTable(request) + + conn, err := h.connectionManager.Make(ctx, logger, request.DataSourceInstance) + if err != nil { + return nil, fmt.Errorf("make connection: %w", err) + } + + defer h.connectionManager.Release(logger, conn) + + logger.Debug("execute query", log.String("query", query)) + + rows, err := conn.Query(ctx, query) + if err != nil { + return nil, fmt.Errorf("query '%s' error: %w", query, err) + } + + defer func() { utils.LogCloserError(logger, rows, "close rows") }() + + var ( + columnName string + typeName string + schema api_service_protos.TSchema + ) + + for rows.Next() { + if err := rows.Scan(&columnName, &typeName); err != nil { + return nil, fmt.Errorf("rows scan: %w", err) + } + + column, err := h.typeMapper.SQLTypeToYDBColumn(columnName, typeName) + if err != nil { + return nil, fmt.Errorf("sql type to ydb column (%s, %s): %w", columnName, typeName, err) + } + + schema.Columns = append(schema.Columns, column) + } + + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("rows error: %w", err) + } + + if len(schema.Columns) == 0 { + return nil, utils.ErrTableDoesNotExist + } + + return &api_service_protos.TDescribeTableResponse{Schema: &schema}, nil +} + +func (h *Handler) ReadSplit( + ctx context.Context, + logger log.Logger, + dataSourceInstance *api_common.TDataSourceInstance, + split *api_service_protos.TSplit, + pagingWriter *utils.PagingWriter, +) error { + conn, err := h.connectionManager.Make(ctx, logger, dataSourceInstance) + if err != nil { + return fmt.Errorf("make connection: %w", err) + } + + defer h.connectionManager.Release(logger, conn) + + // SELECT $columns + + // interpolate request + var sb strings.Builder + + sb.WriteString("SELECT ") + + // accumulate acceptors + var acceptors []any + + columns, err := utils.SelectWhatToYDBColumns(split.Select.What) + if err != nil { + return fmt.Errorf("convert Select.What.Items to Ydb.Columns: %w", err) + } + + for i, column := range columns { + sb.WriteString(column.GetName()) + + if i != len(columns)-1 { + sb.WriteString(", ") + } + + var acceptor any + + acceptor, err = h.typeMapper.YDBTypeToAcceptor(column.GetType()) + if err != nil { + return fmt.Errorf("map ydb column to acceptor: %w", err) + } + + acceptors = append(acceptors, acceptor) + } + + // SELECT $columns FROM $from + tableName := split.GetSelect().GetFrom().GetTable() + if tableName == "" { + return fmt.Errorf("empty table name") + } + + sb.WriteString(" FROM ") + sb.WriteString(tableName) + + // execute query + + query := sb.String() + + logger.Debug("execute query", log.String("query", query)) + + rows, err := conn.Query(ctx, query) + if err != nil { + return fmt.Errorf("query '%s' error: %w", query, err) + } + + defer func() { utils.LogCloserError(logger, rows, "close rows") }() + + for rows.Next() { + if err := rows.Scan(acceptors...); err != nil { + return fmt.Errorf("rows scan error: %w", err) + } + + if err := pagingWriter.AddRow(acceptors); err != nil { + return fmt.Errorf("add row to paging writer: %w", err) + } + } + + if err := rows.Err(); err != nil { + return fmt.Errorf("rows error: %w", err) + } + + return nil +} + +func (h *Handler) TypeMapper() utils.TypeMapper { return h.typeMapper } + +func NewHandler(logger log.Logger, queryBuilder QueryBuilder, connectionManager ConnectionManager, typeMapper utils.TypeMapper) *Handler { + return &Handler{logger: logger, queryBuilder: queryBuilder, connectionManager: connectionManager, typeMapper: typeMapper} +} diff --git 
a/ydb/library/yql/providers/generic/connector/app/server/rdbms/query_builder.go b/ydb/library/yql/providers/generic/connector/app/server/rdbms/query_builder.go new file mode 100644 index 0000000000..6aa6e7991b --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/rdbms/query_builder.go @@ -0,0 +1,9 @@ +package rdbms + +import ( + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +type QueryBuilder interface { + DescribeTable(request *api_service_protos.TDescribeTableRequest) string +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/rdbms/ya.make b/ydb/library/yql/providers/generic/connector/app/server/rdbms/ya.make new file mode 100644 index 0000000000..f73218d66d --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/rdbms/ya.make @@ -0,0 +1,9 @@ +GO_LIBRARY() + +SRCS( + connection_manager.go + handler.go + query_builder.go +) + +END() diff --git a/ydb/library/yql/providers/generic/connector/app/server/server.go b/ydb/library/yql/providers/generic/connector/app/server/server.go new file mode 100644 index 0000000000..1f8a005822 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/server.go @@ -0,0 +1,305 @@ +package server + +import ( + "context" + "fmt" + "net" + + "github.com/apache/arrow/go/v13/arrow/memory" + "github.com/spf13/cobra" + "github.com/ydb-platform/ydb/library/go/core/log" + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/config" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/clickhouse" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/postgresql" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/rdbms" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils" + api_service "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +type Server struct { + api_service.UnimplementedConnectorServer + handlers map[api_common.EDataSourceKind]*rdbms.Handler + columnarBufferFactory *utils.ColumnarBufferFactory + cfg *config.ServerConfig + logger log.Logger +} + +func (s *Server) ListTables(_ *api_service_protos.TListTablesRequest, _ api_service.Connector_ListTablesServer) error { + return nil +} + +func (s *Server) DescribeTable( + ctx context.Context, + request *api_service_protos.TDescribeTableRequest, +) (*api_service_protos.TDescribeTableResponse, error) { + logger := utils.AnnotateLogger(s.logger, "DescribeTable", request.DataSourceInstance) + logger.Info("request handling started", log.String("table", request.GetTable())) + + if err := ValidateDescribeTableRequest(logger, request); err != nil { + logger.Error("request handling failed", log.Error(err)) + + return &api_service_protos.TDescribeTableResponse{ + Error: utils.NewAPIErrorFromStdError(err), + }, nil + } + + handler, err := s.getHandler(request.DataSourceInstance.Kind) + if err != nil { + logger.Error("request handling failed", log.Error(err)) + + return &api_service_protos.TDescribeTableResponse{ + Error: utils.NewAPIErrorFromStdError(err), + }, nil + } + + out, err := 
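// DescribeTable (and the other handlers in this file) report logical failures
// inside the response message while returning a nil gRPC error, so a caller has to
// inspect resp.Error explicitly. A small sketch of that check, reusing the helpers
// from the utils package introduced later in this commit; the function name
// schemaOrError is an illustrative assumption, not part of the commit.
package sketch

import (
	"github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils"
	api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos"
)

// schemaOrError converts the embedded TError back into a regular Go error.
func schemaOrError(resp *api_service_protos.TDescribeTableResponse) (*api_service_protos.TSchema, error) {
	if !utils.IsSuccess(resp.Error) {
		return nil, utils.NewSTDErrorFromAPIError(resp.Error)
	}

	return resp.Schema, nil
}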
handler.DescribeTable(ctx, logger, request) + if err != nil { + logger.Error("request handling failed", log.Error(err)) + + out = &api_service_protos.TDescribeTableResponse{Error: utils.NewAPIErrorFromStdError(err)} + + return out, nil + } + + out.Error = utils.NewSuccess() + logger.Info("request handling finished", log.String("response", out.String())) + + return out, nil +} + +func (s *Server) ListSplits(request *api_service_protos.TListSplitsRequest, stream api_service.Connector_ListSplitsServer) error { + logger := utils.AnnotateLogger(s.logger, "ListSplits", request.DataSourceInstance) + logger.Info("request handling started", log.Int("total selects", len(request.Selects))) + + if err := ValidateListSplitsRequest(logger, request); err != nil { + return s.doListSplitsResponse(logger, stream, + &api_service_protos.TListSplitsResponse{Error: utils.NewAPIErrorFromStdError(err)}) + } + + // Make a trivial copy of requested selects + totalSplits := 0 + + for _, slct := range request.Selects { + logger.Debug("responding selects", log.Int("split_id", totalSplits), log.String("select", slct.String())) + resp := &api_service_protos.TListSplitsResponse{ + Error: utils.NewSuccess(), + Splits: []*api_service_protos.TSplit{{Select: slct}}, + } + + for _, split := range resp.Splits { + logger.Debug("responding split", log.Int("split_id", totalSplits), log.String("split", split.Select.String())) + totalSplits++ + } + + if err := s.doListSplitsResponse(logger, stream, resp); err != nil { + return err + } + } + + logger.Info("request handling finished", log.Int("total_splits", totalSplits)) + + return nil +} + +func (s *Server) doListSplitsResponse( + logger log.Logger, + stream api_service.Connector_ListSplitsServer, + response *api_service_protos.TListSplitsResponse, +) error { + if !utils.IsSuccess(response.Error) { + logger.Error("request handling failed", utils.APIErrorToLogFields(response.Error)...) 
+ } + + if err := stream.Send(response); err != nil { + logger.Error("send channel failed", log.Error(err)) + return err + } + + return nil +} + +func (s *Server) ReadSplits(request *api_service_protos.TReadSplitsRequest, stream api_service.Connector_ReadSplitsServer) error { + logger := utils.AnnotateLogger(s.logger, "ReadSplits", request.DataSourceInstance) + logger.Info("request handling started", log.Int("total_splits", len(request.Splits))) + + if err := ValidateReadSplitsRequest(logger, request); err != nil { + logger.Error("request handling failed", log.Error(err)) + + response := &api_service_protos.TReadSplitsResponse{Error: utils.NewAPIErrorFromStdError(err)} + + if err := stream.Send(response); err != nil { + logger.Error("send channel failed", log.Error(err)) + return err + } + + return nil + } + + logger = log.With(logger, log.String("data source kind", request.DataSourceInstance.GetKind().String())) + + for i, split := range request.Splits { + logger.Info("reading split", log.Int("split_ordered_num", i)) + + err := s.readSplit(logger, stream, request, split) + if err != nil { + logger.Error("request handling failed", log.Error(err)) + + response := &api_service_protos.TReadSplitsResponse{Error: utils.NewAPIErrorFromStdError(err)} + + if err := stream.Send(response); err != nil { + logger.Error("send channel failed", log.Error(err)) + return err + } + } + } + + logger.Info("request handling finished") + + return nil +} + +func (s *Server) readSplit( + logger log.Logger, + stream api_service.Connector_ReadSplitsServer, + request *api_service_protos.TReadSplitsRequest, + split *api_service_protos.TSplit, +) error { + logger.Debug("reading split", log.String("split", split.String())) + + handler, err := s.getHandler(request.DataSourceInstance.Kind) + if err != nil { + return fmt.Errorf("get handler: %w", err) + } + + buf, err := s.columnarBufferFactory.MakeBuffer(logger, request.Format, split.Select.What, handler.TypeMapper()) + if err != nil { + return fmt.Errorf("make buffer: %w", err) + } + + pagingWriter, err := utils.NewPagingWriter( + logger, + buf, + stream, + request.GetPagination(), + ) + if err != nil { + return fmt.Errorf("new paging writer result set: %w", err) + } + + if err = handler.ReadSplit(stream.Context(), logger, request.GetDataSourceInstance(), split, pagingWriter); err != nil { + return fmt.Errorf("read split: %w", err) + } + + rowsReceived, err := pagingWriter.Finish() + if err != nil { + return fmt.Errorf("finish paging writer: %w", err) + } + + logger.Debug("reading split finished", log.UInt64("rows_received", rowsReceived)) + + return nil +} + +func (s *Server) run() error { + endpoint := utils.EndpointToString(s.cfg.Endpoint) + + lis, err := net.Listen("tcp", endpoint) + if err != nil { + return fmt.Errorf("net listen: %w", err) + } + + options, err := s.makeOptions() + if err != nil { + return fmt.Errorf("make options: %w", err) + } + + grpcSrv := grpc.NewServer(options...)
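// A minimal client-side counterpart to run(): dial the connector endpoint and call
// DescribeTable. The address, database, and table names are placeholders, the
// connection is insecure (matching the no-TLS branch of makeOptions), and the
// credentials field of the data source instance is left unset for brevity.
package main

import (
	"context"
	"fmt"

	api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common"
	api_service "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service"
	api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := api_service.NewConnectorClient(conn)

	resp, err := client.DescribeTable(context.Background(), &api_service_protos.TDescribeTableRequest{
		Table: "example_table",
		DataSourceInstance: &api_common.TDataSourceInstance{
			Kind:     api_common.EDataSourceKind_POSTGRESQL,
			Endpoint: &api_common.TEndpoint{Host: "localhost", Port: 5432},
			Database: "postgres",
			UseTls:   false,
		},
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(resp.GetSchema())
}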
+ + api_service.RegisterConnectorServer(grpcSrv, s) + + s.logger.Info("listener started", log.String("address", lis.Addr().String())) + + if err := grpcSrv.Serve(lis); err != nil { + return fmt.Errorf("listener serve: %w", err) + } + + return nil +} + +func (s *Server) makeOptions() ([]grpc.ServerOption, error) { + var opts []grpc.ServerOption + + if s.cfg.Tls != nil { + s.logger.Info("server will use TLS connections") + + s.logger.Info("reading key pair", log.String("cert", s.cfg.Tls.Cert), log.String("key", s.cfg.Tls.Key)) + creds, err := credentials.NewServerTLSFromFile(s.cfg.Tls.Cert, s.cfg.Tls.Key) + if err != nil { + return nil, fmt.Errorf("new server TLS from file: %w", err) + } + + opts = append(opts, grpc.Creds(creds)) + } else { + s.logger.Warn("server will use insecure connections") + } + + return opts, nil +} + +func (s *Server) getHandler(dataSourceType api_common.EDataSourceKind) (*rdbms.Handler, error) { + if h, ok := s.handlers[dataSourceType]; ok { + return h, nil + } + + return nil, fmt.Errorf("pick handler for data source type '%v': %w", dataSourceType, utils.ErrDataSourceNotSupported) +} + +func newServer( + logger log.Logger, + cfg *config.ServerConfig, +) (*Server, error) { + return &Server{ + handlers: map[api_common.EDataSourceKind]*rdbms.Handler{ + api_common.EDataSourceKind_CLICKHOUSE: rdbms.NewHandler( + logger, clickhouse.NewQueryBuilder(), clickhouse.NewConnectionManager(), clickhouse.NewTypeMapper()), + api_common.EDataSourceKind_POSTGRESQL: rdbms.NewHandler( + logger, postgresql.NewQueryBuilder(), postgresql.NewConnectionManager(), postgresql.NewTypeMapper()), + }, + columnarBufferFactory: utils.NewColumnarBufferFactory( + memory.DefaultAllocator, + utils.NewReadLimiterFactory(cfg.ReadLimit), + ), + logger: logger, + cfg: cfg, + }, nil +} + +func runServer(cmd *cobra.Command, _ []string) error { + logger, err := utils.NewDevelopmentLogger() + if err != nil { + return fmt.Errorf("new development logger: %w", err) + } + + configPath, err := cmd.Flags().GetString(configFlag) + if err != nil { + return fmt.Errorf("get config flag: %v", err) + } + + cfg, err := newConfigFromPath(configPath) + if err != nil { + return fmt.Errorf("new config: %w", err) + } + + srv, err := newServer(logger, cfg) + if err != nil { + return fmt.Errorf("new server: %w", err) + } + + if err := srv.run(); err != nil { + return fmt.Errorf("server run: %w", err) + } + + return nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/arrow_helpers.go b/ydb/library/yql/providers/generic/connector/app/server/utils/arrow_helpers.go new file mode 100644 index 0000000000..c78e631fcc --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/arrow_helpers.go @@ -0,0 +1,130 @@ +package utils + +import ( + "fmt" + + "github.com/apache/arrow/go/v13/arrow" + "github.com/apache/arrow/go/v13/arrow/array" + "github.com/apache/arrow/go/v13/arrow/memory" + "github.com/ydb-platform/ydb-go-genproto/protos/Ydb" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +type ArrowBuilder[VT ValueType] interface { + AppendNull() + Append(value VT) +} + +func SelectWhatToArrow( + selectWhat *api_service_protos.TSelect_TWhat, + arrowAllocator memory.Allocator, +) (*arrow.Schema, []array.Builder, error) { + fields := make([]arrow.Field, 0, len(selectWhat.Items)) + builders := make([]array.Builder, 0, len(selectWhat.Items)) + + for i, item := range selectWhat.Items { + column := item.GetColumn() + if column 
== nil { + return nil, nil, fmt.Errorf("item #%d (%v) is not a column", i, item) + } + + ydbType := column.GetType() + + var ( + field arrow.Field + builder array.Builder + err error + ) + + // Reference table: https://wiki.yandex-team.ru/rtmapreduce/yql-streams-corner/connectors/lld-02-tipy-dannyx + switch t := ydbType.Type.(type) { + // Primitive types + case *Ydb.Type_TypeId: + field, builder, err = primitiveTypeToArrow(t.TypeId, column, arrowAllocator) + if err != nil { + return nil, nil, fmt.Errorf("convert primitive type: %w", err) + } + case *Ydb.Type_OptionalType: + field, builder, err = primitiveTypeToArrow(t.OptionalType.Item.GetTypeId(), column, arrowAllocator) + if err != nil { + return nil, nil, fmt.Errorf("convert optional type: %w", err) + } + default: + return nil, nil, fmt.Errorf("only primitive types are supported, got '%T' instead: %w", t, ErrDataTypeNotSupported) + } + + fields = append(fields, field) + builders = append(builders, builder) + } + + schema := arrow.NewSchema(fields, nil) + + return schema, builders, nil +} + +func primitiveTypeToArrow(typeID Ydb.Type_PrimitiveTypeId, column *Ydb.Column, arrowAllocator memory.Allocator) (arrow.Field, array.Builder, error) { + var ( + field arrow.Field + builder array.Builder + ) + + switch typeID { + case Ydb.Type_BOOL: + // NOTE: for some reason YDB bool type is mapped to Arrow uint8 + // https://st.yandex-team.ru/YQL-15332 + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Uint8} + builder = array.NewUint8Builder(arrowAllocator) + case Ydb.Type_INT8: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Int8} + builder = array.NewInt8Builder(arrowAllocator) + case Ydb.Type_UINT8: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Uint8} + builder = array.NewUint8Builder(arrowAllocator) + case Ydb.Type_INT16: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Int16} + builder = array.NewInt16Builder(arrowAllocator) + case Ydb.Type_UINT16: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Uint16} + builder = array.NewUint16Builder(arrowAllocator) + case Ydb.Type_INT32: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Int32} + builder = array.NewInt32Builder(arrowAllocator) + case Ydb.Type_UINT32: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Uint32} + builder = array.NewUint32Builder(arrowAllocator) + case Ydb.Type_INT64: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Int64} + builder = array.NewInt64Builder(arrowAllocator) + case Ydb.Type_UINT64: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Uint64} + builder = array.NewUint64Builder(arrowAllocator) + case Ydb.Type_FLOAT: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Float32} + builder = array.NewFloat32Builder(arrowAllocator) + case Ydb.Type_DOUBLE: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Float64} + builder = array.NewFloat64Builder(arrowAllocator) + case Ydb.Type_STRING: + // TODO: what about LargeBinary? + // https://arrow.apache.org/docs/cpp/api/datatype.html#_CPPv4N5arrow4Type4type12LARGE_BINARYE + field = arrow.Field{Name: column.Name, Type: arrow.BinaryTypes.Binary} + builder = array.NewBinaryBuilder(arrowAllocator, arrow.BinaryTypes.Binary) + case Ydb.Type_UTF8: + // TODO: what about LargeString? 
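// The net effect of SelectWhatToArrow/primitiveTypeToArrow for a hypothetical
// three-column select (Int32, Utf8, Date) is an Arrow schema like the one built
// below by hand. Note the deliberate quirks of the mapping above: BOOL becomes
// Arrow uint8 and DATE becomes Arrow uint16 (days since the epoch). Column names
// are placeholders.
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v13/arrow"
)

func main() {
	schema := arrow.NewSchema([]arrow.Field{
		{Name: "id", Type: arrow.PrimitiveTypes.Int32},       // Ydb.Type_INT32
		{Name: "name", Type: arrow.BinaryTypes.String},       // Ydb.Type_UTF8
		{Name: "created", Type: arrow.PrimitiveTypes.Uint16}, // Ydb.Type_DATE
	}, nil)

	fmt.Println(schema)
}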
+ // https://arrow.apache.org/docs/cpp/api/datatype.html#_CPPv4N5arrow4Type4type12LARGE_STRINGE + field = arrow.Field{Name: column.Name, Type: arrow.BinaryTypes.String} + builder = array.NewStringBuilder(arrowAllocator) + case Ydb.Type_DATE: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Uint16} + builder = array.NewUint16Builder(arrowAllocator) + case Ydb.Type_DATETIME: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Uint32} + builder = array.NewUint32Builder(arrowAllocator) + case Ydb.Type_TIMESTAMP: + field = arrow.Field{Name: column.Name, Type: arrow.PrimitiveTypes.Uint64} + builder = array.NewUint64Builder(arrowAllocator) + default: + return arrow.Field{}, nil, fmt.Errorf("register type '%v': %w", typeID, ErrDataTypeNotSupported) + } + return field, builder, nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/columnar_buffer_arrow_ipc_streaming.go b/ydb/library/yql/providers/generic/connector/app/server/utils/columnar_buffer_arrow_ipc_streaming.go new file mode 100644 index 0000000000..0db0782a43 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/columnar_buffer_arrow_ipc_streaming.go @@ -0,0 +1,84 @@ +package utils + +import ( + "bytes" + "fmt" + + "github.com/apache/arrow/go/v13/arrow" + "github.com/apache/arrow/go/v13/arrow/array" + "github.com/apache/arrow/go/v13/arrow/ipc" + "github.com/apache/arrow/go/v13/arrow/memory" + "github.com/ydb-platform/ydb-go-genproto/protos/Ydb" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +type columnarBufferArrowIPCStreaming struct { + arrowAllocator memory.Allocator + builders []array.Builder + readLimiter ReadLimiter + schema *arrow.Schema + typeMapper TypeMapper + ydbTypes []*Ydb.Type +} + +// AddRow saves a row obtained from the datasource into the buffer +func (cb *columnarBufferArrowIPCStreaming) AddRow(acceptors []any) error { + if len(cb.builders) != len(acceptors) { + return fmt.Errorf("expected row %v values, got %v", len(cb.builders), len(acceptors)) + } + + if err := cb.readLimiter.AddRow(); err != nil { + return fmt.Errorf("check read limiter: %w", err) + } + + if err := cb.typeMapper.AddRowToArrowIPCStreaming(cb.ydbTypes, acceptors, cb.builders); err != nil { + return fmt.Errorf("add row to arrow IPC Streaming: %w", err) + } + + return nil +} + +// ToResponse returns all the accumulated data and clears buffer +func (cb *columnarBufferArrowIPCStreaming) ToResponse() (*api_service_protos.TReadSplitsResponse, error) { + chunk := make([]arrow.Array, 0, len(cb.builders)) + + // prepare arrow record + for _, builder := range cb.builders { + chunk = append(chunk, builder.NewArray()) + } + + record := array.NewRecord(cb.schema, chunk, -1) + + for _, col := range chunk { + col.Release() + } + + // prepare arrow writer + var buf bytes.Buffer + + writer := ipc.NewWriter(&buf, ipc.WithSchema(cb.schema), ipc.WithAllocator(cb.arrowAllocator)) + + if err := writer.Write(record); err != nil { + return nil, fmt.Errorf("write record: %w", err) + } + + if err := writer.Close(); err != nil { + return nil, fmt.Errorf("close arrow writer: %w", err) + } + + out := &api_service_protos.TReadSplitsResponse{ + Payload: &api_service_protos.TReadSplitsResponse_ArrowIpcStreaming{ + ArrowIpcStreaming: buf.Bytes(), + }, + } + + return out, nil +} + +// Frees resources if buffer is no longer used +func (cb *columnarBufferArrowIPCStreaming) Release() { + // cleanup builders + for _, b := range 
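// The consumer-side counterpart of ToResponse above: the ArrowIpcStreaming payload
// is a standard Arrow IPC stream, so it can be decoded with the ipc.Reader from the
// same Arrow release. Here `payload` stands for resp.GetArrowIpcStreaming() taken
// from a TReadSplitsResponse; everything else is plain Arrow API.
package main

import (
	"bytes"
	"fmt"

	"github.com/apache/arrow/go/v13/arrow/ipc"
)

func dumpPayload(payload []byte) error {
	reader, err := ipc.NewReader(bytes.NewReader(payload))
	if err != nil {
		return fmt.Errorf("new IPC reader: %w", err)
	}
	defer reader.Release()

	for reader.Next() {
		record := reader.Record()
		fmt.Printf("record: %d columns x %d rows\n", record.NumCols(), record.NumRows())
	}

	return reader.Err()
}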
cb.builders { + b.Release() + } +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/columnar_buffer_factory.go b/ydb/library/yql/providers/generic/connector/app/server/utils/columnar_buffer_factory.go new file mode 100644 index 0000000000..5d6b683940 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/columnar_buffer_factory.go @@ -0,0 +1,64 @@ +package utils + +import ( + "fmt" + + "github.com/apache/arrow/go/v13/arrow/memory" + "github.com/ydb-platform/ydb/library/go/core/log" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +type ColumnarBuffer interface { + // AddRow saves a row obtained from the datasource into the buffer + AddRow(acceptors []any) error + // ToResponse returns all the accumulated data and clears buffer + ToResponse() (*api_service_protos.TReadSplitsResponse, error) + // Frees resources if buffer is no longer used + Release() +} + +type ColumnarBufferFactory struct { + arrowAllocator memory.Allocator + readLimiterFactory *ReadLimiterFactory +} + +func (cbf *ColumnarBufferFactory) MakeBuffer( + logger log.Logger, + format api_service_protos.TReadSplitsRequest_EFormat, + selectWhat *api_service_protos.TSelect_TWhat, + typeMapper TypeMapper, +) (ColumnarBuffer, error) { + switch format { + case api_service_protos.TReadSplitsRequest_ARROW_IPC_STREAMING: + schema, builders, err := SelectWhatToArrow(selectWhat, cbf.arrowAllocator) + if err != nil { + return nil, fmt.Errorf("convert Select.What to arrow.Schema: %w", err) + } + + ydbTypes, err := SelectWhatToYDBTypes(selectWhat) + if err != nil { + return nil, fmt.Errorf("convert Select.What to Ydb.Types: %w", err) + } + + return &columnarBufferArrowIPCStreaming{ + arrowAllocator: cbf.arrowAllocator, + schema: schema, + builders: builders, + readLimiter: cbf.readLimiterFactory.MakeReadLimiter(logger), + typeMapper: typeMapper, + ydbTypes: ydbTypes, + }, nil + default: + return nil, fmt.Errorf("unknown format: %v", format) + } +} + +func NewColumnarBufferFactory( + arrowAllocator memory.Allocator, + readLimiterFactory *ReadLimiterFactory, +) *ColumnarBufferFactory { + return &ColumnarBufferFactory{ + arrowAllocator: arrowAllocator, + readLimiterFactory: readLimiterFactory, + } +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/converters.go b/ydb/library/yql/providers/generic/connector/app/server/utils/converters.go new file mode 100644 index 0000000000..1387976c3b --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/converters.go @@ -0,0 +1,111 @@ +package utils + +import ( + "fmt" + "time" +) + +type ValueType interface { + bool | int8 | int16 | int32 | int64 | uint8 | uint16 | uint32 | uint64 | float32 | float64 | string | []byte | time.Time +} + +type ValueConverter[IN ValueType, OUT ValueType] interface { + Convert(in IN) (OUT, error) +} + +type BoolConverter struct{} + +func (BoolConverter) Convert(in bool) (uint8, error) { + // For a some reason, Bool values are converted to Arrow Uint8 rather than to Arrow native Bool. + // See https://st.yandex-team.ru/YQL-15332 for more details. 
+ if in { + return 1, nil + } + + return 0, nil +} + +type Int8Converter struct{} + +func (Int8Converter) Convert(in int8) (int8, error) { return in, nil } + +type Int16Converter struct{} + +func (Int16Converter) Convert(in int16) (int16, error) { return in, nil } + +type Int32Converter struct{} + +func (Int32Converter) Convert(in int32) (int32, error) { return in, nil } + +type Int64Converter struct{} + +func (Int64Converter) Convert(in int64) (int64, error) { return in, nil } + +type Uint8Converter struct{} + +func (Uint8Converter) Convert(in uint8) (uint8, error) { return in, nil } + +type Uint16Converter struct{} + +func (Uint16Converter) Convert(in uint16) (uint16, error) { return in, nil } + +type Uint32Converter struct{} + +func (Uint32Converter) Convert(in uint32) (uint32, error) { return in, nil } + +type Uint64Converter struct{} + +func (Uint64Converter) Convert(in uint64) (uint64, error) { return in, nil } + +type Float32Converter struct{} + +func (Float32Converter) Convert(in float32) (float32, error) { return in, nil } + +type Float64Converter struct{} + +func (Float64Converter) Convert(in float64) (float64, error) { return in, nil } + +type StringConverter struct{} + +func (StringConverter) Convert(in string) (string, error) { return in, nil } + +type StringToBytesConverter struct{} + +func (StringToBytesConverter) Convert(in string) ([]byte, error) { return []byte(in), nil } + +type BytesConverter struct{} + +func (BytesConverter) Convert(in []byte) ([]byte, error) { return in, nil } + +type DateConverter struct{} + +func (DateConverter) Convert(in time.Time) (uint16, error) { + out, err := TimeToYDBDate(&in) + if err != nil { + return 0, fmt.Errorf("convert time to YDB Date: %w", err) + } + + return out, nil +} + +type DatetimeConverter struct{} + +func (DatetimeConverter) Convert(in time.Time) (uint32, error) { + out, err := TimeToYDBDatetime(&in) + if err != nil { + return 0, fmt.Errorf("convert time to YDB Datetime: %w", err) + } + + return out, nil +} + +type TimestampConverter struct{} + +func (TimestampConverter) Convert(in time.Time) (uint64, error) { + out, err := TimeToYDBTimestamp(&in) + if err != nil { + return 0, fmt.Errorf("convert time to YDB Timestamp: %w", err) + } + + return out, nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/endpoint.go b/ydb/library/yql/providers/generic/connector/app/server/utils/endpoint.go new file mode 100644 index 0000000000..56fbde80da --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/endpoint.go @@ -0,0 +1,11 @@ +package utils + +import ( + "fmt" + + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" +) + +func EndpointToString(ep *api_common.TEndpoint) string { + return fmt.Sprintf("%s:%d", ep.GetHost(), ep.GetPort()) +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/errors.go b/ydb/library/yql/providers/generic/connector/app/server/utils/errors.go new file mode 100644 index 0000000000..a9093be650 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/errors.go @@ -0,0 +1,76 @@ +package utils + +import ( + "errors" + "fmt" + + "github.com/ydb-platform/ydb-go-genproto/protos/Ydb" + "github.com/ydb-platform/ydb/library/go/core/log" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +var ( + ErrTableDoesNotExist = fmt.Errorf("table does not exist") + ErrDataSourceNotSupported = fmt.Errorf("data source not 
supported") + ErrDataTypeNotSupported = fmt.Errorf("data type not supported") + ErrReadLimitExceeded = fmt.Errorf("read limit exceeded") + ErrInvalidRequest = fmt.Errorf("invalid request") + ErrValueOutOfTypeBounds = fmt.Errorf("value is out of possible range of values for the type") +) + +func NewSuccess() *api_service_protos.TError { + return &api_service_protos.TError{ + Status: Ydb.StatusIds_SUCCESS, + Message: "succeeded", + } +} + +func IsSuccess(apiErr *api_service_protos.TError) bool { + if apiErr.Status == Ydb.StatusIds_STATUS_CODE_UNSPECIFIED { + panic("status uninitialized") + } + + return apiErr.Status == Ydb.StatusIds_SUCCESS +} + +func NewAPIErrorFromStdError(err error) *api_service_protos.TError { + var status Ydb.StatusIds_StatusCode + + switch { + case errors.Is(err, ErrTableDoesNotExist): + status = Ydb.StatusIds_NOT_FOUND + case errors.Is(err, ErrReadLimitExceeded): + // Return BAD_REQUEST to avoid retrying + status = Ydb.StatusIds_BAD_REQUEST + case errors.Is(err, ErrInvalidRequest): + status = Ydb.StatusIds_BAD_REQUEST + case errors.Is(err, ErrDataSourceNotSupported): + status = Ydb.StatusIds_UNSUPPORTED + case errors.Is(err, ErrDataTypeNotSupported): + status = Ydb.StatusIds_UNSUPPORTED + case errors.Is(err, ErrValueOutOfTypeBounds): + status = Ydb.StatusIds_UNSUPPORTED + default: + status = Ydb.StatusIds_INTERNAL_ERROR + } + + return &api_service_protos.TError{ + Status: status, + Message: err.Error(), + } +} + +func APIErrorToLogFields(apiErr *api_service_protos.TError) []log.Field { + return []log.Field{ + log.String("message", apiErr.Message), + log.String("status", apiErr.Status.String()), + } +} + +func NewSTDErrorFromAPIError(apiErr *api_service_protos.TError) error { + if IsSuccess(apiErr) { + return nil + } + + return errors.New(apiErr.Message) +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/logger.go b/ydb/library/yql/providers/generic/connector/app/server/utils/logger.go new file mode 100644 index 0000000000..55814163ec --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/logger.go @@ -0,0 +1,66 @@ +package utils + +import ( + "fmt" + "io" + + "github.com/ydb-platform/ydb/library/go/core/log" + "github.com/ydb-platform/ydb/library/go/core/log/zap" + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" + "go.uber.org/zap/zapcore" +) + +// TODO: it's better to do this in GRPC middleware +func AnnotateLogger(logger log.Logger, method string, dsi *api_common.TDataSourceInstance) log.Logger { + logger = log.With(logger, log.String("method", method)) + + if dsi != nil { + logger = log.With(logger, + log.String("data_source_kind", api_common.EDataSourceKind_name[int32(dsi.Kind)]), + log.String("host", dsi.Endpoint.Host), + log.UInt32("port", dsi.Endpoint.Port), + log.String("database", dsi.Database), + log.Bool("use_tls", dsi.UseTls), + // TODO: can we print just a login without a password? 
+ ) + } + + return logger +} + +func LogCloserError(logger log.Logger, closer io.Closer, msg string) { + if err := closer.Close(); err != nil { + logger.Error(msg, log.Error(err)) + } +} + +func NewDevelopmentLogger() (log.Logger, error) { + cfg := zap.NewDeployConfig() + cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + cfg.Encoding = "console" + + zapLogger, err := cfg.Build() + if err != nil { + return nil, fmt.Errorf("new logger: %w", err) + } + + return &zap.Logger{L: zapLogger}, nil +} + +func DumpReadSplitsResponse(logger log.Logger, resp *api_service_protos.TReadSplitsResponse) { + if columnSet := resp.GetColumnSet(); columnSet != nil { + for i := range columnSet.Data { + data := columnSet.Data[i] + meta := columnSet.Meta[i] + + logger.Debug("response", log.Int("column_id", i), log.String("meta", meta.String()), log.String("data", data.String())) + } + + return + } + + if dump := resp.GetArrowIpcStreaming(); dump != nil { + logger.Debug("response", log.Int("arrow_blob_length", len(dump))) + } +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/paging_writer.go b/ydb/library/yql/providers/generic/connector/app/server/utils/paging_writer.go new file mode 100644 index 0000000000..c5f5328a71 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/paging_writer.go @@ -0,0 +1,97 @@ +package utils + +import ( + "fmt" + + "github.com/ydb-platform/ydb/library/go/core/log" + api_service "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +type PagingWriter struct { + buffer ColumnarBuffer // row data accumulator + stream api_service.Connector_ReadSplitsServer // outgoing data stream + pagination *api_service_protos.TPagination // settings + rowsReceived uint64 // simple stats + logger log.Logger // annotated logger + operational bool // flag showing if it's ready to return data +} + +func (pw *PagingWriter) AddRow(acceptors []any) error { + if !pw.operational { + return fmt.Errorf("paging writer is not operational") + } + + if err := pw.buffer.AddRow(acceptors); err != nil { + return fmt.Errorf("acceptors to row set: %w", err) + } + + if pw.isEnough() { + if err := pw.flush(); err != nil { + return fmt.Errorf("flush: %w", err) + } + } + + pw.rowsReceived++ + + return nil +} + +func (pw *PagingWriter) isEnough() bool { + // TODO: implement pagination logic, check limits provided by client + return false +} + +func (pw *PagingWriter) flush() error { + if pw.buffer == nil { + return nil + } + + response, err := pw.buffer.ToResponse() + if err != nil { + return fmt.Errorf("build response: %w", err) + } + + response.Error = NewSuccess() + + DumpReadSplitsResponse(pw.logger, response) + + if err := pw.stream.Send(response); err != nil { + return fmt.Errorf("send stream: %w", err) + } + + pw.buffer.Release() + + pw.buffer = nil + + return nil +} + +func (pw *PagingWriter) Finish() (uint64, error) { + if err := pw.flush(); err != nil { + return 0, fmt.Errorf("flush: %w", err) + } + + pw.operational = false + + return pw.rowsReceived, nil +} + +func NewPagingWriter( + logger log.Logger, + buffer ColumnarBuffer, + stream api_service.Connector_ReadSplitsServer, + pagination *api_service_protos.TPagination, +) (*PagingWriter, error) { + if pagination != nil { + return nil, fmt.Errorf("pagination settings are not supported yet") + } + + return &PagingWriter{ + buffer: buffer, + logger: logger,
+ stream: stream, + pagination: pagination, + operational: true, + }, nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/protobuf.go b/ydb/library/yql/providers/generic/connector/app/server/utils/protobuf.go new file mode 100644 index 0000000000..b6528a96a0 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/protobuf.go @@ -0,0 +1,26 @@ +package utils + +import ( + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +func DumpProtoMessageToJSON(msg proto.Message, stream io.Writer) error { + opts := protojson.MarshalOptions{ + Indent: " ", + } + + data, err := opts.Marshal(msg) + if err != nil { + return fmt.Errorf("protojson marshal: %w", err) + } + + if _, err := stream.Write(data); err != nil { + return fmt.Errorf("stream write: %w", err) + } + + return nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/read_limiter.go b/ydb/library/yql/providers/generic/connector/app/server/utils/read_limiter.go new file mode 100644 index 0000000000..7b9906f21f --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/read_limiter.go @@ -0,0 +1,52 @@ +package utils + +import ( + "fmt" + + "github.com/ydb-platform/ydb/library/go/core/log" + "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/config" +) + +// ReadLimiter helps to limitate amount of data returned by Connector server in every read request. +// This is generally should be avoided after https://st.yandex-team.ru/YQ-2057 +type ReadLimiter interface { + AddRow() error +} + +type readLimiterNoop struct { +} + +func (rl readLimiterNoop) AddRow() error { return nil } + +type readLimiterRows struct { + rowsRead uint64 + rowsLimit uint64 +} + +func (rl *readLimiterRows) AddRow() error { + if rl.rowsRead == rl.rowsLimit { + return fmt.Errorf("can read only %d line(s) from data source per request: %w", rl.rowsLimit, ErrReadLimitExceeded) + } + + rl.rowsRead++ + + return nil +} + +type ReadLimiterFactory struct { + cfg *config.ServerReadLimit +} + +func (rlf *ReadLimiterFactory) MakeReadLimiter(logger log.Logger) ReadLimiter { + if rlf.cfg == nil { + return readLimiterNoop{} + } + + logger.Warn(fmt.Sprintf("Server will return only first %d lines from the data source", rlf.cfg.GetRows())) + + return &readLimiterRows{rowsRead: 0, rowsLimit: rlf.cfg.GetRows()} +} + +func NewReadLimiterFactory(cfg *config.ServerReadLimit) *ReadLimiterFactory { + return &ReadLimiterFactory{cfg: cfg} +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/select_helpers.go b/ydb/library/yql/providers/generic/connector/app/server/utils/select_helpers.go new file mode 100644 index 0000000000..520bfcbbee --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/select_helpers.go @@ -0,0 +1,38 @@ +package utils + +import ( + "fmt" + + "github.com/ydb-platform/ydb-go-genproto/protos/Ydb" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +func SelectWhatToYDBTypes(selectWhat *api_service_protos.TSelect_TWhat) ([]*Ydb.Type, error) { + var ydbTypes []*Ydb.Type + + for i, item := range selectWhat.Items { + ydbType := item.GetColumn().GetType() + if ydbType == nil { + return nil, fmt.Errorf("item #%d (%v) is not a column", i, item) + } + + ydbTypes = append(ydbTypes, ydbType) + } + + return ydbTypes, nil +} + +func SelectWhatToYDBColumns(selectWhat *api_service_protos.TSelect_TWhat) 
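// readLimiterRows above is unexported, so this sketch mirrors its behaviour with the
// same sentinel: after rowsLimit successful AddRow calls, every further row fails
// with ErrReadLimitExceeded, which NewAPIErrorFromStdError then maps to BAD_REQUEST.
// The limiter type and the limit of 2 below are illustrative assumptions.
package main

import (
	"errors"
	"fmt"

	"github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils"
)

type rowLimiter struct {
	rowsRead  uint64
	rowsLimit uint64
}

func (rl *rowLimiter) AddRow() error {
	if rl.rowsRead == rl.rowsLimit {
		return fmt.Errorf("can read only %d line(s) from data source per request: %w",
			rl.rowsLimit, utils.ErrReadLimitExceeded)
	}

	rl.rowsRead++

	return nil
}

func main() {
	rl := &rowLimiter{rowsLimit: 2}

	for i := 0; i < 3; i++ {
		err := rl.AddRow()
		fmt.Printf("row %d: limit exceeded = %v\n", i, errors.Is(err, utils.ErrReadLimitExceeded))
	}
	// row 0: limit exceeded = false
	// row 1: limit exceeded = false
	// row 2: limit exceeded = true
}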
([]*Ydb.Column, error) { + var columns []*Ydb.Column + + for i, item := range selectWhat.Items { + column := item.GetColumn() + if column == nil { + return nil, fmt.Errorf("item #%d (%v) is not a column", i, item) + } + + columns = append(columns, column) + } + + return columns, nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/time.go b/ydb/library/yql/providers/generic/connector/app/server/utils/time.go new file mode 100644 index 0000000000..d21310262f --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/time.go @@ -0,0 +1,42 @@ +package utils + +import ( + "fmt" + "time" +) + +var ( + // According to https://ydb.tech/en/docs/yql/reference/types/primitive#datetime + minYDBTime = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + maxYDBTime = time.Date(2106, time.January, 1, 0, 0, 0, 0, time.UTC) +) + +func TimeToYDBDate(t *time.Time) (uint16, error) { + if t.Before(minYDBTime) || t.After(maxYDBTime) { + return 0, fmt.Errorf("convert '%v' to YDB Date: %w", t, ErrValueOutOfTypeBounds) + } + + days := t.Sub(minYDBTime).Hours() / 24 + + return uint16(days), nil +} + +func TimeToYDBDatetime(t *time.Time) (uint32, error) { + if t.Before(minYDBTime) || t.After(maxYDBTime) { + return 0, fmt.Errorf("convert '%v' to YDB Date: %w", t, ErrValueOutOfTypeBounds) + } + + seconds := t.Unix() + + return uint32(seconds), nil +} + +func TimeToYDBTimestamp(t *time.Time) (uint64, error) { + if t.Before(minYDBTime) || t.After(maxYDBTime) { + return 0, fmt.Errorf("convert '%v' to YDB Date: %w", t, ErrValueOutOfTypeBounds) + } + + seconds := t.UnixMicro() + + return uint64(seconds), nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/type_mapper.go b/ydb/library/yql/providers/generic/connector/app/server/utils/type_mapper.go new file mode 100644 index 0000000000..e04e8274c7 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/type_mapper.go @@ -0,0 +1,15 @@ +package utils + +import ( + "github.com/apache/arrow/go/v13/arrow/array" + "github.com/ydb-platform/ydb-go-genproto/protos/Ydb" +) + +type TypeMapper interface { + SQLTypeToYDBColumn(columnName, typeName string) (*Ydb.Column, error) + YDBTypeToAcceptor(ydbType *Ydb.Type) (any, error) + AddRowToArrowIPCStreaming( + ydbTypes []*Ydb.Type, + acceptors []any, + builders []array.Builder) error +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/utils/ya.make b/ydb/library/yql/providers/generic/connector/app/server/utils/ya.make new file mode 100644 index 0000000000..1b9a99f962 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/utils/ya.make @@ -0,0 +1,23 @@ +GO_LIBRARY() + +SRCS( + arrow_helpers.go + columnar_buffer_arrow_ipc_streaming.go + columnar_buffer_factory.go + converters.go + errors.go + endpoint.go + logger.go + paging_writer.go + protobuf.go + read_limiter.go + select_helpers.go + time.go + type_mapper.go +) + +GO_TEST_SRCS( + time_test.go +) + +END() diff --git a/ydb/library/yql/providers/generic/connector/app/server/validate.go b/ydb/library/yql/providers/generic/connector/app/server/validate.go new file mode 100644 index 0000000000..28f717b677 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/validate.go @@ -0,0 +1,90 @@ +package server + +import ( + "fmt" + + "github.com/ydb-platform/ydb/library/go/core/log" + api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + 
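// Concrete values for the time conversions above, calling utils directly: the YDB
// Date is days since 1970-01-01, Datetime is Unix seconds, Timestamp is Unix
// microseconds, and dates before 1970-01-01 or after 2106-01-01 fail with
// ErrValueOutOfTypeBounds.
package main

import (
	"fmt"
	"time"

	"github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils"
)

func main() {
	t := time.Date(1970, time.February, 1, 0, 0, 0, 0, time.UTC)

	d, _ := utils.TimeToYDBDate(&t)      // 31 days after the epoch
	dt, _ := utils.TimeToYDBDatetime(&t) // 31 * 86400 seconds
	ts, _ := utils.TimeToYDBTimestamp(&t)

	fmt.Println(d, dt, ts) // 31 2678400 2678400000000

	old := time.Date(1969, time.December, 31, 0, 0, 0, 0, time.UTC)
	if _, err := utils.TimeToYDBDate(&old); err != nil {
		fmt.Println(err) // wraps ErrValueOutOfTypeBounds
	}
}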
"github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils" + api_service_protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" +) + +func ValidateDescribeTableRequest(logger log.Logger, request *api_service_protos.TDescribeTableRequest) error { + if err := validateDataSourceInstance(logger, request.GetDataSourceInstance()); err != nil { + return fmt.Errorf("validate data source instance: %w", err) + } + + if request.GetTable() == "" { + return fmt.Errorf("empty table: %w", utils.ErrInvalidRequest) + } + + return nil +} + +func ValidateListSplitsRequest(logger log.Logger, request *api_service_protos.TListSplitsRequest) error { + if err := validateDataSourceInstance(logger, request.GetDataSourceInstance()); err != nil { + return fmt.Errorf("validate data source instance: %w", err) + } + + if len(request.Selects) == 0 { + return fmt.Errorf("empty select list: %w", utils.ErrInvalidRequest) + } + + for i, slct := range request.Selects { + if err := validateSelect(slct); err != nil { + return fmt.Errorf("validate select %d: %w", i, err) + } + } + + return nil +} + +func ValidateReadSplitsRequest(logger log.Logger, request *api_service_protos.TReadSplitsRequest) error { + if err := validateDataSourceInstance(logger, request.GetDataSourceInstance()); err != nil { + return fmt.Errorf("validate data source instance: %w", err) + } + + return nil +} + +func validateSelect(slct *api_service_protos.TSelect) error { + if slct == nil { + return fmt.Errorf("select is empty: %w", utils.ErrInvalidRequest) + } + + if len(slct.GetWhat().GetItems()) == 0 { + return fmt.Errorf("empty items: %w", utils.ErrInvalidRequest) + } + + return nil +} + +func validateDataSourceInstance(logger log.Logger, dsi *api_common.TDataSourceInstance) error { + if dsi.GetKind() == api_common.EDataSourceKind_DATA_SOURCE_KIND_RESERVED { + return fmt.Errorf("empty type: %w", utils.ErrInvalidRequest) + } + + if dsi.Endpoint == nil { + return fmt.Errorf("endpoint is empty: %w", utils.ErrInvalidRequest) + } + + if dsi.Endpoint.Host == "" { + return fmt.Errorf("endpoint.host is empty: %w", utils.ErrInvalidRequest) + } + + if dsi.Endpoint.Port == 0 { + return fmt.Errorf("endpoint.port is empty: %w", utils.ErrInvalidRequest) + } + + if dsi.Database == "" { + return fmt.Errorf("database field is empty: %w", utils.ErrInvalidRequest) + } + + if dsi.UseTls { + logger.Info("connector will use secure connection to access data source") + } else { + logger.Warn("connector will use insecure connection to access data source") + } + + return nil +} diff --git a/ydb/library/yql/providers/generic/connector/app/server/ya.make b/ydb/library/yql/providers/generic/connector/app/server/ya.make new file mode 100644 index 0000000000..500a6363a2 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/server/ya.make @@ -0,0 +1,17 @@ +GO_LIBRARY() + +SRCS( + cmd.go + config.go + server.go + validate.go +) + +END() + +RECURSE( + clickhouse + postgresql + rdbms + utils +) diff --git a/ydb/library/yql/providers/generic/connector/app/ya.make b/ydb/library/yql/providers/generic/connector/app/ya.make new file mode 100644 index 0000000000..44d6e2b962 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/app/ya.make @@ -0,0 +1,11 @@ +GO_PROGRAM(yq-connector) + +SRCS(main.go) + +END() + +RECURSE( + client + config + server +) diff --git a/ydb/library/yql/providers/generic/connector/libgo/service/connector.pb.go 
b/ydb/library/yql/providers/generic/connector/libgo/service/connector.pb.go new file mode 100644 index 0000000000..5732bbb215 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/libgo/service/connector.pb.go @@ -0,0 +1,119 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.19.0 +// source: ydb/library/yql/providers/generic/connector/api/service/connector.proto + +package service + +import ( + protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_ydb_library_yql_providers_generic_connector_api_service_connector_proto protoreflect.FileDescriptor + +var file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_rawDesc = []byte{ + 0x0a, 0x47, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x4e, 0x59, 0x71, 0x6c, 0x2e, + 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x1a, + 0x4e, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, + 0xa6, 0x03, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x63, 0x0a, + 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x4e, 0x59, + 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, + 0x70, 0x69, 0x2e, 0x54, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x4c, 0x69, + 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x12, 0x6a, 0x0a, 0x0d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x54, 
0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, + 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x12, 0x28, 0x2e, 0x4e, + 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, + 0x41, 0x70, 0x69, 0x2e, 0x54, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x4c, + 0x69, 0x73, 0x74, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x30, 0x01, 0x12, 0x63, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x53, 0x70, 0x6c, 0x69, 0x74, + 0x73, 0x12, 0x28, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x52, 0x65, 0x61, 0x64, 0x53, 0x70, + 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x4e, 0x59, + 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, + 0x70, 0x69, 0x2e, 0x54, 0x52, 0x65, 0x61, 0x64, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x4c, 0x5a, 0x4a, 0x61, 0x2e, 0x79, 0x61, + 0x6e, 0x64, 0x65, 0x78, 0x2d, 0x74, 0x65, 0x61, 0x6d, 0x2e, 0x72, 0x75, 0x2f, 0x79, 0x64, 0x62, + 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, 0x6c, 0x2f, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x6c, 0x69, 0x62, 0x67, 0x6f, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_goTypes = []interface{}{ + (*protos.TListTablesRequest)(nil), // 0: NYql.NConnector.NApi.TListTablesRequest + (*protos.TDescribeTableRequest)(nil), // 1: NYql.NConnector.NApi.TDescribeTableRequest + (*protos.TListSplitsRequest)(nil), // 2: NYql.NConnector.NApi.TListSplitsRequest + (*protos.TReadSplitsRequest)(nil), // 3: NYql.NConnector.NApi.TReadSplitsRequest + (*protos.TListTablesResponse)(nil), // 4: NYql.NConnector.NApi.TListTablesResponse + (*protos.TDescribeTableResponse)(nil), // 5: NYql.NConnector.NApi.TDescribeTableResponse + (*protos.TListSplitsResponse)(nil), // 6: NYql.NConnector.NApi.TListSplitsResponse + (*protos.TReadSplitsResponse)(nil), // 7: NYql.NConnector.NApi.TReadSplitsResponse +} +var file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_depIdxs = []int32{ + 0, // 0: NYql.NConnector.NApi.Connector.ListTables:input_type -> NYql.NConnector.NApi.TListTablesRequest + 1, // 1: NYql.NConnector.NApi.Connector.DescribeTable:input_type -> NYql.NConnector.NApi.TDescribeTableRequest + 2, // 2: NYql.NConnector.NApi.Connector.ListSplits:input_type -> NYql.NConnector.NApi.TListSplitsRequest + 3, // 3: NYql.NConnector.NApi.Connector.ReadSplits:input_type -> NYql.NConnector.NApi.TReadSplitsRequest + 4, // 4: NYql.NConnector.NApi.Connector.ListTables:output_type -> NYql.NConnector.NApi.TListTablesResponse + 5, // 5: NYql.NConnector.NApi.Connector.DescribeTable:output_type -> NYql.NConnector.NApi.TDescribeTableResponse + 6, // 6: NYql.NConnector.NApi.Connector.ListSplits:output_type -> NYql.NConnector.NApi.TListSplitsResponse + 7, // 7: 
NYql.NConnector.NApi.Connector.ReadSplits:output_type -> NYql.NConnector.NApi.TReadSplitsResponse + 4, // [4:8] is the sub-list for method output_type + 0, // [0:4] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_init() } +func file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_init() { + if File_ydb_library_yql_providers_generic_connector_api_service_connector_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_goTypes, + DependencyIndexes: file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_depIdxs, + }.Build() + File_ydb_library_yql_providers_generic_connector_api_service_connector_proto = out.File + file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_rawDesc = nil + file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_goTypes = nil + file_ydb_library_yql_providers_generic_connector_api_service_connector_proto_depIdxs = nil +} diff --git a/ydb/library/yql/providers/generic/connector/libgo/service/connector_grpc.pb.go b/ydb/library/yql/providers/generic/connector/libgo/service/connector_grpc.pb.go new file mode 100644 index 0000000000..18ed5e9d46 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/libgo/service/connector_grpc.pb.go @@ -0,0 +1,313 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.19.0 +// source: ydb/library/yql/providers/generic/connector/api/service/connector.proto + +package service + +import ( + protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos" + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Connector_ListTables_FullMethodName = "/NYql.NConnector.NApi.Connector/ListTables" + Connector_DescribeTable_FullMethodName = "/NYql.NConnector.NApi.Connector/DescribeTable" + Connector_ListSplits_FullMethodName = "/NYql.NConnector.NApi.Connector/ListSplits" + Connector_ReadSplits_FullMethodName = "/NYql.NConnector.NApi.Connector/ReadSplits" +) + +// ConnectorClient is the client API for Connector service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ConnectorClient interface { + // ListTables returns the list of tables existing in a particular database. + ListTables(ctx context.Context, in *protos.TListTablesRequest, opts ...grpc.CallOption) (Connector_ListTablesClient, error) + // DescribeTable returns table's schema. 
+ DescribeTable(ctx context.Context, in *protos.TDescribeTableRequest, opts ...grpc.CallOption) (*protos.TDescribeTableResponse, error) + // ListSplits asks Connector to partition the data that are going to be read + // into elementary parts suitable for parallel reading. + ListSplits(ctx context.Context, in *protos.TListSplitsRequest, opts ...grpc.CallOption) (Connector_ListSplitsClient, error) + // ReadSplits reads data associated with splits. + ReadSplits(ctx context.Context, in *protos.TReadSplitsRequest, opts ...grpc.CallOption) (Connector_ReadSplitsClient, error) +} + +type connectorClient struct { + cc grpc.ClientConnInterface +} + +func NewConnectorClient(cc grpc.ClientConnInterface) ConnectorClient { + return &connectorClient{cc} +} + +func (c *connectorClient) ListTables(ctx context.Context, in *protos.TListTablesRequest, opts ...grpc.CallOption) (Connector_ListTablesClient, error) { + stream, err := c.cc.NewStream(ctx, &Connector_ServiceDesc.Streams[0], Connector_ListTables_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &connectorListTablesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Connector_ListTablesClient interface { + Recv() (*protos.TListTablesResponse, error) + grpc.ClientStream +} + +type connectorListTablesClient struct { + grpc.ClientStream +} + +func (x *connectorListTablesClient) Recv() (*protos.TListTablesResponse, error) { + m := new(protos.TListTablesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *connectorClient) DescribeTable(ctx context.Context, in *protos.TDescribeTableRequest, opts ...grpc.CallOption) (*protos.TDescribeTableResponse, error) { + out := new(protos.TDescribeTableResponse) + err := c.cc.Invoke(ctx, Connector_DescribeTable_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *connectorClient) ListSplits(ctx context.Context, in *protos.TListSplitsRequest, opts ...grpc.CallOption) (Connector_ListSplitsClient, error) { + stream, err := c.cc.NewStream(ctx, &Connector_ServiceDesc.Streams[1], Connector_ListSplits_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &connectorListSplitsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Connector_ListSplitsClient interface { + Recv() (*protos.TListSplitsResponse, error) + grpc.ClientStream +} + +type connectorListSplitsClient struct { + grpc.ClientStream +} + +func (x *connectorListSplitsClient) Recv() (*protos.TListSplitsResponse, error) { + m := new(protos.TListSplitsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *connectorClient) ReadSplits(ctx context.Context, in *protos.TReadSplitsRequest, opts ...grpc.CallOption) (Connector_ReadSplitsClient, error) { + stream, err := c.cc.NewStream(ctx, &Connector_ServiceDesc.Streams[2], Connector_ReadSplits_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &connectorReadSplitsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Connector_ReadSplitsClient interface { + Recv() (*protos.TReadSplitsResponse, error) + grpc.ClientStream +} + +type connectorReadSplitsClient struct { + grpc.ClientStream +} + +func (x *connectorReadSplitsClient) Recv() (*protos.TReadSplitsResponse, error) { + m := new(protos.TReadSplitsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ConnectorServer is the server API for Connector service. +// All implementations must embed UnimplementedConnectorServer +// for forward compatibility +type ConnectorServer interface { + // ListTables returns the list of tables existing in a particular database. + ListTables(*protos.TListTablesRequest, Connector_ListTablesServer) error + // DescribeTable returns table's schema. + DescribeTable(context.Context, *protos.TDescribeTableRequest) (*protos.TDescribeTableResponse, error) + // ListSplits asks Connector to partition the data that are going to be read + // into elementary parts suitable for parallel reading. + ListSplits(*protos.TListSplitsRequest, Connector_ListSplitsServer) error + // ReadSplits reads data associated with splits. + ReadSplits(*protos.TReadSplitsRequest, Connector_ReadSplitsServer) error + mustEmbedUnimplementedConnectorServer() +} + +// UnimplementedConnectorServer must be embedded to have forward compatible implementations. +type UnimplementedConnectorServer struct { +} + +func (UnimplementedConnectorServer) ListTables(*protos.TListTablesRequest, Connector_ListTablesServer) error { + return status.Errorf(codes.Unimplemented, "method ListTables not implemented") +} +func (UnimplementedConnectorServer) DescribeTable(context.Context, *protos.TDescribeTableRequest) (*protos.TDescribeTableResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeTable not implemented") +} +func (UnimplementedConnectorServer) ListSplits(*protos.TListSplitsRequest, Connector_ListSplitsServer) error { + return status.Errorf(codes.Unimplemented, "method ListSplits not implemented") +} +func (UnimplementedConnectorServer) ReadSplits(*protos.TReadSplitsRequest, Connector_ReadSplitsServer) error { + return status.Errorf(codes.Unimplemented, "method ReadSplits not implemented") +} +func (UnimplementedConnectorServer) mustEmbedUnimplementedConnectorServer() {} + +// UnsafeConnectorServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ConnectorServer will +// result in compilation errors. 
+type UnsafeConnectorServer interface { + mustEmbedUnimplementedConnectorServer() +} + +func RegisterConnectorServer(s grpc.ServiceRegistrar, srv ConnectorServer) { + s.RegisterService(&Connector_ServiceDesc, srv) +} + +func _Connector_ListTables_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(protos.TListTablesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ConnectorServer).ListTables(m, &connectorListTablesServer{stream}) +} + +type Connector_ListTablesServer interface { + Send(*protos.TListTablesResponse) error + grpc.ServerStream +} + +type connectorListTablesServer struct { + grpc.ServerStream +} + +func (x *connectorListTablesServer) Send(m *protos.TListTablesResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Connector_DescribeTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(protos.TDescribeTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnectorServer).DescribeTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Connector_DescribeTable_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnectorServer).DescribeTable(ctx, req.(*protos.TDescribeTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Connector_ListSplits_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(protos.TListSplitsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ConnectorServer).ListSplits(m, &connectorListSplitsServer{stream}) +} + +type Connector_ListSplitsServer interface { + Send(*protos.TListSplitsResponse) error + grpc.ServerStream +} + +type connectorListSplitsServer struct { + grpc.ServerStream +} + +func (x *connectorListSplitsServer) Send(m *protos.TListSplitsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Connector_ReadSplits_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(protos.TReadSplitsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ConnectorServer).ReadSplits(m, &connectorReadSplitsServer{stream}) +} + +type Connector_ReadSplitsServer interface { + Send(*protos.TReadSplitsResponse) error + grpc.ServerStream +} + +type connectorReadSplitsServer struct { + grpc.ServerStream +} + +func (x *connectorReadSplitsServer) Send(m *protos.TReadSplitsResponse) error { + return x.ServerStream.SendMsg(m) +} + +// Connector_ServiceDesc is the grpc.ServiceDesc for Connector service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Connector_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "NYql.NConnector.NApi.Connector", + HandlerType: (*ConnectorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DescribeTable", + Handler: _Connector_DescribeTable_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListTables", + Handler: _Connector_ListTables_Handler, + ServerStreams: true, + }, + { + StreamName: "ListSplits", + Handler: _Connector_ListSplits_Handler, + ServerStreams: true, + }, + { + StreamName: "ReadSplits", + Handler: _Connector_ReadSplits_Handler, + ServerStreams: true, + }, + }, + Metadata: "ydb/library/yql/providers/generic/connector/api/service/connector.proto", +} diff --git a/ydb/library/yql/providers/generic/connector/libgo/service/protos/connector.pb.go b/ydb/library/yql/providers/generic/connector/libgo/service/protos/connector.pb.go new file mode 100644 index 0000000000..c0725c0b22 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/libgo/service/protos/connector.pb.go @@ -0,0 +1,3439 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.19.0 +// source: ydb/library/yql/providers/generic/connector/api/service/protos/connector.proto + +package protos + +import ( + common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common" + Ydb "github.com/ydb-platform/ydb-go-genproto/protos/Ydb" + Ydb_Issue "github.com/ydb-platform/ydb-go-genproto/protos/Ydb_Issue" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TReadSplitsRequest_EMode int32 + +const ( + TReadSplitsRequest_MODE_RESERVED TReadSplitsRequest_EMode = 0 + // Connector will read splits in a single thread one by one. + // The data will be returned in the order corresponding to the order of requested splits. + TReadSplitsRequest_ORDERED TReadSplitsRequest_EMode = 1 + // Connector may read different splits concurrently and send the split fragments to the response stream + // as soon as the data is obtained from the data source. Thus the stream is multiplexed between splits. + TReadSplitsRequest_UNORDERED TReadSplitsRequest_EMode = 2 +) + +// Enum value maps for TReadSplitsRequest_EMode. 
+var ( + TReadSplitsRequest_EMode_name = map[int32]string{ + 0: "MODE_RESERVED", + 1: "ORDERED", + 2: "UNORDERED", + } + TReadSplitsRequest_EMode_value = map[string]int32{ + "MODE_RESERVED": 0, + "ORDERED": 1, + "UNORDERED": 2, + } +) + +func (x TReadSplitsRequest_EMode) Enum() *TReadSplitsRequest_EMode { + p := new(TReadSplitsRequest_EMode) + *p = x + return p +} + +func (x TReadSplitsRequest_EMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TReadSplitsRequest_EMode) Descriptor() protoreflect.EnumDescriptor { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_enumTypes[0].Descriptor() +} + +func (TReadSplitsRequest_EMode) Type() protoreflect.EnumType { + return &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_enumTypes[0] +} + +func (x TReadSplitsRequest_EMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TReadSplitsRequest_EMode.Descriptor instead. +func (TReadSplitsRequest_EMode) EnumDescriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{9, 0} +} + +type TReadSplitsRequest_EFormat int32 + +const ( + TReadSplitsRequest_FORMAT_RESERVED TReadSplitsRequest_EFormat = 0 + // ColumnSet is a simple representation of columnar data. + // Do not use in production. + TReadSplitsRequest_COLUMN_SET TReadSplitsRequest_EFormat = 1 + // Arrow IPC Streaming format: + // https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format + TReadSplitsRequest_ARROW_IPC_STREAMING TReadSplitsRequest_EFormat = 2 +) + +// Enum value maps for TReadSplitsRequest_EFormat. +var ( + TReadSplitsRequest_EFormat_name = map[int32]string{ + 0: "FORMAT_RESERVED", + 1: "COLUMN_SET", + 2: "ARROW_IPC_STREAMING", + } + TReadSplitsRequest_EFormat_value = map[string]int32{ + "FORMAT_RESERVED": 0, + "COLUMN_SET": 1, + "ARROW_IPC_STREAMING": 2, + } +) + +func (x TReadSplitsRequest_EFormat) Enum() *TReadSplitsRequest_EFormat { + p := new(TReadSplitsRequest_EFormat) + *p = x + return p +} + +func (x TReadSplitsRequest_EFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TReadSplitsRequest_EFormat) Descriptor() protoreflect.EnumDescriptor { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_enumTypes[1].Descriptor() +} + +func (TReadSplitsRequest_EFormat) Type() protoreflect.EnumType { + return &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_enumTypes[1] +} + +func (x TReadSplitsRequest_EFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TReadSplitsRequest_EFormat.Descriptor instead. +func (TReadSplitsRequest_EFormat) EnumDescriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{9, 1} +} + +// An operation code. 
+type TFilter_TComparison_TBinary_EOperation int32 + +const ( + TFilter_TComparison_TBinary_RESERVED TFilter_TComparison_TBinary_EOperation = 0 + TFilter_TComparison_TBinary_L TFilter_TComparison_TBinary_EOperation = 1 // renders to "col < value" + TFilter_TComparison_TBinary_LE TFilter_TComparison_TBinary_EOperation = 2 // renders to "col <= value" + TFilter_TComparison_TBinary_EQ TFilter_TComparison_TBinary_EOperation = 3 // renders to "col = value" + TFilter_TComparison_TBinary_NE TFilter_TComparison_TBinary_EOperation = 4 // renders to "col != value" + TFilter_TComparison_TBinary_GE TFilter_TComparison_TBinary_EOperation = 5 // renders to "col >= value" + TFilter_TComparison_TBinary_G TFilter_TComparison_TBinary_EOperation = 6 // renders to "col > value" +) + +// Enum value maps for TFilter_TComparison_TBinary_EOperation. +var ( + TFilter_TComparison_TBinary_EOperation_name = map[int32]string{ + 0: "RESERVED", + 1: "L", + 2: "LE", + 3: "EQ", + 4: "NE", + 5: "GE", + 6: "G", + } + TFilter_TComparison_TBinary_EOperation_value = map[string]int32{ + "RESERVED": 0, + "L": 1, + "LE": 2, + "EQ": 3, + "NE": 4, + "GE": 5, + "G": 6, + } +) + +func (x TFilter_TComparison_TBinary_EOperation) Enum() *TFilter_TComparison_TBinary_EOperation { + p := new(TFilter_TComparison_TBinary_EOperation) + *p = x + return p +} + +func (x TFilter_TComparison_TBinary_EOperation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TFilter_TComparison_TBinary_EOperation) Descriptor() protoreflect.EnumDescriptor { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_enumTypes[2].Descriptor() +} + +func (TFilter_TComparison_TBinary_EOperation) Type() protoreflect.EnumType { + return &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_enumTypes[2] +} + +func (x TFilter_TComparison_TBinary_EOperation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TFilter_TComparison_TBinary_EOperation.Descriptor instead. 
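// Illustrative sketch, not generated code: the comments on
// TFilter_TComparison_TBinary_EOperation above describe how each value is
// expected to render in SQL. A connector could translate the enum roughly as
// below; the helper name is hypothetical and the function is written as if it
// lived in this package.
func binaryOperationToSQL(op TFilter_TComparison_TBinary_EOperation) (string, bool) {
	switch op {
	case TFilter_TComparison_TBinary_L:
		return "<", true
	case TFilter_TComparison_TBinary_LE:
		return "<=", true
	case TFilter_TComparison_TBinary_EQ:
		return "=", true
	case TFilter_TComparison_TBinary_NE:
		return "!=", true
	case TFilter_TComparison_TBinary_GE:
		return ">=", true
	case TFilter_TComparison_TBinary_G:
		return ">", true
	default:
		// RESERVED and any future values are reported as unsupported.
		return "", false
	}
}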
+func (TFilter_TComparison_TBinary_EOperation) EnumDescriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 0, 0, 0} +} + +// TListTablesRequest requests the list of tables in a particular database of the data source +type TListTablesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Data source instance to connect + DataSourceInstance *common.TDataSourceInstance `protobuf:"bytes,1,opt,name=data_source_instance,json=dataSourceInstance,proto3" json:"data_source_instance,omitempty"` + // There may be a huge number of tables in the data source, + // and here are ways to extract only necessary ones: + // + // Types that are assignable to Filtering: + // + // *TListTablesRequest_Pattern + Filtering isTListTablesRequest_Filtering `protobuf_oneof:"filtering"` +} + +func (x *TListTablesRequest) Reset() { + *x = TListTablesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TListTablesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TListTablesRequest) ProtoMessage() {} + +func (x *TListTablesRequest) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TListTablesRequest.ProtoReflect.Descriptor instead. 
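// Illustrative sketch, not generated code: the generated ConnectorClient stubs
// above are easiest to read next to a usage example. The hypothetical snippet
// below dials a connector, lists the tables of a data source (a server-streaming
// call) and then asks for the schema of one of them (a unary call). The package
// name, the address and the prebuilt *common.TDataSourceInstance are
// placeholders; TLS setup and retries are deliberately omitted.

package connectorexample

import (
	"context"
	"fmt"
	"io"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common"
	service "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service"
	protos "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/libgo/service/protos"
)

func listAndDescribe(ctx context.Context, addr string, dsi *common.TDataSourceInstance) error {
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return fmt.Errorf("dial connector: %w", err)
	}
	defer conn.Close()

	client := service.NewConnectorClient(conn)

	// Server-streaming call: table names arrive in batches until io.EOF.
	stream, err := client.ListTables(ctx, &protos.TListTablesRequest{DataSourceInstance: dsi})
	if err != nil {
		return fmt.Errorf("list tables: %w", err)
	}

	var tables []string
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("recv tables: %w", err)
		}
		tables = append(tables, resp.GetTables()...)
	}

	if len(tables) == 0 {
		return nil
	}

	// Unary call: fetch the schema of the first discovered table.
	describeResp, err := client.DescribeTable(ctx, &protos.TDescribeTableRequest{
		DataSourceInstance: dsi,
		Table:              tables[0],
	})
	if err != nil {
		return fmt.Errorf("describe table: %w", err)
	}

	fmt.Printf("table %q has %d columns\n", tables[0], len(describeResp.GetSchema().GetColumns()))

	return nil
}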
+func (*TListTablesRequest) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{0} +} + +func (x *TListTablesRequest) GetDataSourceInstance() *common.TDataSourceInstance { + if x != nil { + return x.DataSourceInstance + } + return nil +} + +func (m *TListTablesRequest) GetFiltering() isTListTablesRequest_Filtering { + if m != nil { + return m.Filtering + } + return nil +} + +func (x *TListTablesRequest) GetPattern() string { + if x, ok := x.GetFiltering().(*TListTablesRequest_Pattern); ok { + return x.Pattern + } + return "" +} + +type isTListTablesRequest_Filtering interface { + isTListTablesRequest_Filtering() +} + +type TListTablesRequest_Pattern struct { + // Regexp to filter table names + Pattern string `protobuf:"bytes,2,opt,name=pattern,proto3,oneof"` +} + +func (*TListTablesRequest_Pattern) isTListTablesRequest_Filtering() {} + +// TListTablesResponse returns the list of tables in a particular database of the data source +type TListTablesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Table names list + Tables []string `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables,omitempty"` + // Call result + Error *TError `protobuf:"bytes,100,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *TListTablesResponse) Reset() { + *x = TListTablesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TListTablesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TListTablesResponse) ProtoMessage() {} + +func (x *TListTablesResponse) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TListTablesResponse.ProtoReflect.Descriptor instead. 
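// Illustrative sketch, not generated code: the Filtering oneof of
// TListTablesRequest is set through the generated wrapper type rather than a
// plain field. The helper below is hypothetical and written as if it lived in
// this package; the data source instance is assumed to be prepared by the caller.
func newListTablesRequest(dsi *common.TDataSourceInstance, pattern string) *TListTablesRequest {
	req := &TListTablesRequest{DataSourceInstance: dsi}
	if pattern != "" {
		// Only tables whose names match this regexp are returned;
		// leaving the oneof unset asks for the full list.
		req.Filtering = &TListTablesRequest_Pattern{Pattern: pattern}
	}
	// The generated accessor unwraps the oneof and returns "" when it is unset.
	_ = req.GetPattern()
	return req
}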
+func (*TListTablesResponse) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{1} +} + +func (x *TListTablesResponse) GetTables() []string { + if x != nil { + return x.Tables + } + return nil +} + +func (x *TListTablesResponse) GetError() *TError { + if x != nil { + return x.Error + } + return nil +} + +// TDescribeTableRequest requests table metadata +type TDescribeTableRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Data source instance to connect + DataSourceInstance *common.TDataSourceInstance `protobuf:"bytes,1,opt,name=data_source_instance,json=dataSourceInstance,proto3" json:"data_source_instance,omitempty"` + // Table to describe + Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"` +} + +func (x *TDescribeTableRequest) Reset() { + *x = TDescribeTableRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TDescribeTableRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TDescribeTableRequest) ProtoMessage() {} + +func (x *TDescribeTableRequest) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TDescribeTableRequest.ProtoReflect.Descriptor instead. 
+func (*TDescribeTableRequest) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{2} +} + +func (x *TDescribeTableRequest) GetDataSourceInstance() *common.TDataSourceInstance { + if x != nil { + return x.DataSourceInstance + } + return nil +} + +func (x *TDescribeTableRequest) GetTable() string { + if x != nil { + return x.Table + } + return "" +} + +// TDescribeTableResponse returns table metadata +type TDescribeTableResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The whole schema of a table + Schema *TSchema `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` + // Call result + Error *TError `protobuf:"bytes,100,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *TDescribeTableResponse) Reset() { + *x = TDescribeTableResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TDescribeTableResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TDescribeTableResponse) ProtoMessage() {} + +func (x *TDescribeTableResponse) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TDescribeTableResponse.ProtoReflect.Descriptor instead. +func (*TDescribeTableResponse) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{3} +} + +func (x *TDescribeTableResponse) GetSchema() *TSchema { + if x != nil { + return x.Schema + } + return nil +} + +func (x *TDescribeTableResponse) GetError() *TError { + if x != nil { + return x.Error + } + return nil +} + +// TSchema represents the schema of the table +type TSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Columns with YQL types + Columns []*Ydb.Column `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"` // TODO: optional metadata? +} + +func (x *TSchema) Reset() { + *x = TSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TSchema) ProtoMessage() {} + +func (x *TSchema) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TSchema.ProtoReflect.Descriptor instead. 
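// Illustrative sketch, not generated code: a hypothetical helper, written as if
// it lived in this package, showing how a client consumes TDescribeTableResponse.
// The schema is a flat list of Ydb.Column entries, so collecting the column
// names is enough to build the column list of a subsequent select.
func schemaColumnNames(resp *TDescribeTableResponse) []string {
	// Generated getters are nil-safe, so a missing schema simply yields an
	// empty result.
	columns := resp.GetSchema().GetColumns()
	names := make([]string, 0, len(columns))
	for _, column := range columns {
		names = append(names, column.GetName())
	}
	return names
}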
+func (*TSchema) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{4} +} + +func (x *TSchema) GetColumns() []*Ydb.Column { + if x != nil { + return x.Columns + } + return nil +} + +// TListSplitRequest asks Connector to split the requested data into elementary parts. +type TListSplitsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Data source instance to connect + DataSourceInstance *common.TDataSourceInstance `protobuf:"bytes,1,opt,name=data_source_instance,json=dataSourceInstance,proto3" json:"data_source_instance,omitempty"` + // YQ engine may want to read data from different tables simultaneously. + // Perhaps Connector will provide consistency guarantees across the tables some day. + Selects []*TSelect `protobuf:"bytes,2,rep,name=selects,proto3" json:"selects,omitempty"` + // Defines the number of splits (and, as a consequence, affects the size of the split). + // If you don't want splitting, set 1. + MaxSplitCount uint32 `protobuf:"varint,3,opt,name=max_split_count,json=maxSplitCount,proto3" json:"max_split_count,omitempty"` + // Connector will try to divide the data into the splits of this size, + // but the exact match is not guaranteed. + // Also this setting can be overridden by max_split_count. + SplitSize uint64 `protobuf:"varint,4,opt,name=split_size,json=splitSize,proto3" json:"split_size,omitempty"` + // Sometimes YQ doesn't know the exact size of the data set, + // so it asks Connector to split the data into the splits of $split_size, + // and the $max_split_count = MaxUINT32. + // But if the data is too large, and too many splits will be generated, + // this may exceed the memory available for YQ. + // In such case, it's better to fail fast. This limit helps to implement it: + SplitNumberLimit uint64 `protobuf:"varint,5,opt,name=split_number_limit,json=splitNumberLimit,proto3" json:"split_number_limit,omitempty"` +} + +func (x *TListSplitsRequest) Reset() { + *x = TListSplitsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TListSplitsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TListSplitsRequest) ProtoMessage() {} + +func (x *TListSplitsRequest) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TListSplitsRequest.ProtoReflect.Descriptor instead. 
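// Illustrative sketch, not generated code: a hypothetical constructor, written
// as if it lived in this package, tying together the sizing knobs documented
// above. The concrete numbers are arbitrary examples, not recommended defaults;
// the TSelect values are assumed to be built by the caller.
func newListSplitsRequest(dsi *common.TDataSourceInstance, selects []*TSelect) *TListSplitsRequest {
	return &TListSplitsRequest{
		DataSourceInstance: dsi,
		Selects:            selects,
		// Aim for splits of roughly 64 MiB; the exact size is not guaranteed.
		SplitSize: 64 << 20,
		// Cap the number of splits, which may override the size target.
		MaxSplitCount: 1024,
		// Fail fast if the connector would have to produce more splits than this.
		SplitNumberLimit: 4096,
	}
}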
+func (*TListSplitsRequest) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{5} +} + +func (x *TListSplitsRequest) GetDataSourceInstance() *common.TDataSourceInstance { + if x != nil { + return x.DataSourceInstance + } + return nil +} + +func (x *TListSplitsRequest) GetSelects() []*TSelect { + if x != nil { + return x.Selects + } + return nil +} + +func (x *TListSplitsRequest) GetMaxSplitCount() uint32 { + if x != nil { + return x.MaxSplitCount + } + return 0 +} + +func (x *TListSplitsRequest) GetSplitSize() uint64 { + if x != nil { + return x.SplitSize + } + return 0 +} + +func (x *TListSplitsRequest) GetSplitNumberLimit() uint64 { + if x != nil { + return x.SplitNumberLimit + } + return 0 +} + +// TListSplitResponse returns the list of splits for a particular set of table partitions +type TListSplitsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // the list of splits for concurrent reading + Splits []*TSplit `protobuf:"bytes,1,rep,name=splits,proto3" json:"splits,omitempty"` + // Call result + Error *TError `protobuf:"bytes,100,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *TListSplitsResponse) Reset() { + *x = TListSplitsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TListSplitsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TListSplitsResponse) ProtoMessage() {} + +func (x *TListSplitsResponse) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TListSplitsResponse.ProtoReflect.Descriptor instead. +func (*TListSplitsResponse) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{6} +} + +func (x *TListSplitsResponse) GetSplits() []*TSplit { + if x != nil { + return x.Splits + } + return nil +} + +func (x *TListSplitsResponse) GetError() *TError { + if x != nil { + return x.Error + } + return nil +} + +// Select describes what to read from the data source. +// +// In RDBMS systems this call internally transforms into SQL expression using this template: +// SELECT $what +// FROM $from +// WHERE $filter +// LIMIT $limit [OFFSET $offset] +// TODO: support JOIN, ORDER BY, GROUP BY +type TSelect struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Data source instance to connect + DataSourceInstance *common.TDataSourceInstance `protobuf:"bytes,1,opt,name=data_source_instance,json=dataSourceInstance,proto3" json:"data_source_instance,omitempty"` + // Transforms into SELECT $what. + What *TSelect_TWhat `protobuf:"bytes,2,opt,name=what,proto3" json:"what,omitempty"` + // Transforms into FROM $from + From *TSelect_TFrom `protobuf:"bytes,3,opt,name=from,proto3" json:"from,omitempty"` + // Transforms into WHERE $filter. + // Optional field. 
+ Where *TSelect_TWhere `protobuf:"bytes,4,opt,name=where,proto3" json:"where,omitempty"` + // Transforms into LIMIT $limit [OFFSET $offset]. + // Optional field. + Limit *TSelect_TLimit `protobuf:"bytes,5,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *TSelect) Reset() { + *x = TSelect{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TSelect) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TSelect) ProtoMessage() {} + +func (x *TSelect) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TSelect.ProtoReflect.Descriptor instead. +func (*TSelect) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{7} +} + +func (x *TSelect) GetDataSourceInstance() *common.TDataSourceInstance { + if x != nil { + return x.DataSourceInstance + } + return nil +} + +func (x *TSelect) GetWhat() *TSelect_TWhat { + if x != nil { + return x.What + } + return nil +} + +func (x *TSelect) GetFrom() *TSelect_TFrom { + if x != nil { + return x.From + } + return nil +} + +func (x *TSelect) GetWhere() *TSelect_TWhere { + if x != nil { + return x.Where + } + return nil +} + +func (x *TSelect) GetLimit() *TSelect_TLimit { + if x != nil { + return x.Limit + } + return nil +} + +// Split is a certain part of a table. In general, it should be much smaller than a partition. +// It also represents a unit of a parallel work for YQ engine. +type TSplit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Every split contains the description of SELECT it was generated for. + Select *TSelect `protobuf:"bytes,1,opt,name=select,proto3" json:"select,omitempty"` + // Types that are assignable to Payload: + // + // *TSplit_Description + Payload isTSplit_Payload `protobuf_oneof:"payload"` +} + +func (x *TSplit) Reset() { + *x = TSplit{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TSplit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TSplit) ProtoMessage() {} + +func (x *TSplit) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TSplit.ProtoReflect.Descriptor instead. 
+func (*TSplit) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{8} +} + +func (x *TSplit) GetSelect() *TSelect { + if x != nil { + return x.Select + } + return nil +} + +func (m *TSplit) GetPayload() isTSplit_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *TSplit) GetDescription() []byte { + if x, ok := x.GetPayload().(*TSplit_Description); ok { + return x.Description + } + return nil +} + +type isTSplit_Payload interface { + isTSplit_Payload() +} + +type TSplit_Description struct { + // Different data sources may use different ways to describe a split, + // and we don't want YQ to dig into its internals (at least now), + // so we make the description opaque for YQ. + Description []byte `protobuf:"bytes,2,opt,name=description,proto3,oneof"` +} + +func (*TSplit_Description) isTSplit_Payload() {} + +// ReadDataRequest reads the data associated with a particular table split. +type TReadSplitsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Data source instance to connect + DataSourceInstance *common.TDataSourceInstance `protobuf:"bytes,1,opt,name=data_source_instance,json=dataSourceInstance,proto3" json:"data_source_instance,omitempty"` + // Splits that YQ engine would like to read. + Splits []*TSplit `protobuf:"bytes,2,rep,name=splits,proto3" json:"splits,omitempty"` + // Determines the mode of data extraction + Mode TReadSplitsRequest_EMode `protobuf:"varint,3,opt,name=mode,proto3,enum=NYql.NConnector.NApi.TReadSplitsRequest_EMode" json:"mode,omitempty"` + // Determines the format of data representation + Format TReadSplitsRequest_EFormat `protobuf:"varint,4,opt,name=format,proto3,enum=NYql.NConnector.NApi.TReadSplitsRequest_EFormat" json:"format,omitempty"` + // Pagination helps to manage the process of fragmentation data stream from the data source into the fragments. + // If empty, the whole result will be sent back in a single message. + Pagination *TPagination `protobuf:"bytes,5,opt,name=pagination,proto3" json:"pagination,omitempty"` + // Specifies the location of split from where to start reading. + // If stream has been recently interrupted, YQ may retry reading the split from the interrupted block + // instead of reading the split from scratch. + // If empty, the connector will return the split data from the very beginning. + Continuation *TContinuation `protobuf:"bytes,6,opt,name=continuation,proto3" json:"continuation,omitempty"` +} + +func (x *TReadSplitsRequest) Reset() { + *x = TReadSplitsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TReadSplitsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TReadSplitsRequest) ProtoMessage() {} + +func (x *TReadSplitsRequest) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TReadSplitsRequest.ProtoReflect.Descriptor instead. 
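// Illustrative sketch, not generated code: a hypothetical constructor, written
// as if it lived in this package, combining the fields documented above:
// ordered reading, Arrow IPC streaming as the block format, and per-message
// pagination limits. The page sizes are arbitrary examples; how the connector
// maps them onto LIMIT/OFFSET queries or cursors is explained in the
// TPagination comment further below.
func newReadSplitsRequest(dsi *common.TDataSourceInstance, splits []*TSplit) *TReadSplitsRequest {
	return &TReadSplitsRequest{
		DataSourceInstance: dsi,
		Splits:             splits,
		// Return data in the order the splits were requested.
		Mode: TReadSplitsRequest_ORDERED,
		// Ship blocks in Arrow IPC streaming format; COLUMN_SET is debug-only.
		Format: TReadSplitsRequest_ARROW_IPC_STREAMING,
		// At most 10000 rows and 4 MiB per response message.
		Pagination: &TPagination{Rows: 10000, Bytes: 4 << 20},
		// Continuation is left empty: every split is read from the beginning.
	}
}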
+func (*TReadSplitsRequest) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{9} +} + +func (x *TReadSplitsRequest) GetDataSourceInstance() *common.TDataSourceInstance { + if x != nil { + return x.DataSourceInstance + } + return nil +} + +func (x *TReadSplitsRequest) GetSplits() []*TSplit { + if x != nil { + return x.Splits + } + return nil +} + +func (x *TReadSplitsRequest) GetMode() TReadSplitsRequest_EMode { + if x != nil { + return x.Mode + } + return TReadSplitsRequest_MODE_RESERVED +} + +func (x *TReadSplitsRequest) GetFormat() TReadSplitsRequest_EFormat { + if x != nil { + return x.Format + } + return TReadSplitsRequest_FORMAT_RESERVED +} + +func (x *TReadSplitsRequest) GetPagination() *TPagination { + if x != nil { + return x.Pagination + } + return nil +} + +func (x *TReadSplitsRequest) GetContinuation() *TContinuation { + if x != nil { + return x.Continuation + } + return nil +} + +// ReadDataResponse returns the data corresponding to a particular split +type TReadSplitsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // There may be various formats to represent data + // + // Types that are assignable to Payload: + // + // *TReadSplitsResponse_ColumnSet + // *TReadSplitsResponse_ArrowIpcStreaming + Payload isTReadSplitsResponse_Payload `protobuf_oneof:"payload"` + // Since multiple splits can be read within one request, it's important to + // match the received data with the requested split. + SplitIndexNumber uint32 `protobuf:"varint,3,opt,name=split_index_number,json=splitIndexNumber,proto3" json:"split_index_number,omitempty"` + // Specifies the location where the next block starts. + // If stream has been interrupted, YQ may retry reading using the Continuation message + // received for the last time. + Continuation *TContinuation `protobuf:"bytes,4,opt,name=continuation,proto3" json:"continuation,omitempty"` + // Call result + Error *TError `protobuf:"bytes,100,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *TReadSplitsResponse) Reset() { + *x = TReadSplitsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TReadSplitsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TReadSplitsResponse) ProtoMessage() {} + +func (x *TReadSplitsResponse) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TReadSplitsResponse.ProtoReflect.Descriptor instead. 
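// Illustrative sketch, not generated code: a hypothetical helper, written as if
// it lived in this package (and additionally assuming the standard "fmt"
// package is imported), showing how a reader branches on the payload oneof of
// TReadSplitsResponse. Decoding the Arrow IPC bytes is left to the caller.
func splitResponsePayload(resp *TReadSplitsResponse) ([]byte, error) {
	switch payload := resp.GetPayload().(type) {
	case *TReadSplitsResponse_ArrowIpcStreaming:
		// A block of data in Arrow IPC streaming format.
		return payload.ArrowIpcStreaming, nil
	case *TReadSplitsResponse_ColumnSet:
		// Debug-only columnar representation; not expected in production.
		return nil, fmt.Errorf("unexpected COLUMN_SET payload for split %d", resp.GetSplitIndexNumber())
	default:
		return nil, fmt.Errorf("empty payload for split %d", resp.GetSplitIndexNumber())
	}
}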
+func (*TReadSplitsResponse) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{10} +} + +func (m *TReadSplitsResponse) GetPayload() isTReadSplitsResponse_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *TReadSplitsResponse) GetColumnSet() *TReadSplitsResponse_TColumnSet { + if x, ok := x.GetPayload().(*TReadSplitsResponse_ColumnSet); ok { + return x.ColumnSet + } + return nil +} + +func (x *TReadSplitsResponse) GetArrowIpcStreaming() []byte { + if x, ok := x.GetPayload().(*TReadSplitsResponse_ArrowIpcStreaming); ok { + return x.ArrowIpcStreaming + } + return nil +} + +func (x *TReadSplitsResponse) GetSplitIndexNumber() uint32 { + if x != nil { + return x.SplitIndexNumber + } + return 0 +} + +func (x *TReadSplitsResponse) GetContinuation() *TContinuation { + if x != nil { + return x.Continuation + } + return nil +} + +func (x *TReadSplitsResponse) GetError() *TError { + if x != nil { + return x.Error + } + return nil +} + +type isTReadSplitsResponse_Payload interface { + isTReadSplitsResponse_Payload() +} + +type TReadSplitsResponse_ColumnSet struct { + // Columnar data in protobuf format with YDB types. + // Use it only for debugging, don't use in production. + ColumnSet *TReadSplitsResponse_TColumnSet `protobuf:"bytes,1,opt,name=column_set,json=columnSet,proto3,oneof"` +} + +type TReadSplitsResponse_ArrowIpcStreaming struct { + // Data in Arrow IPC streaming format. + ArrowIpcStreaming []byte `protobuf:"bytes,2,opt,name=arrow_ipc_streaming,json=arrowIpcStreaming,proto3,oneof"` +} + +func (*TReadSplitsResponse_ColumnSet) isTReadSplitsResponse_Payload() {} + +func (*TReadSplitsResponse_ArrowIpcStreaming) isTReadSplitsResponse_Payload() {} + +// Pagination describes how to split data stream from data source into fragments +// in order to return them as separate GRPC messages to YQ. +type TPagination struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // In the RDBMS the default pagination method is LIMIT/OFFSET. + // Instead of full table scan: + // + // # SELECT col1 FROM table + // + // connector may internally invoke multiple requests to the data: + // + // SELECT col1 FROM table LIMIT $rows // return first $rows to YQ + // SELECT col1 FROM table LIMIT $rows OFFSET $rows // return next $rows to YQ + // SELECT col1 FROM table LIMIT $rows OFFSET $rows // (...) + // SELECT col1 FROM table LIMIT $rows OFFSET $rows + // (...) + // + // Some RDBMS may support cursors as the more advanced way of pagination. + // + // BEGIN; + // DECLARE cur CURSOR FOR SELECT col1 FROM table; + // FETCH $rows FROM cur; // return first $rows to YQ + // FETCH $rows FROM cur; // return next $rows to YQ + // FETCH $rows FROM cur; // (...) + // (...) + // COMMIT; + // + // The choice of pagination method is the responsibility of the connector (at least now), + // but the engine can specify how many rows to return in each message of the stream. + Rows uint64 `protobuf:"varint,1,opt,name=rows,proto3" json:"rows,omitempty"` + // Memory limitation helps to control the effective size of a block returned in every response. + // It may override the previous limit. 
+ Bytes uint64 `protobuf:"varint,2,opt,name=bytes,proto3" json:"bytes,omitempty"` +} + +func (x *TPagination) Reset() { + *x = TPagination{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TPagination) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TPagination) ProtoMessage() {} + +func (x *TPagination) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TPagination.ProtoReflect.Descriptor instead. +func (*TPagination) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{11} +} + +func (x *TPagination) GetRows() uint64 { + if x != nil { + return x.Rows + } + return 0 +} + +func (x *TPagination) GetBytes() uint64 { + if x != nil { + return x.Bytes + } + return 0 +} + +// Continuation is a special type useful for the request retry. +// In case if split reading was interrupted, +// the engine does not have to read all the split data from the very beginning, +// it can specify the location from where it wants to reread the data instead. +type TContinuation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: + // + // *TContinuation_Description + Payload isTContinuation_Payload `protobuf_oneof:"payload"` +} + +func (x *TContinuation) Reset() { + *x = TContinuation{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TContinuation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TContinuation) ProtoMessage() {} + +func (x *TContinuation) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TContinuation.ProtoReflect.Descriptor instead. +func (*TContinuation) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{12} +} + +func (m *TContinuation) GetPayload() isTContinuation_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *TContinuation) GetDescription() []byte { + if x, ok := x.GetPayload().(*TContinuation_Description); ok { + return x.Description + } + return nil +} + +type isTContinuation_Payload interface { + isTContinuation_Payload() +} + +type TContinuation_Description struct { + // In general description should be opaque to YQ. 
+ Description []byte `protobuf:"bytes,1,opt,name=description,proto3,oneof"` +} + +func (*TContinuation_Description) isTContinuation_Payload() {} + +// Filter - a special type to describe a constraint (or a set of constraints) applied to SQL expression: +// SELECT $columns FROM $table WHERE $filter. +// It can be also used for the purpose of split pruning. +type TFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: + // + // *TFilter_Comparison + // *TFilter_Conjunction + // *TFilter_Disjunction + // *TFilter_Negation + // *TFilter_SubExpr + Payload isTFilter_Payload `protobuf_oneof:"payload"` +} + +func (x *TFilter) Reset() { + *x = TFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter) ProtoMessage() {} + +func (x *TFilter) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter.ProtoReflect.Descriptor instead. +func (*TFilter) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13} +} + +func (m *TFilter) GetPayload() isTFilter_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *TFilter) GetComparison() *TFilter_TComparison { + if x, ok := x.GetPayload().(*TFilter_Comparison); ok { + return x.Comparison + } + return nil +} + +func (x *TFilter) GetConjunction() *TFilter_TConjunction { + if x, ok := x.GetPayload().(*TFilter_Conjunction); ok { + return x.Conjunction + } + return nil +} + +func (x *TFilter) GetDisjunction() *TFilter_TDisjunction { + if x, ok := x.GetPayload().(*TFilter_Disjunction); ok { + return x.Disjunction + } + return nil +} + +func (x *TFilter) GetNegation() *TFilter_TNegation { + if x, ok := x.GetPayload().(*TFilter_Negation); ok { + return x.Negation + } + return nil +} + +func (x *TFilter) GetSubExpr() *TFilter_TSubExpr { + if x, ok := x.GetPayload().(*TFilter_SubExpr); ok { + return x.SubExpr + } + return nil +} + +type isTFilter_Payload interface { + isTFilter_Payload() +} + +type TFilter_Comparison struct { + Comparison *TFilter_TComparison `protobuf:"bytes,1,opt,name=comparison,proto3,oneof"` +} + +type TFilter_Conjunction struct { + Conjunction *TFilter_TConjunction `protobuf:"bytes,2,opt,name=conjunction,proto3,oneof"` +} + +type TFilter_Disjunction struct { + Disjunction *TFilter_TDisjunction `protobuf:"bytes,3,opt,name=disjunction,proto3,oneof"` +} + +type TFilter_Negation struct { + Negation *TFilter_TNegation `protobuf:"bytes,4,opt,name=negation,proto3,oneof"` +} + +type TFilter_SubExpr struct { + SubExpr *TFilter_TSubExpr `protobuf:"bytes,5,opt,name=sub_expr,json=subExpr,proto3,oneof"` +} + +func (*TFilter_Comparison) isTFilter_Payload() {} + +func (*TFilter_Conjunction) isTFilter_Payload() {} + +func (*TFilter_Disjunction) isTFilter_Payload() {} + +func (*TFilter_Negation) isTFilter_Payload() {} + +func (*TFilter_SubExpr) 
isTFilter_Payload() {} + +// Special type to describe the result of any operation +type TError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // High-level code + Status Ydb.StatusIds_StatusCode `protobuf:"varint,1,opt,name=status,proto3,enum=Ydb.StatusIds_StatusCode" json:"status,omitempty"` + // Error message + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Detailed explanation of a problem; + // must be empty if status == SUCCESS + Issues []*Ydb_Issue.IssueMessage `protobuf:"bytes,3,rep,name=issues,proto3" json:"issues,omitempty"` +} + +func (x *TError) Reset() { + *x = TError{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TError) ProtoMessage() {} + +func (x *TError) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TError.ProtoReflect.Descriptor instead. +func (*TError) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{14} +} + +func (x *TError) GetStatus() Ydb.StatusIds_StatusCode { + if x != nil { + return x.Status + } + return Ydb.StatusIds_StatusCode(0) +} + +func (x *TError) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *TError) GetIssues() []*Ydb_Issue.IssueMessage { + if x != nil { + return x.Issues + } + return nil +} + +// TAst is an internal representation of an YQL request. +// Advanced connectors may use it for the full-fledged implementations of the push down. +type TAst struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: + // + // *TAst_Atom + // *TAst_List + Payload isTAst_Payload `protobuf_oneof:"payload"` +} + +func (x *TAst) Reset() { + *x = TAst{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TAst) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TAst) ProtoMessage() {} + +func (x *TAst) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TAst.ProtoReflect.Descriptor instead. 
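+
+// Editorial note (illustrative sketch, not part of the protoc output): a minimal
+// example of how a caller might interpret TError with the getters generated above,
+// assuming the Ydb.StatusIds_SUCCESS constant from ydb_status_codes.proto.
+// The helper name errorIsSuccess is hypothetical.
+func errorIsSuccess(e *TError) bool {
+	// A nil TError or an explicit SUCCESS status both mean the operation succeeded;
+	// per the field comment above, Issues must be empty in that case.
+	return e == nil || e.GetStatus() == Ydb.StatusIds_SUCCESS
+}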
+func (*TAst) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{15} +} + +func (m *TAst) GetPayload() isTAst_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *TAst) GetAtom() []byte { + if x, ok := x.GetPayload().(*TAst_Atom); ok { + return x.Atom + } + return nil +} + +func (x *TAst) GetList() *TAst_TList { + if x, ok := x.GetPayload().(*TAst_List); ok { + return x.List + } + return nil +} + +type isTAst_Payload interface { + isTAst_Payload() +} + +type TAst_Atom struct { + Atom []byte `protobuf:"bytes,1,opt,name=atom,proto3,oneof"` +} + +type TAst_List struct { + List *TAst_TList `protobuf:"bytes,2,opt,name=list,proto3,oneof"` +} + +func (*TAst_Atom) isTAst_Payload() {} + +func (*TAst_List) isTAst_Payload() {} + +// Describes what particularly to get from the data source +type TSelect_TWhat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // NOTE: this API intentionally makes it not possible to request 'SELECT *'. + // YQ must provide all the column names explicitly. + // + // Еmpty list means that YQ wants to get empty tuples in the response. + // On the connector's side this request will be transformed into something like + // SELECT 1 FROM $table (...) + Items []*TSelect_TWhat_TItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` +} + +func (x *TSelect_TWhat) Reset() { + *x = TSelect_TWhat{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TSelect_TWhat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TSelect_TWhat) ProtoMessage() {} + +func (x *TSelect_TWhat) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TSelect_TWhat.ProtoReflect.Descriptor instead. 
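+
+// Editorial note (illustrative sketch, not part of the protoc output): an advanced
+// connector could assemble a tiny TAst tree, e.g. the S-expression (Member "col1"),
+// with the oneof wrappers generated above and TAst_TList declared later in this file.
+// The helper name exampleAst is hypothetical.
+func exampleAst() *TAst {
+	return &TAst{Payload: &TAst_List{List: &TAst_TList{Children: []*TAst{
+		{Payload: &TAst_Atom{Atom: []byte("Member")}},
+		{Payload: &TAst_Atom{Atom: []byte("col1")}},
+	}}}}
+}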
+func (*TSelect_TWhat) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *TSelect_TWhat) GetItems() []*TSelect_TWhat_TItem { + if x != nil { + return x.Items + } + return nil +} + +type TSelect_TFrom struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // TODO: for some of RDBMS the table name may be not enough + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` +} + +func (x *TSelect_TFrom) Reset() { + *x = TSelect_TFrom{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TSelect_TFrom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TSelect_TFrom) ProtoMessage() {} + +func (x *TSelect_TFrom) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TSelect_TFrom.ProtoReflect.Descriptor instead. +func (*TSelect_TFrom) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{7, 1} +} + +func (x *TSelect_TFrom) GetTable() string { + if x != nil { + return x.Table + } + return "" +} + +type TSelect_TWhere struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Strongly typed tree of predicates + FilterTyped *TFilter `protobuf:"bytes,1,opt,name=filter_typed,json=filterTyped,proto3" json:"filter_typed,omitempty"` + // An internal representation of YQL request part describing filters. + // Advanced connectors may use it for the full-fledged implementations of the push down. + FilterRaw *TAst `protobuf:"bytes,2,opt,name=filter_raw,json=filterRaw,proto3" json:"filter_raw,omitempty"` +} + +func (x *TSelect_TWhere) Reset() { + *x = TSelect_TWhere{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TSelect_TWhere) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TSelect_TWhere) ProtoMessage() {} + +func (x *TSelect_TWhere) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TSelect_TWhere.ProtoReflect.Descriptor instead. 
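+
+// Editorial note (illustrative sketch, not part of the protoc output): because
+// 'SELECT *' cannot be expressed, every requested column is listed explicitly in
+// TWhat. A minimal sketch assuming Ydb.Column from ydb_value.proto with at least
+// a Name field (its Type is omitted for brevity). exampleWhat is a hypothetical helper.
+func exampleWhat(columnNames []string) *TSelect_TWhat {
+	what := &TSelect_TWhat{}
+	for _, name := range columnNames {
+		what.Items = append(what.Items, &TSelect_TWhat_TItem{
+			Payload: &TSelect_TWhat_TItem_Column{Column: &Ydb.Column{Name: name}},
+		})
+	}
+	// An empty columnNames slice leaves Items empty, which, per the comment above,
+	// the connector turns into something like SELECT 1 FROM $table.
+	return what
+}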
+func (*TSelect_TWhere) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{7, 2} +} + +func (x *TSelect_TWhere) GetFilterTyped() *TFilter { + if x != nil { + return x.FilterTyped + } + return nil +} + +func (x *TSelect_TWhere) GetFilterRaw() *TAst { + if x != nil { + return x.FilterRaw + } + return nil +} + +type TSelect_TLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + Offset uint64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` +} + +func (x *TSelect_TLimit) Reset() { + *x = TSelect_TLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TSelect_TLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TSelect_TLimit) ProtoMessage() {} + +func (x *TSelect_TLimit) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TSelect_TLimit.ProtoReflect.Descriptor instead. +func (*TSelect_TLimit) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{7, 3} +} + +func (x *TSelect_TLimit) GetLimit() uint64 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *TSelect_TLimit) GetOffset() uint64 { + if x != nil { + return x.Offset + } + return 0 +} + +type TSelect_TWhat_TItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // YQ can read particular table columns or call aggregate functions, for example. + // + // Types that are assignable to Payload: + // + // *TSelect_TWhat_TItem_Column + Payload isTSelect_TWhat_TItem_Payload `protobuf_oneof:"payload"` +} + +func (x *TSelect_TWhat_TItem) Reset() { + *x = TSelect_TWhat_TItem{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TSelect_TWhat_TItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TSelect_TWhat_TItem) ProtoMessage() {} + +func (x *TSelect_TWhat_TItem) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TSelect_TWhat_TItem.ProtoReflect.Descriptor instead. 
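+
+// Editorial note (illustrative sketch, not part of the protoc output): wiring the
+// nested parts into a TSelect declared earlier in this file. This assumes the Go
+// field names What/From/Where/Limit that protoc derives from the proto field names;
+// DataSourceInstance is left unset. exampleSelect and "example_table" are hypothetical.
+func exampleSelect(filter *TFilter) *TSelect {
+	return &TSelect{
+		What: &TSelect_TWhat{Items: []*TSelect_TWhat_TItem{
+			{Payload: &TSelect_TWhat_TItem_Column{Column: &Ydb.Column{Name: "id"}}},
+		}},
+		From:  &TSelect_TFrom{Table: "example_table"},
+		Where: &TSelect_TWhere{FilterTyped: filter}, // FilterRaw (a TAst) may be attached as well
+		Limit: &TSelect_TLimit{Limit: 100, Offset: 0},
+	}
+}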
+func (*TSelect_TWhat_TItem) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{7, 0, 0} +} + +func (m *TSelect_TWhat_TItem) GetPayload() isTSelect_TWhat_TItem_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *TSelect_TWhat_TItem) GetColumn() *Ydb.Column { + if x, ok := x.GetPayload().(*TSelect_TWhat_TItem_Column); ok { + return x.Column + } + return nil +} + +type isTSelect_TWhat_TItem_Payload interface { + isTSelect_TWhat_TItem_Payload() +} + +type TSelect_TWhat_TItem_Column struct { + // a column to read + Column *Ydb.Column `protobuf:"bytes,1,opt,name=column,proto3,oneof"` +} + +func (*TSelect_TWhat_TItem_Column) isTSelect_TWhat_TItem_Payload() {} + +// Protobuf columnar representation of data. +// Use it only for debugging, don't use in production. +type TReadSplitsResponse_TColumnSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Meta []*Ydb.Column `protobuf:"bytes,1,rep,name=meta,proto3" json:"meta,omitempty"` + Data []*TReadSplitsResponse_TColumnSet_TColumn `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty"` +} + +func (x *TReadSplitsResponse_TColumnSet) Reset() { + *x = TReadSplitsResponse_TColumnSet{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TReadSplitsResponse_TColumnSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TReadSplitsResponse_TColumnSet) ProtoMessage() {} + +func (x *TReadSplitsResponse_TColumnSet) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TReadSplitsResponse_TColumnSet.ProtoReflect.Descriptor instead. 
+func (*TReadSplitsResponse_TColumnSet) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{10, 0} +} + +func (x *TReadSplitsResponse_TColumnSet) GetMeta() []*Ydb.Column { + if x != nil { + return x.Meta + } + return nil +} + +func (x *TReadSplitsResponse_TColumnSet) GetData() []*TReadSplitsResponse_TColumnSet_TColumn { + if x != nil { + return x.Data + } + return nil +} + +type TReadSplitsResponse_TColumnSet_TColumn struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []*Ydb.Value `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty"` +} + +func (x *TReadSplitsResponse_TColumnSet_TColumn) Reset() { + *x = TReadSplitsResponse_TColumnSet_TColumn{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TReadSplitsResponse_TColumnSet_TColumn) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TReadSplitsResponse_TColumnSet_TColumn) ProtoMessage() {} + +func (x *TReadSplitsResponse_TColumnSet_TColumn) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TReadSplitsResponse_TColumnSet_TColumn.ProtoReflect.Descriptor instead. +func (*TReadSplitsResponse_TColumnSet_TColumn) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{10, 0, 0} +} + +func (x *TReadSplitsResponse_TColumnSet_TColumn) GetData() []*Ydb.Value { + if x != nil { + return x.Data + } + return nil +} + +// Represents an elementary comparison between a column and some value +type TFilter_TComparison struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: + // + // *TFilter_TComparison_Binary + // *TFilter_TComparison_IsNull + // *TFilter_TComparison_IsNotNull + // *TFilter_TComparison_In + // *TFilter_TComparison_Between + Payload isTFilter_TComparison_Payload `protobuf_oneof:"payload"` +} + +func (x *TFilter_TComparison) Reset() { + *x = TFilter_TComparison{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TComparison) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TComparison) ProtoMessage() {} + +func (x *TFilter_TComparison) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TComparison.ProtoReflect.Descriptor instead. 
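+
+// Editorial note (illustrative sketch, not part of the protoc output): the debug
+// COLUMN_SET format above is column-oriented, so row i is recovered by taking
+// element i of every column in Data (Meta describes the columns). columnSetRows
+// is a hypothetical helper; it stops at the shortest column.
+func columnSetRows(cs *TReadSplitsResponse_TColumnSet) [][]*Ydb.Value {
+	cols := cs.GetData()
+	if len(cols) == 0 {
+		return nil
+	}
+	var rows [][]*Ydb.Value
+	for i := 0; ; i++ {
+		row := make([]*Ydb.Value, 0, len(cols))
+		for _, col := range cols {
+			if i >= len(col.GetData()) {
+				return rows // ran out of values in this column
+			}
+			row = append(row, col.GetData()[i])
+		}
+		rows = append(rows, row)
+	}
+}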
+func (*TFilter_TComparison) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 0} +} + +func (m *TFilter_TComparison) GetPayload() isTFilter_TComparison_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *TFilter_TComparison) GetBinary() *TFilter_TComparison_TBinary { + if x, ok := x.GetPayload().(*TFilter_TComparison_Binary); ok { + return x.Binary + } + return nil +} + +func (x *TFilter_TComparison) GetIsNull() *TFilter_TComparison_TIsNull { + if x, ok := x.GetPayload().(*TFilter_TComparison_IsNull); ok { + return x.IsNull + } + return nil +} + +func (x *TFilter_TComparison) GetIsNotNull() *TFilter_TComparison_TIsNotNull { + if x, ok := x.GetPayload().(*TFilter_TComparison_IsNotNull); ok { + return x.IsNotNull + } + return nil +} + +func (x *TFilter_TComparison) GetIn() *TFilter_TComparison_TIn { + if x, ok := x.GetPayload().(*TFilter_TComparison_In); ok { + return x.In + } + return nil +} + +func (x *TFilter_TComparison) GetBetween() *TFilter_TComparison_TBetween { + if x, ok := x.GetPayload().(*TFilter_TComparison_Between); ok { + return x.Between + } + return nil +} + +type isTFilter_TComparison_Payload interface { + isTFilter_TComparison_Payload() +} + +type TFilter_TComparison_Binary struct { + Binary *TFilter_TComparison_TBinary `protobuf:"bytes,1,opt,name=binary,proto3,oneof"` +} + +type TFilter_TComparison_IsNull struct { + IsNull *TFilter_TComparison_TIsNull `protobuf:"bytes,2,opt,name=is_null,json=isNull,proto3,oneof"` +} + +type TFilter_TComparison_IsNotNull struct { + IsNotNull *TFilter_TComparison_TIsNotNull `protobuf:"bytes,3,opt,name=is_not_null,json=isNotNull,proto3,oneof"` +} + +type TFilter_TComparison_In struct { + In *TFilter_TComparison_TIn `protobuf:"bytes,4,opt,name=in,proto3,oneof"` +} + +type TFilter_TComparison_Between struct { + Between *TFilter_TComparison_TBetween `protobuf:"bytes,5,opt,name=between,proto3,oneof"` +} + +func (*TFilter_TComparison_Binary) isTFilter_TComparison_Payload() {} + +func (*TFilter_TComparison_IsNull) isTFilter_TComparison_Payload() {} + +func (*TFilter_TComparison_IsNotNull) isTFilter_TComparison_Payload() {} + +func (*TFilter_TComparison_In) isTFilter_TComparison_Payload() {} + +func (*TFilter_TComparison_Between) isTFilter_TComparison_Payload() {} + +// transforms into "AND" +type TFilter_TConjunction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operands []*TFilter `protobuf:"bytes,1,rep,name=operands,proto3" json:"operands,omitempty"` +} + +func (x *TFilter_TConjunction) Reset() { + *x = TFilter_TConjunction{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TConjunction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TConjunction) ProtoMessage() {} + +func (x *TFilter_TConjunction) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TConjunction.ProtoReflect.Descriptor instead. 
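+
+// Editorial note (illustrative sketch, not part of the protoc output): two or more
+// predicates are AND-ed together by wrapping them into a TConjunction via the oneof
+// wrappers generated above. The helper name andFilters is hypothetical.
+func andFilters(operands ...*TFilter) *TFilter {
+	return &TFilter{
+		Payload: &TFilter_Conjunction{
+			Conjunction: &TFilter_TConjunction{Operands: operands},
+		},
+	}
+}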
+func (*TFilter_TConjunction) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 1} +} + +func (x *TFilter_TConjunction) GetOperands() []*TFilter { + if x != nil { + return x.Operands + } + return nil +} + +// transforms into "OR" +type TFilter_TDisjunction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operands []*TFilter `protobuf:"bytes,1,rep,name=operands,proto3" json:"operands,omitempty"` +} + +func (x *TFilter_TDisjunction) Reset() { + *x = TFilter_TDisjunction{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TDisjunction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TDisjunction) ProtoMessage() {} + +func (x *TFilter_TDisjunction) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TDisjunction.ProtoReflect.Descriptor instead. +func (*TFilter_TDisjunction) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 2} +} + +func (x *TFilter_TDisjunction) GetOperands() []*TFilter { + if x != nil { + return x.Operands + } + return nil +} + +// transforms into "NOT" +type TFilter_TNegation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operand *TFilter `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` +} + +func (x *TFilter_TNegation) Reset() { + *x = TFilter_TNegation{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TNegation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TNegation) ProtoMessage() {} + +func (x *TFilter_TNegation) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TNegation.ProtoReflect.Descriptor instead. +func (*TFilter_TNegation) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 3} +} + +func (x *TFilter_TNegation) GetOperand() *TFilter { + if x != nil { + return x.Operand + } + return nil +} + +// SubExpr transformation rules: +// 1. top-level: $children +// 2. 
other levels: ($children) +type TFilter_TSubExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Next level expression + Operand *TFilter `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` +} + +func (x *TFilter_TSubExpr) Reset() { + *x = TFilter_TSubExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TSubExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TSubExpr) ProtoMessage() {} + +func (x *TFilter_TSubExpr) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TSubExpr.ProtoReflect.Descriptor instead. +func (*TFilter_TSubExpr) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 4} +} + +func (x *TFilter_TSubExpr) GetOperand() *TFilter { + if x != nil { + return x.Operand + } + return nil +} + +// A subset of comparators corresponding to the binary logical operators +type TFilter_TComparison_TBinary struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: + // + // *TFilter_TComparison_TBinary_TypedValue + // *TFilter_TComparison_TBinary_Column + Payload isTFilter_TComparison_TBinary_Payload `protobuf_oneof:"payload"` +} + +func (x *TFilter_TComparison_TBinary) Reset() { + *x = TFilter_TComparison_TBinary{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TComparison_TBinary) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TComparison_TBinary) ProtoMessage() {} + +func (x *TFilter_TComparison_TBinary) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TComparison_TBinary.ProtoReflect.Descriptor instead. 
+func (*TFilter_TComparison_TBinary) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 0, 0} +} + +func (m *TFilter_TComparison_TBinary) GetPayload() isTFilter_TComparison_TBinary_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *TFilter_TComparison_TBinary) GetTypedValue() *Ydb.TypedValue { + if x, ok := x.GetPayload().(*TFilter_TComparison_TBinary_TypedValue); ok { + return x.TypedValue + } + return nil +} + +func (x *TFilter_TComparison_TBinary) GetColumn() string { + if x, ok := x.GetPayload().(*TFilter_TComparison_TBinary_Column); ok { + return x.Column + } + return "" +} + +type isTFilter_TComparison_TBinary_Payload interface { + isTFilter_TComparison_TBinary_Payload() +} + +type TFilter_TComparison_TBinary_TypedValue struct { + // A scalar value + TypedValue *Ydb.TypedValue `protobuf:"bytes,1,opt,name=typed_value,json=typedValue,proto3,oneof"` +} + +type TFilter_TComparison_TBinary_Column struct { + // A name of another column to compare with + Column string `protobuf:"bytes,2,opt,name=column,proto3,oneof"` +} + +func (*TFilter_TComparison_TBinary_TypedValue) isTFilter_TComparison_TBinary_Payload() {} + +func (*TFilter_TComparison_TBinary_Column) isTFilter_TComparison_TBinary_Payload() {} + +// Renders to "$column IS NULL" +type TFilter_TComparison_TIsNull struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Column string `protobuf:"bytes,1,opt,name=column,proto3" json:"column,omitempty"` +} + +func (x *TFilter_TComparison_TIsNull) Reset() { + *x = TFilter_TComparison_TIsNull{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TComparison_TIsNull) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TComparison_TIsNull) ProtoMessage() {} + +func (x *TFilter_TComparison_TIsNull) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TComparison_TIsNull.ProtoReflect.Descriptor instead. 
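+
+// Editorial note (illustrative sketch, not part of the protoc output): expressing
+// `$column IS NULL` as a complete TFilter takes two levels of wrapping: the
+// comparison is one arm of TFilter's payload, and TIsNull is one arm of the
+// comparison's payload. The helper name isNullFilter is hypothetical.
+func isNullFilter(column string) *TFilter {
+	return &TFilter{
+		Payload: &TFilter_Comparison{
+			Comparison: &TFilter_TComparison{
+				Payload: &TFilter_TComparison_IsNull{
+					IsNull: &TFilter_TComparison_TIsNull{Column: column},
+				},
+			},
+		},
+	}
+}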
+func (*TFilter_TComparison_TIsNull) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 0, 1} +} + +func (x *TFilter_TComparison_TIsNull) GetColumn() string { + if x != nil { + return x.Column + } + return "" +} + +// Renders to "$column IS NOT NULL" +type TFilter_TComparison_TIsNotNull struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Column string `protobuf:"bytes,1,opt,name=column,proto3" json:"column,omitempty"` +} + +func (x *TFilter_TComparison_TIsNotNull) Reset() { + *x = TFilter_TComparison_TIsNotNull{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TComparison_TIsNotNull) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TComparison_TIsNotNull) ProtoMessage() {} + +func (x *TFilter_TComparison_TIsNotNull) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TComparison_TIsNotNull.ProtoReflect.Descriptor instead. +func (*TFilter_TComparison_TIsNotNull) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 0, 2} +} + +func (x *TFilter_TComparison_TIsNotNull) GetColumn() string { + if x != nil { + return x.Column + } + return "" +} + +// Renders to "$column IN $(values)" +type TFilter_TComparison_TIn struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Ydb.TypedValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *TFilter_TComparison_TIn) Reset() { + *x = TFilter_TComparison_TIn{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TComparison_TIn) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TComparison_TIn) ProtoMessage() {} + +func (x *TFilter_TComparison_TIn) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TComparison_TIn.ProtoReflect.Descriptor instead. 
+func (*TFilter_TComparison_TIn) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 0, 3} +} + +func (x *TFilter_TComparison_TIn) GetValues() []*Ydb.TypedValue { + if x != nil { + return x.Values + } + return nil +} + +// Renders to "$column BETWEEN $least AND $greatest" +type TFilter_TComparison_TBetween struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Least *Ydb.TypedValue `protobuf:"bytes,1,opt,name=least,proto3" json:"least,omitempty"` + Greatest *Ydb.TypedValue `protobuf:"bytes,2,opt,name=greatest,proto3" json:"greatest,omitempty"` +} + +func (x *TFilter_TComparison_TBetween) Reset() { + *x = TFilter_TComparison_TBetween{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TFilter_TComparison_TBetween) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TFilter_TComparison_TBetween) ProtoMessage() {} + +func (x *TFilter_TComparison_TBetween) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TFilter_TComparison_TBetween.ProtoReflect.Descriptor instead. +func (*TFilter_TComparison_TBetween) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{13, 0, 4} +} + +func (x *TFilter_TComparison_TBetween) GetLeast() *Ydb.TypedValue { + if x != nil { + return x.Least + } + return nil +} + +func (x *TFilter_TComparison_TBetween) GetGreatest() *Ydb.TypedValue { + if x != nil { + return x.Greatest + } + return nil +} + +type TAst_TList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Children []*TAst `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"` +} + +func (x *TAst_TList) Reset() { + *x = TAst_TList{} + if protoimpl.UnsafeEnabled { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TAst_TList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TAst_TList) ProtoMessage() {} + +func (x *TAst_TList) ProtoReflect() protoreflect.Message { + mi := &file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TAst_TList.ProtoReflect.Descriptor instead. 
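+
+// Editorial note (illustrative sketch, not part of the protoc output): a
+// `$column BETWEEN $least AND $greatest` comparison. As generated here, TBetween
+// carries only the two typed bounds, which are passed in as ready-made
+// Ydb.TypedValue messages. betweenComparison is a hypothetical helper.
+func betweenComparison(least, greatest *Ydb.TypedValue) *TFilter_TComparison {
+	return &TFilter_TComparison{
+		Payload: &TFilter_TComparison_Between{
+			Between: &TFilter_TComparison_TBetween{Least: least, Greatest: greatest},
+		},
+	}
+}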
+func (*TAst_TList) Descriptor() ([]byte, []int) { + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP(), []int{15, 0} +} + +func (x *TAst_TList) GetChildren() []*TAst { + if x != nil { + return x.Children + } + return nil +} + +var File_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto protoreflect.FileDescriptor + +var file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDesc = []byte{ + 0x0a, 0x4e, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, + 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x14, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x1a, 0x25, 0x79, 0x64, 0x62, 0x2f, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x79, 0x64, + 0x62, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x79, + 0x64, 0x62, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x79, 0x64, 0x62, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, + 0x63, 0x6f, 0x64, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x79, 0x64, 0x62, + 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x2f, 0x79, 0x64, 0x62, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x48, 0x79, 0x64, 0x62, 0x2f, + 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x12, 0x54, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x14, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x4e, 0x59, 0x71, 0x6c, + 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, + 0x2e, 0x54, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x70, 0x61, 0x74, + 0x74, 0x65, 0x72, 0x6e, 0x42, 0x0b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x22, 0x61, 0x0a, 0x13, 0x54, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, + 0x65, 
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x12, 0x32, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x8a, 0x01, 0x0a, 0x15, 0x54, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5b, + 0x0a, 0x14, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x4e, + 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, + 0x41, 0x70, 0x69, 0x2e, 0x54, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x22, 0x83, 0x01, 0x0a, 0x16, 0x54, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x4e, + 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, + 0x41, 0x70, 0x69, 0x2e, 0x54, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x32, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x64, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x30, 0x0a, 0x07, 0x54, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x59, 0x64, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, + 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x9f, 0x02, 0x0a, 0x12, 0x54, 0x4c, + 0x69, 0x73, 0x74, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x5b, 0x0a, 0x14, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x52, 0x07, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x70, + 0x6c, 0x69, 0x74, 0x5f, 0x63, 
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0d, 0x6d, 0x61, 0x78, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x09, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x73, 0x70, 0x6c, 0x69, 0x74, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x7f, 0x0a, 0x13, 0x54, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x53, 0x70, 0x6c, 0x69, 0x74, + 0x52, 0x06, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xb5, 0x05, 0x0a, + 0x07, 0x54, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x5b, 0x0a, 0x14, 0x64, 0x61, 0x74, 0x61, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x77, 0x68, 0x61, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x2e, 0x54, 0x57, 0x68, 0x61, 0x74, 0x52, 0x04, 0x77, 0x68, 0x61, 0x74, 0x12, 0x37, + 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x4e, + 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, + 0x41, 0x70, 0x69, 0x2e, 0x54, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x54, 0x46, 0x72, 0x6f, + 0x6d, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x3a, 0x0a, 0x05, 0x77, 0x68, 0x65, 0x72, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x54, 0x57, 0x68, 0x65, 0x72, 0x65, 0x52, 0x05, 0x77, 0x68, + 0x65, 0x72, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x2e, 0x54, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x1a, + 0x83, 0x01, 0x0a, 0x05, 0x54, 0x57, 0x68, 0x61, 0x74, 
0x12, 0x3f, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, + 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, + 0x54, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x54, 0x57, 0x68, 0x61, 0x74, 0x2e, 0x54, 0x49, + 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x05, 0x54, 0x49, + 0x74, 0x65, 0x6d, 0x12, 0x25, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x59, 0x64, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, + 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x1d, 0x0a, 0x05, 0x54, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x1a, 0x85, 0x01, 0x0a, 0x06, 0x54, 0x57, 0x68, 0x65, 0x72, 0x65, 0x12, + 0x40, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x61, 0x77, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x41, 0x73, + 0x74, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x61, 0x77, 0x1a, 0x36, 0x0a, 0x06, + 0x54, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x22, 0x6e, 0x0a, 0x06, 0x54, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x12, 0x35, + 0x0a, 0x06, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x52, 0x06, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x22, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x12, 0x54, 0x52, 0x65, 0x61, 0x64, 0x53, 0x70, + 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x14, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x4e, 0x59, 0x71, 0x6c, + 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, + 0x2e, 0x54, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x73, 
0x70, 0x6c, 0x69, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, + 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, + 0x54, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x52, 0x06, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x12, 0x42, + 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x4e, + 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, + 0x41, 0x70, 0x69, 0x2e, 0x54, 0x52, 0x65, 0x61, 0x64, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, + 0x64, 0x65, 0x12, 0x48, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x52, 0x65, 0x61, 0x64, 0x53, + 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x41, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x47, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x43, 0x6f, + 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x69, 0x6e, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x36, 0x0a, 0x05, 0x45, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x10, + 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x10, 0x02, + 0x22, 0x47, 0x0a, 0x07, 0x45, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x13, 0x0a, 0x0f, 0x46, + 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4f, 0x4c, 0x55, 0x4d, 0x4e, 0x5f, 0x53, 0x45, 0x54, 0x10, 0x01, + 0x12, 0x17, 0x0a, 0x13, 0x41, 0x52, 0x52, 0x4f, 0x57, 0x5f, 0x49, 0x50, 0x43, 0x5f, 0x53, 0x54, + 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x22, 0x81, 0x04, 0x0a, 0x13, 0x54, 0x52, + 0x65, 0x61, 0x64, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x52, 0x65, + 0x61, 0x64, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x54, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x30, 0x0a, 0x13, 0x61, 0x72, 0x72, 0x6f, + 0x77, 
0x5f, 0x69, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x11, 0x61, 0x72, 0x72, 0x6f, 0x77, 0x49, 0x70, + 0x63, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x70, + 0x6c, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x69, 0x6e, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x32, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0xaa, 0x01, 0x0a, 0x0a, 0x54, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x53, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x59, 0x64, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, + 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x52, 0x65, 0x61, 0x64, + 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, + 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x53, 0x65, 0x74, 0x2e, 0x54, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x29, 0x0a, 0x07, 0x54, 0x43, 0x6f, 0x6c, 0x75, + 0x6d, 0x6e, 0x12, 0x1e, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0a, 0x2e, 0x59, 0x64, 0x62, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x37, 0x0a, + 0x0b, 0x54, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x72, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3e, 0x0a, 0x0d, 0x54, 0x43, 0x6f, 0x6e, 0x74, 0x69, + 0x6e, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xd5, 0x0b, 0x0a, 0x07, 0x54, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 
0x2e, 0x54, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, + 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, + 0x4e, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x6a, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x54, 0x43, 0x6f, 0x6e, 0x6a, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x6a, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x4e, 0x0a, 0x0b, 0x64, 0x69, 0x73, 0x6a, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x54, 0x44, 0x69, 0x73, 0x6a, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x6a, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x45, 0x0a, 0x08, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x54, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x65, 0x78, + 0x70, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, + 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, + 0x54, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x53, 0x75, 0x62, 0x45, 0x78, 0x70, 0x72, + 0x48, 0x00, 0x52, 0x07, 0x73, 0x75, 0x62, 0x45, 0x78, 0x70, 0x72, 0x1a, 0xa4, 0x06, 0x0a, 0x0b, + 0x54, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x06, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x4e, 0x59, + 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, + 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x2e, 0x54, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x48, 0x00, + 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x12, 0x4c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6e, + 0x75, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x4e, 0x59, 0x71, 0x6c, + 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, + 0x2e, 0x54, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x69, 0x73, 0x6f, 0x6e, 0x2e, 0x54, 0x49, 0x73, 0x4e, 0x75, 0x6c, 0x6c, 0x48, 0x00, 0x52, 0x06, + 0x69, 0x73, 0x4e, 0x75, 0x6c, 0x6c, 0x12, 0x56, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x4e, 0x59, + 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, + 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x2e, 0x54, 0x49, 0x73, 0x4e, 0x6f, 0x74, 0x4e, 0x75, 0x6c, + 0x6c, 0x48, 0x00, 0x52, 0x09, 0x69, 0x73, 0x4e, 0x6f, 
0x74, 0x4e, 0x75, 0x6c, 0x6c, 0x12, 0x3f, + 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x4e, 0x59, 0x71, + 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, + 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x2e, 0x54, 0x49, 0x6e, 0x48, 0x00, 0x52, 0x02, 0x69, 0x6e, 0x12, + 0x4e, 0x0a, 0x07, 0x62, 0x65, 0x74, 0x77, 0x65, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x54, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x2e, 0x54, 0x42, 0x65, 0x74, + 0x77, 0x65, 0x65, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x62, 0x65, 0x74, 0x77, 0x65, 0x65, 0x6e, 0x1a, + 0xac, 0x01, 0x0a, 0x07, 0x54, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x12, 0x32, 0x0a, 0x0b, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x59, 0x64, 0x62, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x18, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x45, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x45, 0x52, + 0x56, 0x45, 0x44, 0x10, 0x00, 0x12, 0x05, 0x0a, 0x01, 0x4c, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, + 0x4c, 0x45, 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, 0x45, 0x51, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, + 0x4e, 0x45, 0x10, 0x04, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x45, 0x10, 0x05, 0x12, 0x05, 0x0a, 0x01, + 0x47, 0x10, 0x06, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x21, + 0x0a, 0x07, 0x54, 0x49, 0x73, 0x4e, 0x75, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x1a, 0x24, 0x0a, 0x0a, 0x54, 0x49, 0x73, 0x4e, 0x6f, 0x74, 0x4e, 0x75, 0x6c, 0x6c, 0x12, + 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x1a, 0x2e, 0x0a, 0x03, 0x54, 0x49, 0x6e, 0x12, 0x27, + 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x59, 0x64, 0x62, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x5e, 0x0a, 0x08, 0x54, 0x42, 0x65, 0x74, 0x77, + 0x65, 0x65, 0x6e, 0x12, 0x25, 0x0a, 0x05, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x59, 0x64, 0x62, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x08, 0x67, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x59, + 0x64, 0x62, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x67, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x73, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x1a, 0x49, 0x0a, 0x0c, 0x54, 0x43, 0x6f, 0x6e, 0x6a, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 
0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x73, 0x1a, 0x49, 0x0a, + 0x0c, 0x54, 0x44, 0x69, 0x73, 0x6a, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, + 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x08, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x73, 0x1a, 0x44, 0x0a, 0x09, 0x54, 0x4e, 0x65, 0x67, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x1a, 0x43, + 0x0a, 0x08, 0x54, 0x53, 0x75, 0x62, 0x45, 0x78, 0x70, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x4e, 0x59, + 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, + 0x70, 0x69, 0x2e, 0x54, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, + 0x61, 0x6e, 0x64, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x86, + 0x01, 0x0a, 0x06, 0x54, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x59, 0x64, 0x62, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x49, 0x64, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x59, 0x64, 0x62, 0x2e, 0x49, 0x73, 0x73, + 0x75, 0x65, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x04, 0x54, 0x41, 0x73, 0x74, + 0x12, 0x14, 0x0a, 0x04, 0x61, 0x74, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, + 0x52, 0x04, 0x61, 0x74, 0x6f, 0x6d, 0x12, 0x36, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x41, 0x73, 0x74, + 0x2e, 0x54, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x1a, 0x3f, + 0x0a, 0x05, 0x54, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x4e, 0x59, 0x71, 0x6c, + 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, + 0x2e, 0x54, 0x41, 0x73, 0x74, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x42, + 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x53, 0x5a, 0x51, 0x61, 0x2e, + 0x79, 
0x61, 0x6e, 0x64, 0x65, 0x78, 0x2d, 0x74, 0x65, 0x61, 0x6d, 0x2e, 0x72, 0x75, 0x2f, 0x79, + 0x64, 0x62, 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, 0x6c, 0x2f, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x6c, 0x69, 0x62, 0x67, 0x6f, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescOnce sync.Once + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescData = file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDesc +) + +func file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescGZIP() []byte { + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescOnce.Do(func() { + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescData = protoimpl.X.CompressGZIP(file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescData) + }) + return file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDescData +} + +var file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes = make([]protoimpl.MessageInfo, 34) +var file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_goTypes = []interface{}{ + (TReadSplitsRequest_EMode)(0), // 0: NYql.NConnector.NApi.TReadSplitsRequest.EMode + (TReadSplitsRequest_EFormat)(0), // 1: NYql.NConnector.NApi.TReadSplitsRequest.EFormat + (TFilter_TComparison_TBinary_EOperation)(0), // 2: NYql.NConnector.NApi.TFilter.TComparison.TBinary.EOperation + (*TListTablesRequest)(nil), // 3: NYql.NConnector.NApi.TListTablesRequest + (*TListTablesResponse)(nil), // 4: NYql.NConnector.NApi.TListTablesResponse + (*TDescribeTableRequest)(nil), // 5: NYql.NConnector.NApi.TDescribeTableRequest + (*TDescribeTableResponse)(nil), // 6: NYql.NConnector.NApi.TDescribeTableResponse + (*TSchema)(nil), // 7: NYql.NConnector.NApi.TSchema + (*TListSplitsRequest)(nil), // 8: NYql.NConnector.NApi.TListSplitsRequest + (*TListSplitsResponse)(nil), // 9: NYql.NConnector.NApi.TListSplitsResponse + (*TSelect)(nil), // 10: NYql.NConnector.NApi.TSelect + (*TSplit)(nil), // 11: NYql.NConnector.NApi.TSplit + (*TReadSplitsRequest)(nil), // 12: NYql.NConnector.NApi.TReadSplitsRequest + (*TReadSplitsResponse)(nil), // 13: NYql.NConnector.NApi.TReadSplitsResponse + (*TPagination)(nil), // 14: NYql.NConnector.NApi.TPagination + (*TContinuation)(nil), // 15: NYql.NConnector.NApi.TContinuation + (*TFilter)(nil), // 16: NYql.NConnector.NApi.TFilter + (*TError)(nil), // 17: NYql.NConnector.NApi.TError + (*TAst)(nil), // 18: NYql.NConnector.NApi.TAst + (*TSelect_TWhat)(nil), // 19: NYql.NConnector.NApi.TSelect.TWhat + (*TSelect_TFrom)(nil), // 20: NYql.NConnector.NApi.TSelect.TFrom + (*TSelect_TWhere)(nil), // 21: NYql.NConnector.NApi.TSelect.TWhere + (*TSelect_TLimit)(nil), // 22: NYql.NConnector.NApi.TSelect.TLimit + (*TSelect_TWhat_TItem)(nil), // 23: NYql.NConnector.NApi.TSelect.TWhat.TItem + (*TReadSplitsResponse_TColumnSet)(nil), // 24: 
NYql.NConnector.NApi.TReadSplitsResponse.TColumnSet + (*TReadSplitsResponse_TColumnSet_TColumn)(nil), // 25: NYql.NConnector.NApi.TReadSplitsResponse.TColumnSet.TColumn + (*TFilter_TComparison)(nil), // 26: NYql.NConnector.NApi.TFilter.TComparison + (*TFilter_TConjunction)(nil), // 27: NYql.NConnector.NApi.TFilter.TConjunction + (*TFilter_TDisjunction)(nil), // 28: NYql.NConnector.NApi.TFilter.TDisjunction + (*TFilter_TNegation)(nil), // 29: NYql.NConnector.NApi.TFilter.TNegation + (*TFilter_TSubExpr)(nil), // 30: NYql.NConnector.NApi.TFilter.TSubExpr + (*TFilter_TComparison_TBinary)(nil), // 31: NYql.NConnector.NApi.TFilter.TComparison.TBinary + (*TFilter_TComparison_TIsNull)(nil), // 32: NYql.NConnector.NApi.TFilter.TComparison.TIsNull + (*TFilter_TComparison_TIsNotNull)(nil), // 33: NYql.NConnector.NApi.TFilter.TComparison.TIsNotNull + (*TFilter_TComparison_TIn)(nil), // 34: NYql.NConnector.NApi.TFilter.TComparison.TIn + (*TFilter_TComparison_TBetween)(nil), // 35: NYql.NConnector.NApi.TFilter.TComparison.TBetween + (*TAst_TList)(nil), // 36: NYql.NConnector.NApi.TAst.TList + (*common.TDataSourceInstance)(nil), // 37: NYql.NConnector.NApi.TDataSourceInstance + (*Ydb.Column)(nil), // 38: Ydb.Column + (Ydb.StatusIds_StatusCode)(0), // 39: Ydb.StatusIds.StatusCode + (*Ydb_Issue.IssueMessage)(nil), // 40: Ydb.Issue.IssueMessage + (*Ydb.Value)(nil), // 41: Ydb.Value + (*Ydb.TypedValue)(nil), // 42: Ydb.TypedValue +} +var file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_depIdxs = []int32{ + 37, // 0: NYql.NConnector.NApi.TListTablesRequest.data_source_instance:type_name -> NYql.NConnector.NApi.TDataSourceInstance + 17, // 1: NYql.NConnector.NApi.TListTablesResponse.error:type_name -> NYql.NConnector.NApi.TError + 37, // 2: NYql.NConnector.NApi.TDescribeTableRequest.data_source_instance:type_name -> NYql.NConnector.NApi.TDataSourceInstance + 7, // 3: NYql.NConnector.NApi.TDescribeTableResponse.schema:type_name -> NYql.NConnector.NApi.TSchema + 17, // 4: NYql.NConnector.NApi.TDescribeTableResponse.error:type_name -> NYql.NConnector.NApi.TError + 38, // 5: NYql.NConnector.NApi.TSchema.columns:type_name -> Ydb.Column + 37, // 6: NYql.NConnector.NApi.TListSplitsRequest.data_source_instance:type_name -> NYql.NConnector.NApi.TDataSourceInstance + 10, // 7: NYql.NConnector.NApi.TListSplitsRequest.selects:type_name -> NYql.NConnector.NApi.TSelect + 11, // 8: NYql.NConnector.NApi.TListSplitsResponse.splits:type_name -> NYql.NConnector.NApi.TSplit + 17, // 9: NYql.NConnector.NApi.TListSplitsResponse.error:type_name -> NYql.NConnector.NApi.TError + 37, // 10: NYql.NConnector.NApi.TSelect.data_source_instance:type_name -> NYql.NConnector.NApi.TDataSourceInstance + 19, // 11: NYql.NConnector.NApi.TSelect.what:type_name -> NYql.NConnector.NApi.TSelect.TWhat + 20, // 12: NYql.NConnector.NApi.TSelect.from:type_name -> NYql.NConnector.NApi.TSelect.TFrom + 21, // 13: NYql.NConnector.NApi.TSelect.where:type_name -> NYql.NConnector.NApi.TSelect.TWhere + 22, // 14: NYql.NConnector.NApi.TSelect.limit:type_name -> NYql.NConnector.NApi.TSelect.TLimit + 10, // 15: NYql.NConnector.NApi.TSplit.select:type_name -> NYql.NConnector.NApi.TSelect + 37, // 16: NYql.NConnector.NApi.TReadSplitsRequest.data_source_instance:type_name -> NYql.NConnector.NApi.TDataSourceInstance + 11, // 17: NYql.NConnector.NApi.TReadSplitsRequest.splits:type_name -> NYql.NConnector.NApi.TSplit + 0, // 18: NYql.NConnector.NApi.TReadSplitsRequest.mode:type_name -> NYql.NConnector.NApi.TReadSplitsRequest.EMode + 1, 
// 19: NYql.NConnector.NApi.TReadSplitsRequest.format:type_name -> NYql.NConnector.NApi.TReadSplitsRequest.EFormat + 14, // 20: NYql.NConnector.NApi.TReadSplitsRequest.pagination:type_name -> NYql.NConnector.NApi.TPagination + 15, // 21: NYql.NConnector.NApi.TReadSplitsRequest.continuation:type_name -> NYql.NConnector.NApi.TContinuation + 24, // 22: NYql.NConnector.NApi.TReadSplitsResponse.column_set:type_name -> NYql.NConnector.NApi.TReadSplitsResponse.TColumnSet + 15, // 23: NYql.NConnector.NApi.TReadSplitsResponse.continuation:type_name -> NYql.NConnector.NApi.TContinuation + 17, // 24: NYql.NConnector.NApi.TReadSplitsResponse.error:type_name -> NYql.NConnector.NApi.TError + 26, // 25: NYql.NConnector.NApi.TFilter.comparison:type_name -> NYql.NConnector.NApi.TFilter.TComparison + 27, // 26: NYql.NConnector.NApi.TFilter.conjunction:type_name -> NYql.NConnector.NApi.TFilter.TConjunction + 28, // 27: NYql.NConnector.NApi.TFilter.disjunction:type_name -> NYql.NConnector.NApi.TFilter.TDisjunction + 29, // 28: NYql.NConnector.NApi.TFilter.negation:type_name -> NYql.NConnector.NApi.TFilter.TNegation + 30, // 29: NYql.NConnector.NApi.TFilter.sub_expr:type_name -> NYql.NConnector.NApi.TFilter.TSubExpr + 39, // 30: NYql.NConnector.NApi.TError.status:type_name -> Ydb.StatusIds.StatusCode + 40, // 31: NYql.NConnector.NApi.TError.issues:type_name -> Ydb.Issue.IssueMessage + 36, // 32: NYql.NConnector.NApi.TAst.list:type_name -> NYql.NConnector.NApi.TAst.TList + 23, // 33: NYql.NConnector.NApi.TSelect.TWhat.items:type_name -> NYql.NConnector.NApi.TSelect.TWhat.TItem + 16, // 34: NYql.NConnector.NApi.TSelect.TWhere.filter_typed:type_name -> NYql.NConnector.NApi.TFilter + 18, // 35: NYql.NConnector.NApi.TSelect.TWhere.filter_raw:type_name -> NYql.NConnector.NApi.TAst + 38, // 36: NYql.NConnector.NApi.TSelect.TWhat.TItem.column:type_name -> Ydb.Column + 38, // 37: NYql.NConnector.NApi.TReadSplitsResponse.TColumnSet.meta:type_name -> Ydb.Column + 25, // 38: NYql.NConnector.NApi.TReadSplitsResponse.TColumnSet.data:type_name -> NYql.NConnector.NApi.TReadSplitsResponse.TColumnSet.TColumn + 41, // 39: NYql.NConnector.NApi.TReadSplitsResponse.TColumnSet.TColumn.data:type_name -> Ydb.Value + 31, // 40: NYql.NConnector.NApi.TFilter.TComparison.binary:type_name -> NYql.NConnector.NApi.TFilter.TComparison.TBinary + 32, // 41: NYql.NConnector.NApi.TFilter.TComparison.is_null:type_name -> NYql.NConnector.NApi.TFilter.TComparison.TIsNull + 33, // 42: NYql.NConnector.NApi.TFilter.TComparison.is_not_null:type_name -> NYql.NConnector.NApi.TFilter.TComparison.TIsNotNull + 34, // 43: NYql.NConnector.NApi.TFilter.TComparison.in:type_name -> NYql.NConnector.NApi.TFilter.TComparison.TIn + 35, // 44: NYql.NConnector.NApi.TFilter.TComparison.between:type_name -> NYql.NConnector.NApi.TFilter.TComparison.TBetween + 16, // 45: NYql.NConnector.NApi.TFilter.TConjunction.operands:type_name -> NYql.NConnector.NApi.TFilter + 16, // 46: NYql.NConnector.NApi.TFilter.TDisjunction.operands:type_name -> NYql.NConnector.NApi.TFilter + 16, // 47: NYql.NConnector.NApi.TFilter.TNegation.operand:type_name -> NYql.NConnector.NApi.TFilter + 16, // 48: NYql.NConnector.NApi.TFilter.TSubExpr.operand:type_name -> NYql.NConnector.NApi.TFilter + 42, // 49: NYql.NConnector.NApi.TFilter.TComparison.TBinary.typed_value:type_name -> Ydb.TypedValue + 42, // 50: NYql.NConnector.NApi.TFilter.TComparison.TIn.values:type_name -> Ydb.TypedValue + 42, // 51: NYql.NConnector.NApi.TFilter.TComparison.TBetween.least:type_name -> Ydb.TypedValue + 42, // 52: 
NYql.NConnector.NApi.TFilter.TComparison.TBetween.greatest:type_name -> Ydb.TypedValue + 18, // 53: NYql.NConnector.NApi.TAst.TList.children:type_name -> NYql.NConnector.NApi.TAst + 54, // [54:54] is the sub-list for method output_type + 54, // [54:54] is the sub-list for method input_type + 54, // [54:54] is the sub-list for extension type_name + 54, // [54:54] is the sub-list for extension extendee + 0, // [0:54] is the sub-list for field type_name +} + +func init() { + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_init() +} +func file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_init() { + if File_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TListTablesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TListTablesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TDescribeTableRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TDescribeTableResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TListSplitsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TListSplitsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TSelect); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TSplit); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TReadSplitsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TReadSplitsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TPagination); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TContinuation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TAst); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TSelect_TWhat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TSelect_TFrom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TSelect_TWhere); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TSelect_TLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TSelect_TWhat_TItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TReadSplitsResponse_TColumnSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TReadSplitsResponse_TColumnSet_TColumn); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TComparison); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TConjunction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TDisjunction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TNegation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TSubExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TComparison_TBinary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TComparison_TIsNull); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TComparison_TIsNotNull); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TComparison_TIn); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TFilter_TComparison_TBetween); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TAst_TList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*TListTablesRequest_Pattern)(nil), + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*TSplit_Description)(nil), + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*TReadSplitsResponse_ColumnSet)(nil), + (*TReadSplitsResponse_ArrowIpcStreaming)(nil), + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[12].OneofWrappers = []interface{}{ + (*TContinuation_Description)(nil), + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[13].OneofWrappers = []interface{}{ + (*TFilter_Comparison)(nil), + (*TFilter_Conjunction)(nil), + (*TFilter_Disjunction)(nil), + (*TFilter_Negation)(nil), + (*TFilter_SubExpr)(nil), + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*TAst_Atom)(nil), + (*TAst_List)(nil), + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*TSelect_TWhat_TItem_Column)(nil), + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[23].OneofWrappers = []interface{}{ + (*TFilter_TComparison_Binary)(nil), + (*TFilter_TComparison_IsNull)(nil), + (*TFilter_TComparison_IsNotNull)(nil), + (*TFilter_TComparison_In)(nil), + (*TFilter_TComparison_Between)(nil), + } + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes[28].OneofWrappers = []interface{}{ + (*TFilter_TComparison_TBinary_TypedValue)(nil), + (*TFilter_TComparison_TBinary_Column)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDesc, + NumEnums: 3, + NumMessages: 34, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_goTypes, + DependencyIndexes: file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_depIdxs, + EnumInfos: 
file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_enumTypes, + MessageInfos: file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_msgTypes, + }.Build() + File_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto = out.File + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_rawDesc = nil + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_goTypes = nil + file_ydb_library_yql_providers_generic_connector_api_service_protos_connector_proto_depIdxs = nil +} diff --git a/ydb/library/yql/providers/generic/connector/libgo/service/protos/ya.make b/ydb/library/yql/providers/generic/connector/libgo/service/protos/ya.make new file mode 100644 index 0000000000..a34aeebc4a --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/libgo/service/protos/ya.make @@ -0,0 +1,5 @@ +GO_LIBRARY() + +SRCS(connector.pb.go) + +END() diff --git a/ydb/library/yql/providers/generic/connector/libgo/service/ya.make b/ydb/library/yql/providers/generic/connector/libgo/service/ya.make new file mode 100644 index 0000000000..741dc878f3 --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/libgo/service/ya.make @@ -0,0 +1,14 @@ +GO_LIBRARY() + +PEERDIR(ydb/library/yql/providers/generic/connector/libgo/service/protos) + +SRCS( + connector.pb.go + connector_grpc.pb.go +) + +END() + +RECURSE( + protos +) diff --git a/ydb/library/yql/providers/generic/connector/libgo/ya.make b/ydb/library/yql/providers/generic/connector/libgo/ya.make new file mode 100644 index 0000000000..66f1c16bea --- /dev/null +++ b/ydb/library/yql/providers/generic/connector/libgo/ya.make @@ -0,0 +1 @@ +RECURSE(service) diff --git a/ydb/library/yql/providers/generic/connector/ya.make b/ydb/library/yql/providers/generic/connector/ya.make index 4c3781fd91..c9bda17c11 100644 --- a/ydb/library/yql/providers/generic/connector/ya.make +++ b/ydb/library/yql/providers/generic/connector/ya.make @@ -1,4 +1,6 @@ RECURSE( api + app libcpp + libgo ) |
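For reference, below is a minimal sketch of how the generated Go types added in this commit might be used from client code. The import path is taken from the go_package option embedded in the raw descriptor above and may not match the final in-repo path; the field, oneof-wrapper, and enum-constant names (TReadSplitsRequest_ORDERED, TFilter_Comparison, and so on) are inferred from the descriptor, depIdxs, and OneofWrappers tables following the usual protoc-gen-go conventions, and should be treated as assumptions rather than verified API.

package main

import (
	"fmt"

	// Assumed import path: copied from the go_package option in the raw
	// descriptor ("a.yandex-team.ru/ydb/library/yql/providers/generic/connector/libgo/service/protos");
	// the path used inside the ydb repository may differ.
	api "a.yandex-team.ru/ydb/library/yql/providers/generic/connector/libgo/service/protos"
)

func main() {
	// Build a read request asking for Arrow IPC streaming output with
	// ordered splits and a pagination hint. Message, field, and enum
	// names are inferred from connector.proto as shown in the descriptor.
	req := &api.TReadSplitsRequest{
		Mode:   api.TReadSplitsRequest_ORDERED,
		Format: api.TReadSplitsRequest_ARROW_IPC_STREAMING,
		Pagination: &api.TPagination{
			Rows:  1024,
			Bytes: 4 << 20, // roughly 4 MiB per page
		},
	}

	// A simple predicate ("id IS NOT NULL") expressed through the TFilter
	// oneof wrappers registered in the generated init() above
	// (TFilter_Comparison, TFilter_TComparison_IsNotNull).
	filter := &api.TFilter{
		Payload: &api.TFilter_Comparison{
			Comparison: &api.TFilter_TComparison{
				Payload: &api.TFilter_TComparison_IsNotNull{
					IsNotNull: &api.TFilter_TComparison_TIsNotNull{
						Column: "id",
					},
				},
			},
		},
	}

	// Generated messages implement proto.Message, so String() gives a
	// human-readable text form suitable for quick inspection.
	fmt.Println(req.String())
	fmt.Println(filter.String())
}

In practice the filter would be attached to the TSelect.TWhere part of a split's select rather than printed, and the request would be sent over the gRPC service defined in connector_grpc.pb.go (listed in the ya.make above but not shown in this hunk); both of those wirings are outside what this diff displays, so they are intentionally omitted here.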