about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    hcpp <hcpp@ydb.tech>    2023-11-09 20:47:31 +0300
committer hcpp <hcpp@ydb.tech>    2023-11-09 21:11:21 +0300
commit    b7716e9978a4d1c2e548b9d53836a7d6894a8a38 (patch)
tree      4ecf0ea759c7d44ed9749dc5d7beeaffc6376aac
parent    ea9cef2dc79047c295a260f96895628b0feed43f (diff)
download  ydb-b7716e9978a4d1c2e548b9d53836a7d6894a8a38.tar.gz
YQ Connector:metrics (one more time)
custom httppuller has been added

Revert "Revert "metrics have been added""

This reverts commit e2a874f25a443edf946bab9a7f077239ba569ab0, reversing changes made to 2dbbc3a1a033dd09ad29f0c168d8ea7fef97309e.
-rw-r--r--library/go/core/metrics/buckets.go147
-rw-r--r--library/go/core/metrics/buckets_test.go183
-rw-r--r--library/go/core/metrics/collect/collect.go9
-rw-r--r--library/go/core/metrics/collect/policy/inflight/inflight.go78
-rw-r--r--library/go/core/metrics/collect/policy/inflight/inflight_opts.go11
-rw-r--r--library/go/core/metrics/collect/policy/inflight/ya.make8
-rw-r--r--library/go/core/metrics/collect/policy/ya.make1
-rw-r--r--library/go/core/metrics/collect/system.go229
-rw-r--r--library/go/core/metrics/collect/ya.make10
-rw-r--r--library/go/core/metrics/gotest/ya.make3
-rw-r--r--library/go/core/metrics/internal/pkg/metricsutil/buckets.go27
-rw-r--r--library/go/core/metrics/internal/pkg/metricsutil/ya.make5
-rw-r--r--library/go/core/metrics/internal/pkg/registryutil/gotest/ya.make3
-rw-r--r--library/go/core/metrics/internal/pkg/registryutil/registryutil.go104
-rw-r--r--library/go/core/metrics/internal/pkg/registryutil/registryutil_test.go48
-rw-r--r--library/go/core/metrics/internal/pkg/registryutil/ya.make9
-rw-r--r--library/go/core/metrics/internal/pkg/ya.make4
-rw-r--r--library/go/core/metrics/internal/ya.make1
-rw-r--r--library/go/core/metrics/metrics.go163
-rw-r--r--library/go/core/metrics/mock/counter.go35
-rw-r--r--library/go/core/metrics/mock/gauge.go33
-rw-r--r--library/go/core/metrics/mock/histogram.go40
-rw-r--r--library/go/core/metrics/mock/int_gauge.go33
-rw-r--r--library/go/core/metrics/mock/registry.go224
-rw-r--r--library/go/core/metrics/mock/registry_opts.go52
-rw-r--r--library/go/core/metrics/mock/timer.go21
-rw-r--r--library/go/core/metrics/mock/vec.go256
-rw-r--r--library/go/core/metrics/mock/ya.make14
-rw-r--r--library/go/core/metrics/nop/counter.go31
-rw-r--r--library/go/core/metrics/nop/gauge.go31
-rw-r--r--library/go/core/metrics/nop/histogram.go38
-rw-r--r--library/go/core/metrics/nop/int_gauge.go31
-rw-r--r--library/go/core/metrics/nop/registry.go79
-rw-r--r--library/go/core/metrics/nop/timer.go23
-rw-r--r--library/go/core/metrics/nop/ya.make12
-rw-r--r--library/go/core/metrics/prometheus/counter.go34
-rw-r--r--library/go/core/metrics/prometheus/counter_test.go38
-rw-r--r--library/go/core/metrics/prometheus/gauge.go32
-rw-r--r--library/go/core/metrics/prometheus/gauge_test.go39
-rw-r--r--library/go/core/metrics/prometheus/gotest/ya.make3
-rw-r--r--library/go/core/metrics/prometheus/histogram.go22
-rw-r--r--library/go/core/metrics/prometheus/histogram_test.go91
-rw-r--r--library/go/core/metrics/prometheus/int_gauge.go32
-rw-r--r--library/go/core/metrics/prometheus/registry.go254
-rw-r--r--library/go/core/metrics/prometheus/registry_opts.go84
-rw-r--r--library/go/core/metrics/prometheus/registry_test.go481
-rw-r--r--library/go/core/metrics/prometheus/timer.go19
-rw-r--r--library/go/core/metrics/prometheus/timer_test.go24
-rw-r--r--library/go/core/metrics/prometheus/vec.go248
-rw-r--r--library/go/core/metrics/prometheus/vec_test.go137
-rw-r--r--library/go/core/metrics/prometheus/ya.make25
-rw-r--r--library/go/core/metrics/solomon/converter.go114
-rw-r--r--library/go/core/metrics/solomon/converter_test.go200
-rw-r--r--library/go/core/metrics/solomon/counter.go97
-rw-r--r--library/go/core/metrics/solomon/counter_test.go90
-rw-r--r--library/go/core/metrics/solomon/func_counter.go86
-rw-r--r--library/go/core/metrics/solomon/func_counter_test.go82
-rw-r--r--library/go/core/metrics/solomon/func_gauge.go87
-rw-r--r--library/go/core/metrics/solomon/func_gauge_test.go64
-rw-r--r--library/go/core/metrics/solomon/func_int_gauge.go87
-rw-r--r--library/go/core/metrics/solomon/func_int_gauge_test.go64
-rw-r--r--library/go/core/metrics/solomon/gauge.go115
-rw-r--r--library/go/core/metrics/solomon/gauge_test.go75
-rw-r--r--library/go/core/metrics/solomon/gotest/ya.make3
-rw-r--r--library/go/core/metrics/solomon/histogram.go182
-rw-r--r--library/go/core/metrics/solomon/histogram_test.go153
-rw-r--r--library/go/core/metrics/solomon/int_gauge.go115
-rw-r--r--library/go/core/metrics/solomon/int_gauge_test.go75
-rw-r--r--library/go/core/metrics/solomon/metrics.go187
-rw-r--r--library/go/core/metrics/solomon/metrics_opts.go29
-rw-r--r--library/go/core/metrics/solomon/metrics_test.go296
-rw-r--r--library/go/core/metrics/solomon/race_test.go150
-rw-r--r--library/go/core/metrics/solomon/registry.go256
-rw-r--r--library/go/core/metrics/solomon/registry_opts.go87
-rw-r--r--library/go/core/metrics/solomon/registry_test.go168
-rw-r--r--library/go/core/metrics/solomon/spack.go387
-rw-r--r--library/go/core/metrics/solomon/spack_compression.go162
-rw-r--r--library/go/core/metrics/solomon/spack_compression_test.go26
-rw-r--r--library/go/core/metrics/solomon/spack_test.go184
-rw-r--r--library/go/core/metrics/solomon/stream.go89
-rw-r--r--library/go/core/metrics/solomon/stream_test.go595
-rw-r--r--library/go/core/metrics/solomon/timer.go91
-rw-r--r--library/go/core/metrics/solomon/timer_test.go56
-rw-r--r--library/go/core/metrics/solomon/vec.go279
-rw-r--r--library/go/core/metrics/solomon/vec_test.go339
-rw-r--r--library/go/core/metrics/solomon/ya.make44
-rw-r--r--library/go/core/metrics/ya.make20
-rw-r--r--library/go/httputil/headers/accept.go259
-rw-r--r--library/go/httputil/headers/accept_test.go309
-rw-r--r--library/go/httputil/headers/authorization.go31
-rw-r--r--library/go/httputil/headers/authorization_test.go30
-rw-r--r--library/go/httputil/headers/content.go57
-rw-r--r--library/go/httputil/headers/content_test.go41
-rw-r--r--library/go/httputil/headers/cookie.go5
-rw-r--r--library/go/httputil/headers/gotest/ya.make3
-rw-r--r--library/go/httputil/headers/tvm.go8
-rw-r--r--library/go/httputil/headers/user_agent.go5
-rw-r--r--library/go/httputil/headers/warning.go167
-rw-r--r--library/go/httputil/headers/warning_test.go245
-rw-r--r--library/go/httputil/headers/ya.make23
-rw-r--r--library/go/ptr/ptr.go75
-rw-r--r--library/go/ptr/ya.make5
-rw-r--r--library/go/test/assertpb/assert.go35
-rw-r--r--library/go/test/assertpb/ya.make9
-rw-r--r--library/go/x/xsync/singleinflight.go36
-rw-r--r--library/go/x/xsync/ya.make9
-rw-r--r--ydb/library/yql/providers/generic/connector/app/config/server.pb.go219
-rw-r--r--ydb/library/yql/providers/generic/connector/app/config/server.proto11
-rw-r--r--ydb/library/yql/providers/generic/connector/app/server/grpc_metrics.go162
-rw-r--r--ydb/library/yql/providers/generic/connector/app/server/httppuller.go126
-rw-r--r--ydb/library/yql/providers/generic/connector/app/server/launcher.go15
-rw-r--r--ydb/library/yql/providers/generic/connector/app/server/service_connector.go12
-rw-r--r--ydb/library/yql/providers/generic/connector/app/server/service_metrics.go57
-rw-r--r--ydb/library/yql/providers/generic/connector/app/server/ya.make3
114 files changed, 10586 insertions, 64 deletions
diff --git a/library/go/core/metrics/buckets.go b/library/go/core/metrics/buckets.go
new file mode 100644
index 0000000000..063c0c4418
--- /dev/null
+++ b/library/go/core/metrics/buckets.go
@@ -0,0 +1,147 @@
+package metrics
+
+import (
+ "sort"
+ "time"
+)
+
+var (
+ _ DurationBuckets = (*durationBuckets)(nil)
+ _ Buckets = (*buckets)(nil)
+)
+
+const (
+ errBucketsCountNeedsGreaterThanZero = "n needs to be > 0"
+ errBucketsStartNeedsGreaterThanZero = "start needs to be > 0"
+ errBucketsFactorNeedsGreaterThanOne = "factor needs to be > 1"
+)
+
+type durationBuckets struct {
+ buckets []time.Duration
+}
+
+// NewDurationBuckets returns new DurationBuckets implementation.
+func NewDurationBuckets(bk ...time.Duration) DurationBuckets {
+ sort.Slice(bk, func(i, j int) bool {
+ return bk[i] < bk[j]
+ })
+ return durationBuckets{buckets: bk}
+}
+
+func (d durationBuckets) Size() int {
+ return len(d.buckets)
+}
+
+func (d durationBuckets) MapDuration(dv time.Duration) (idx int) {
+ for _, bound := range d.buckets {
+ if dv < bound {
+ break
+ }
+ idx++
+ }
+ return
+}
+
+func (d durationBuckets) UpperBound(idx int) time.Duration {
+ if idx > d.Size()-1 {
+ panic("idx is out of bounds")
+ }
+ return d.buckets[idx]
+}
+
+type buckets struct {
+ buckets []float64
+}
+
+// NewBuckets returns new Buckets implementation.
+func NewBuckets(bk ...float64) Buckets {
+ sort.Slice(bk, func(i, j int) bool {
+ return bk[i] < bk[j]
+ })
+ return buckets{buckets: bk}
+}
+
+func (d buckets) Size() int {
+ return len(d.buckets)
+}
+
+func (d buckets) MapValue(v float64) (idx int) {
+ for _, bound := range d.buckets {
+ if v < bound {
+ break
+ }
+ idx++
+ }
+ return
+}
+
+func (d buckets) UpperBound(idx int) float64 {
+ if idx > d.Size()-1 {
+ panic("idx is out of bounds")
+ }
+ return d.buckets[idx]
+}
+
+// MakeLinearBuckets creates a set of linear value buckets.
+func MakeLinearBuckets(start, width float64, n int) Buckets {
+ if n <= 0 {
+ panic(errBucketsCountNeedsGreaterThanZero)
+ }
+ bounds := make([]float64, n)
+ for i := range bounds {
+ bounds[i] = start + (float64(i) * width)
+ }
+ return NewBuckets(bounds...)
+}
+
+// MakeLinearDurationBuckets creates a set of linear duration buckets.
+func MakeLinearDurationBuckets(start, width time.Duration, n int) DurationBuckets {
+ if n <= 0 {
+ panic(errBucketsCountNeedsGreaterThanZero)
+ }
+ buckets := make([]time.Duration, n)
+ for i := range buckets {
+ buckets[i] = start + (time.Duration(i) * width)
+ }
+ return NewDurationBuckets(buckets...)
+}
+
+// MakeExponentialBuckets creates a set of exponential value buckets.
+func MakeExponentialBuckets(start, factor float64, n int) Buckets {
+ if n <= 0 {
+ panic(errBucketsCountNeedsGreaterThanZero)
+ }
+ if start <= 0 {
+ panic(errBucketsStartNeedsGreaterThanZero)
+ }
+ if factor <= 1 {
+ panic(errBucketsFactorNeedsGreaterThanOne)
+ }
+ buckets := make([]float64, n)
+ curr := start
+ for i := range buckets {
+ buckets[i] = curr
+ curr *= factor
+ }
+ return NewBuckets(buckets...)
+}
+
+// MakeExponentialDurationBuckets creates a set of exponential duration buckets.
+func MakeExponentialDurationBuckets(start time.Duration, factor float64, n int) DurationBuckets {
+ if n <= 0 {
+ panic(errBucketsCountNeedsGreaterThanZero)
+ }
+ if start <= 0 {
+ panic(errBucketsStartNeedsGreaterThanZero)
+ }
+ if factor <= 1 {
+ panic(errBucketsFactorNeedsGreaterThanOne)
+ }
+ buckets := make([]time.Duration, n)
+ curr := start
+ for i := range buckets {
+ buckets[i] = curr
+ curr = time.Duration(float64(curr) * factor)
+ }
+ return NewDurationBuckets(buckets...)
+}
diff --git a/library/go/core/metrics/buckets_test.go b/library/go/core/metrics/buckets_test.go
new file mode 100644
index 0000000000..70cb6398c2
--- /dev/null
+++ b/library/go/core/metrics/buckets_test.go
@@ -0,0 +1,183 @@
+package metrics
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewDurationBuckets(t *testing.T) {
+ buckets := []time.Duration{
+ 1 * time.Second,
+ 3 * time.Second,
+ 5 * time.Second,
+ }
+ bk := NewDurationBuckets(buckets...)
+
+ expect := durationBuckets{
+ buckets: []time.Duration{
+ 1 * time.Second,
+ 3 * time.Second,
+ 5 * time.Second,
+ },
+ }
+ assert.Equal(t, expect, bk)
+}
+
+func Test_durationBuckets_MapDuration(t *testing.T) {
+ bk := NewDurationBuckets([]time.Duration{
+ 1 * time.Second,
+ 3 * time.Second,
+ 5 * time.Second,
+ }...)
+
+ for i := 0; i <= bk.Size(); i++ {
+ assert.Equal(t, i, bk.MapDuration(time.Duration(i*2)*time.Second))
+ }
+}
+
+func Test_durationBuckets_Size(t *testing.T) {
+ var buckets []time.Duration
+ for i := 1; i < 3; i++ {
+ buckets = append(buckets, time.Duration(i)*time.Second)
+ bk := NewDurationBuckets(buckets...)
+ assert.Equal(t, i, bk.Size())
+ }
+}
+
+func Test_durationBuckets_UpperBound(t *testing.T) {
+ bk := NewDurationBuckets([]time.Duration{
+ 1 * time.Second,
+ 2 * time.Second,
+ 3 * time.Second,
+ }...)
+
+ assert.Panics(t, func() { bk.UpperBound(999) })
+
+ for i := 0; i < bk.Size()-1; i++ {
+ assert.Equal(t, time.Duration(i+1)*time.Second, bk.UpperBound(i))
+ }
+}
+
+func TestNewBuckets(t *testing.T) {
+ bk := NewBuckets(1, 3, 5)
+
+ expect := buckets{
+ buckets: []float64{1, 3, 5},
+ }
+ assert.Equal(t, expect, bk)
+}
+
+func Test_buckets_MapValue(t *testing.T) {
+ bk := NewBuckets(1, 3, 5)
+
+ for i := 0; i <= bk.Size(); i++ {
+ assert.Equal(t, i, bk.MapValue(float64(i*2)))
+ }
+}
+
+func Test_buckets_Size(t *testing.T) {
+ var buckets []float64
+ for i := 1; i < 3; i++ {
+ buckets = append(buckets, float64(i))
+ bk := NewBuckets(buckets...)
+ assert.Equal(t, i, bk.Size())
+ }
+}
+
+func Test_buckets_UpperBound(t *testing.T) {
+ bk := NewBuckets(1, 2, 3)
+
+ assert.Panics(t, func() { bk.UpperBound(999) })
+
+ for i := 0; i < bk.Size()-1; i++ {
+ assert.Equal(t, float64(i+1), bk.UpperBound(i))
+ }
+}
+
+func TestMakeLinearBuckets_CorrectParameters_NotPanics(t *testing.T) {
+ assert.NotPanics(t, func() {
+ assert.Equal(t,
+ NewBuckets(0.0, 1.0, 2.0),
+ MakeLinearBuckets(0, 1, 3),
+ )
+ })
+}
+
+func TestMakeLinearBucketsPanicsOnBadCount(t *testing.T) {
+ assert.Panics(t, func() {
+ MakeLinearBuckets(0, 1, 0)
+ })
+}
+
+func TestMakeLinearDurationBuckets(t *testing.T) {
+ assert.NotPanics(t, func() {
+ assert.Equal(t,
+ NewDurationBuckets(0, time.Second, 2*time.Second),
+ MakeLinearDurationBuckets(0*time.Second, 1*time.Second, 3),
+ )
+ })
+}
+
+func TestMakeLinearDurationBucketsPanicsOnBadCount(t *testing.T) {
+ assert.Panics(t, func() {
+ MakeLinearDurationBuckets(0*time.Second, 1*time.Second, 0)
+ })
+}
+
+func TestMakeExponentialBuckets(t *testing.T) {
+ assert.NotPanics(t, func() {
+ assert.Equal(
+ t,
+ NewBuckets(2, 4, 8),
+ MakeExponentialBuckets(2, 2, 3),
+ )
+ })
+}
+
+func TestMakeExponentialBucketsPanicsOnBadCount(t *testing.T) {
+ assert.Panics(t, func() {
+ MakeExponentialBuckets(2, 2, 0)
+ })
+}
+
+func TestMakeExponentialBucketsPanicsOnBadStart(t *testing.T) {
+ assert.Panics(t, func() {
+ MakeExponentialBuckets(0, 2, 2)
+ })
+}
+
+func TestMakeExponentialBucketsPanicsOnBadFactor(t *testing.T) {
+ assert.Panics(t, func() {
+ MakeExponentialBuckets(2, 1, 2)
+ })
+}
+
+func TestMakeExponentialDurationBuckets(t *testing.T) {
+ assert.NotPanics(t, func() {
+ assert.Equal(
+ t,
+ NewDurationBuckets(2*time.Second, 4*time.Second, 8*time.Second),
+ MakeExponentialDurationBuckets(2*time.Second, 2, 3),
+ )
+ })
+}
+
+func TestMakeExponentialDurationBucketsPanicsOnBadCount(t *testing.T) {
+ assert.Panics(t, func() {
+ MakeExponentialDurationBuckets(2*time.Second, 2, 0)
+ })
+}
+
+func TestMakeExponentialDurationBucketsPanicsOnBadStart(t *testing.T) {
+ assert.Panics(t, func() {
+ MakeExponentialDurationBuckets(0, 2, 2)
+ })
+}
+
+func TestMakeExponentialDurationBucketsPanicsOnBadFactor(t *testing.T) {
+ assert.Panics(t, func() {
+ MakeExponentialDurationBuckets(2*time.Second, 1, 2)
+ })
+}
diff --git a/library/go/core/metrics/collect/collect.go b/library/go/core/metrics/collect/collect.go
new file mode 100644
index 0000000000..492a2f74a5
--- /dev/null
+++ b/library/go/core/metrics/collect/collect.go
@@ -0,0 +1,9 @@
+package collect
+
+import (
+ "context"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+type Func func(ctx context.Context, r metrics.Registry, c metrics.CollectPolicy)
diff --git a/library/go/core/metrics/collect/policy/inflight/inflight.go b/library/go/core/metrics/collect/policy/inflight/inflight.go
new file mode 100644
index 0000000000..bc045fe188
--- /dev/null
+++ b/library/go/core/metrics/collect/policy/inflight/inflight.go
@@ -0,0 +1,78 @@
+package inflight
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/x/xsync"
+)
+
+var _ metrics.CollectPolicy = (*inflightPolicy)(nil)
+
+type inflightPolicy struct {
+ addCollectLock sync.Mutex
+ collect atomic.Value // func(ctx context.Context)
+
+ minUpdateInterval time.Duration
+ lastUpdate time.Time
+
+ inflight xsync.SingleInflight
+}
+
+func NewCollectorPolicy(opts ...Option) metrics.CollectPolicy {
+ c := &inflightPolicy{
+ minUpdateInterval: time.Second,
+ inflight: xsync.NewSingleInflight(),
+ }
+ c.collect.Store(func(context.Context) {})
+
+ for _, opt := range opts {
+ opt(c)
+ }
+
+ return c
+}
+
+func (i *inflightPolicy) RegisteredCounter(counterFunc func() int64) func() int64 {
+ return func() int64 {
+ i.tryInflightUpdate()
+ return counterFunc()
+ }
+}
+
+func (i *inflightPolicy) RegisteredGauge(gaugeFunc func() float64) func() float64 {
+ return func() float64 {
+ i.tryInflightUpdate()
+ return gaugeFunc()
+ }
+}
+
+func (i *inflightPolicy) AddCollect(collect func(context.Context)) {
+ oldCollect := i.getCollect()
+ i.setCollect(func(ctx context.Context) {
+ oldCollect(ctx)
+ collect(ctx)
+ })
+}
+
+func (i *inflightPolicy) tryInflightUpdate() {
+ i.inflight.Do(func() {
+ if time.Since(i.lastUpdate) < i.minUpdateInterval {
+ return
+ }
+
+ i.getCollect()(context.Background())
+ i.lastUpdate = time.Now()
+ })
+}
+
+func (i *inflightPolicy) getCollect() func(context.Context) {
+ return i.collect.Load().(func(context.Context))
+}
+
+func (i *inflightPolicy) setCollect(collect func(context.Context)) {
+ i.collect.Store(collect)
+}
diff --git a/library/go/core/metrics/collect/policy/inflight/inflight_opts.go b/library/go/core/metrics/collect/policy/inflight/inflight_opts.go
new file mode 100644
index 0000000000..cc277b0c71
--- /dev/null
+++ b/library/go/core/metrics/collect/policy/inflight/inflight_opts.go
@@ -0,0 +1,11 @@
+package inflight
+
+import "time"
+
+type Option func(*inflightPolicy)
+
+func WithMinCollectInterval(interval time.Duration) Option {
+ return func(c *inflightPolicy) {
+ c.minUpdateInterval = interval
+ }
+}
diff --git a/library/go/core/metrics/collect/policy/inflight/ya.make b/library/go/core/metrics/collect/policy/inflight/ya.make
new file mode 100644
index 0000000000..6101e04049
--- /dev/null
+++ b/library/go/core/metrics/collect/policy/inflight/ya.make
@@ -0,0 +1,8 @@
+GO_LIBRARY()
+
+SRCS(
+ inflight.go
+ inflight_opts.go
+)
+
+END()
diff --git a/library/go/core/metrics/collect/policy/ya.make b/library/go/core/metrics/collect/policy/ya.make
new file mode 100644
index 0000000000..2717ef9863
--- /dev/null
+++ b/library/go/core/metrics/collect/policy/ya.make
@@ -0,0 +1 @@
+RECURSE(inflight)
diff --git a/library/go/core/metrics/collect/system.go b/library/go/core/metrics/collect/system.go
new file mode 100644
index 0000000000..a21e91d632
--- /dev/null
+++ b/library/go/core/metrics/collect/system.go
@@ -0,0 +1,229 @@
+// dashboard generator for these metrics can be found at: github.com/ydb-platform/ydb/arcadia/library/go/yandex/monitoring-dashboards
+package collect
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/debug"
+ "time"
+
+ "github.com/prometheus/procfs"
+ "github.com/ydb-platform/ydb/library/go/core/buildinfo"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+var _ Func = GoMetrics
+
+func GoMetrics(_ context.Context, r metrics.Registry, c metrics.CollectPolicy) {
+ if r == nil {
+ return
+ }
+ r = r.WithPrefix("go")
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5) // Minimum, 25%, 50%, 75%, and maximum pause times.
+ var numGoroutine, numThread int
+ var ms runtime.MemStats
+
+ c.AddCollect(func(context.Context) {
+ debug.ReadGCStats(&stats)
+ runtime.ReadMemStats(&ms)
+
+ numThread, _ = runtime.ThreadCreateProfile(nil)
+ numGoroutine = runtime.NumGoroutine()
+ })
+
+ gcRegistry := r.WithPrefix("gc")
+ gcRegistry.FuncCounter("num", c.RegisteredCounter(func() int64 {
+ return stats.NumGC
+ }))
+ gcRegistry.FuncCounter(r.ComposeName("pause", "total", "ns"), c.RegisteredCounter(func() int64 {
+ return stats.PauseTotal.Nanoseconds()
+ }))
+ gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "min"), c.RegisteredGauge(func() float64 {
+ return stats.PauseQuantiles[0].Seconds()
+ }))
+ gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "25"), c.RegisteredGauge(func() float64 {
+ return stats.PauseQuantiles[1].Seconds()
+ }))
+ gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "50"), c.RegisteredGauge(func() float64 {
+ return stats.PauseQuantiles[2].Seconds()
+ }))
+ gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "75"), c.RegisteredGauge(func() float64 {
+ return stats.PauseQuantiles[3].Seconds()
+ }))
+ gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "max"), c.RegisteredGauge(func() float64 {
+ return stats.PauseQuantiles[4].Seconds()
+ }))
+ gcRegistry.FuncGauge(r.ComposeName("last", "ts"), c.RegisteredGauge(func() float64 {
+ return float64(ms.LastGC)
+ }))
+ gcRegistry.FuncCounter(r.ComposeName("forced", "num"), c.RegisteredCounter(func() int64 {
+ return int64(ms.NumForcedGC)
+ }))
+
+ r.FuncGauge(r.ComposeName("goroutine", "num"), c.RegisteredGauge(func() float64 {
+ return float64(numGoroutine)
+ }))
+ r.FuncGauge(r.ComposeName("thread", "num"), c.RegisteredGauge(func() float64 {
+ return float64(numThread)
+ }))
+
+ memRegistry := r.WithPrefix("mem")
+ memRegistry.FuncCounter(r.ComposeName("alloc", "total"), c.RegisteredCounter(func() int64 {
+ return int64(ms.TotalAlloc)
+ }))
+ memRegistry.FuncGauge("sys", c.RegisteredGauge(func() float64 {
+ return float64(ms.Sys)
+ }))
+ memRegistry.FuncCounter("lookups", c.RegisteredCounter(func() int64 {
+ return int64(ms.Lookups)
+ }))
+ memRegistry.FuncCounter("mallocs", c.RegisteredCounter(func() int64 {
+ return int64(ms.Mallocs)
+ }))
+ memRegistry.FuncCounter("frees", c.RegisteredCounter(func() int64 {
+ return int64(ms.Frees)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("heap", "alloc"), c.RegisteredGauge(func() float64 {
+ return float64(ms.HeapAlloc)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("heap", "sys"), c.RegisteredGauge(func() float64 {
+ return float64(ms.HeapSys)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("heap", "idle"), c.RegisteredGauge(func() float64 {
+ return float64(ms.HeapIdle)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("heap", "inuse"), c.RegisteredGauge(func() float64 {
+ return float64(ms.HeapInuse)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("heap", "released"), c.RegisteredGauge(func() float64 {
+ return float64(ms.HeapReleased)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("heap", "objects"), c.RegisteredGauge(func() float64 {
+ return float64(ms.HeapObjects)
+ }))
+
+ memRegistry.FuncGauge(r.ComposeName("stack", "inuse"), c.RegisteredGauge(func() float64 {
+ return float64(ms.StackInuse)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("stack", "sys"), c.RegisteredGauge(func() float64 {
+ return float64(ms.StackSys)
+ }))
+
+ memRegistry.FuncGauge(r.ComposeName("span", "inuse"), c.RegisteredGauge(func() float64 {
+ return float64(ms.MSpanInuse)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("span", "sys"), c.RegisteredGauge(func() float64 {
+ return float64(ms.MSpanSys)
+ }))
+
+ memRegistry.FuncGauge(r.ComposeName("cache", "inuse"), c.RegisteredGauge(func() float64 {
+ return float64(ms.MCacheInuse)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("cache", "sys"), c.RegisteredGauge(func() float64 {
+ return float64(ms.MCacheSys)
+ }))
+
+ memRegistry.FuncGauge(r.ComposeName("buck", "hash", "sys"), c.RegisteredGauge(func() float64 {
+ return float64(ms.BuckHashSys)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("gc", "sys"), c.RegisteredGauge(func() float64 {
+ return float64(ms.GCSys)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("other", "sys"), c.RegisteredGauge(func() float64 {
+ return float64(ms.OtherSys)
+ }))
+ memRegistry.FuncGauge(r.ComposeName("gc", "next"), c.RegisteredGauge(func() float64 {
+ return float64(ms.NextGC)
+ }))
+
+ memRegistry.FuncGauge(r.ComposeName("gc", "cpu", "fraction"), c.RegisteredGauge(func() float64 {
+ return ms.GCCPUFraction
+ }))
+}
+
+var _ Func = ProcessMetrics
+
+func ProcessMetrics(_ context.Context, r metrics.Registry, c metrics.CollectPolicy) {
+ if r == nil {
+ return
+ }
+ buildVersion := buildinfo.Info.ArcadiaSourceRevision
+ r.WithTags(map[string]string{"revision": buildVersion}).Gauge("build").Set(1.0)
+
+ pid := os.Getpid()
+ proc, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ procRegistry := r.WithPrefix("proc")
+
+ var ioStat procfs.ProcIO
+ var procStat procfs.ProcStat
+ var fd int
+ var cpuWait uint64
+
+ const clocksPerSec = 100
+
+ c.AddCollect(func(ctx context.Context) {
+ if gatheredFD, err := proc.FileDescriptorsLen(); err == nil {
+ fd = gatheredFD
+ }
+
+ if gatheredIOStat, err := proc.IO(); err == nil {
+ ioStat.SyscW = gatheredIOStat.SyscW
+ ioStat.WriteBytes = gatheredIOStat.WriteBytes
+ ioStat.SyscR = gatheredIOStat.SyscR
+ ioStat.ReadBytes = gatheredIOStat.ReadBytes
+ }
+
+ if gatheredStat, err := proc.Stat(); err == nil {
+ procStat.UTime = gatheredStat.UTime
+ procStat.STime = gatheredStat.STime
+ procStat.RSS = gatheredStat.RSS
+ }
+
+ if gatheredSched, err := proc.Schedstat(); err == nil {
+ cpuWait = gatheredSched.WaitingNanoseconds
+ }
+ })
+
+ procRegistry.FuncGauge("fd", c.RegisteredGauge(func() float64 {
+ return float64(fd)
+ }))
+
+ ioRegistry := procRegistry.WithPrefix("io")
+ ioRegistry.FuncCounter(r.ComposeName("read", "count"), c.RegisteredCounter(func() int64 {
+ return int64(ioStat.SyscR)
+ }))
+ ioRegistry.FuncCounter(r.ComposeName("read", "bytes"), c.RegisteredCounter(func() int64 {
+ return int64(ioStat.ReadBytes)
+ }))
+ ioRegistry.FuncCounter(r.ComposeName("write", "count"), c.RegisteredCounter(func() int64 {
+ return int64(ioStat.SyscW)
+ }))
+ ioRegistry.FuncCounter(r.ComposeName("write", "bytes"), c.RegisteredCounter(func() int64 {
+ return int64(ioStat.WriteBytes)
+ }))
+
+ cpuRegistry := procRegistry.WithPrefix("cpu")
+ cpuRegistry.FuncCounter(r.ComposeName("total", "ns"), c.RegisteredCounter(func() int64 {
+ return int64(procStat.UTime+procStat.STime) * (1_000_000_000 / clocksPerSec)
+ }))
+ cpuRegistry.FuncCounter(r.ComposeName("user", "ns"), c.RegisteredCounter(func() int64 {
+ return int64(procStat.UTime) * (1_000_000_000 / clocksPerSec)
+ }))
+ cpuRegistry.FuncCounter(r.ComposeName("system", "ns"), c.RegisteredCounter(func() int64 {
+ return int64(procStat.STime) * (1_000_000_000 / clocksPerSec)
+ }))
+ cpuRegistry.FuncCounter(r.ComposeName("wait", "ns"), c.RegisteredCounter(func() int64 {
+ return int64(cpuWait)
+ }))
+
+ procRegistry.FuncGauge(r.ComposeName("mem", "rss"), c.RegisteredGauge(func() float64 {
+ return float64(procStat.RSS)
+ }))
+}
diff --git a/library/go/core/metrics/collect/ya.make b/library/go/core/metrics/collect/ya.make
new file mode 100644
index 0000000000..be81763221
--- /dev/null
+++ b/library/go/core/metrics/collect/ya.make
@@ -0,0 +1,10 @@
+GO_LIBRARY()
+
+SRCS(
+ collect.go
+ system.go
+)
+
+END()
+
+RECURSE(policy)
diff --git a/library/go/core/metrics/gotest/ya.make b/library/go/core/metrics/gotest/ya.make
new file mode 100644
index 0000000000..d0bdf91982
--- /dev/null
+++ b/library/go/core/metrics/gotest/ya.make
@@ -0,0 +1,3 @@
+GO_TEST_FOR(library/go/core/metrics)
+
+END()
diff --git a/library/go/core/metrics/internal/pkg/metricsutil/buckets.go b/library/go/core/metrics/internal/pkg/metricsutil/buckets.go
new file mode 100644
index 0000000000..e9501fcceb
--- /dev/null
+++ b/library/go/core/metrics/internal/pkg/metricsutil/buckets.go
@@ -0,0 +1,27 @@
+package metricsutil
+
+import (
+ "sort"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+// BucketsBounds unwraps Buckets bounds to slice of float64.
+func BucketsBounds(b metrics.Buckets) []float64 {
+ bkts := make([]float64, b.Size())
+ for i := range bkts {
+ bkts[i] = b.UpperBound(i)
+ }
+ sort.Float64s(bkts)
+ return bkts
+}
+
+// DurationBucketsBounds unwraps DurationBuckets bounds to slice of float64.
+func DurationBucketsBounds(b metrics.DurationBuckets) []float64 {
+ bkts := make([]float64, b.Size())
+ for i := range bkts {
+ bkts[i] = b.UpperBound(i).Seconds()
+ }
+ sort.Float64s(bkts)
+ return bkts
+}
diff --git a/library/go/core/metrics/internal/pkg/metricsutil/ya.make b/library/go/core/metrics/internal/pkg/metricsutil/ya.make
new file mode 100644
index 0000000000..3058637089
--- /dev/null
+++ b/library/go/core/metrics/internal/pkg/metricsutil/ya.make
@@ -0,0 +1,5 @@
+GO_LIBRARY()
+
+SRCS(buckets.go)
+
+END()
diff --git a/library/go/core/metrics/internal/pkg/registryutil/gotest/ya.make b/library/go/core/metrics/internal/pkg/registryutil/gotest/ya.make
new file mode 100644
index 0000000000..55c204d140
--- /dev/null
+++ b/library/go/core/metrics/internal/pkg/registryutil/gotest/ya.make
@@ -0,0 +1,3 @@
+GO_TEST_FOR(library/go/core/metrics/internal/pkg/registryutil)
+
+END()
diff --git a/library/go/core/metrics/internal/pkg/registryutil/registryutil.go b/library/go/core/metrics/internal/pkg/registryutil/registryutil.go
new file mode 100644
index 0000000000..ebce50d8cb
--- /dev/null
+++ b/library/go/core/metrics/internal/pkg/registryutil/registryutil.go
@@ -0,0 +1,104 @@
+package registryutil
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/OneOfOne/xxhash"
+)
+
+// BuildRegistryKey creates registry name based on given prefix and tags
+func BuildRegistryKey(prefix string, tags map[string]string) string {
+ var builder strings.Builder
+
+ builder.WriteString(strconv.Quote(prefix))
+ builder.WriteRune('{')
+ builder.WriteString(StringifyTags(tags))
+ builder.WriteByte('}')
+
+ return builder.String()
+}
+
+// BuildFQName returns name parts joined by given separator.
+// Mainly used to append prefix to registry
+func BuildFQName(separator string, parts ...string) (name string) {
+ var b strings.Builder
+ for _, p := range parts {
+ if p == "" {
+ continue
+ }
+ if b.Len() > 0 {
+ b.WriteString(separator)
+ }
+ b.WriteString(strings.Trim(p, separator))
+ }
+ return b.String()
+}
+
+// MergeTags merges 2 sets of tags with the tags from tagsRight overriding values from tagsLeft
+func MergeTags(leftTags map[string]string, rightTags map[string]string) map[string]string {
+ if leftTags == nil && rightTags == nil {
+ return nil
+ }
+
+ if len(leftTags) == 0 {
+ return rightTags
+ }
+
+ if len(rightTags) == 0 {
+ return leftTags
+ }
+
+ newTags := make(map[string]string)
+ for key, value := range leftTags {
+ newTags[key] = value
+ }
+ for key, value := range rightTags {
+ newTags[key] = value
+ }
+ return newTags
+}
+
+// StringifyTags returns string representation of given tags map.
+// It is guaranteed that equal sets of tags will produce equal strings.
+func StringifyTags(tags map[string]string) string {
+ keys := make([]string, 0, len(tags))
+ for key := range tags {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+
+ var builder strings.Builder
+ for i, key := range keys {
+ if i > 0 {
+ builder.WriteByte(',')
+ }
+ builder.WriteString(key + "=" + tags[key])
+ }
+
+ return builder.String()
+}
+
+// VectorHash computes hash of metrics vector element
+func VectorHash(tags map[string]string, labels []string) (uint64, error) {
+ if len(tags) != len(labels) {
+ return 0, errors.New("inconsistent tags and labels sets")
+ }
+
+ h := xxhash.New64()
+
+ for _, label := range labels {
+ v, ok := tags[label]
+ if !ok {
+ return 0, fmt.Errorf("label '%s' not found in tags", label)
+ }
+ _, _ = h.WriteString(label)
+ _, _ = h.WriteString(v)
+ _, _ = h.WriteString(",")
+ }
+
+ return h.Sum64(), nil
+}
diff --git a/library/go/core/metrics/internal/pkg/registryutil/registryutil_test.go b/library/go/core/metrics/internal/pkg/registryutil/registryutil_test.go
new file mode 100644
index 0000000000..5463f04755
--- /dev/null
+++ b/library/go/core/metrics/internal/pkg/registryutil/registryutil_test.go
@@ -0,0 +1,48 @@
+package registryutil
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestBuildFQName verifies that BuildFQName joins non-empty parts with the
+// separator and trims separator runes from part edges (see "parts with sep").
+func TestBuildFQName(t *testing.T) {
+	testCases := []struct {
+		name     string
+		parts    []string
+		sep      string
+		expected string
+	}{
+		{
+			name:     "empty",
+			parts:    nil,
+			sep:      "_",
+			expected: "",
+		},
+		{
+			name:     "one part",
+			parts:    []string{"part"},
+			sep:      "_",
+			expected: "part",
+		},
+		{
+			name:     "two parts",
+			parts:    []string{"part", "another"},
+			sep:      "_",
+			expected: "part_another",
+		},
+		{
+			name:     "parts with sep",
+			parts:    []string{"abcde", "deabc"},
+			sep:      "abc",
+			expected: "deabcde",
+		},
+	}
+
+	for _, testCase := range testCases {
+		// Shadow the loop variable so the subtest closure captures a stable
+		// copy (required before Go 1.22 per-iteration loop variables).
+		c := testCase
+		t.Run(c.name, func(t *testing.T) {
+			assert.Equal(t, c.expected, BuildFQName(c.sep, c.parts...))
+		})
+	}
+}
diff --git a/library/go/core/metrics/internal/pkg/registryutil/ya.make b/library/go/core/metrics/internal/pkg/registryutil/ya.make
new file mode 100644
index 0000000000..4a1f976d40
--- /dev/null
+++ b/library/go/core/metrics/internal/pkg/registryutil/ya.make
@@ -0,0 +1,9 @@
+GO_LIBRARY()
+
+SRCS(registryutil.go)
+
+GO_TEST_SRCS(registryutil_test.go)
+
+END()
+
+RECURSE(gotest)
diff --git a/library/go/core/metrics/internal/pkg/ya.make b/library/go/core/metrics/internal/pkg/ya.make
new file mode 100644
index 0000000000..416d1b3e5d
--- /dev/null
+++ b/library/go/core/metrics/internal/pkg/ya.make
@@ -0,0 +1,4 @@
+RECURSE(
+ metricsutil
+ registryutil
+)
diff --git a/library/go/core/metrics/internal/ya.make b/library/go/core/metrics/internal/ya.make
new file mode 100644
index 0000000000..b2a587f35d
--- /dev/null
+++ b/library/go/core/metrics/internal/ya.make
@@ -0,0 +1 @@
+RECURSE(pkg)
diff --git a/library/go/core/metrics/metrics.go b/library/go/core/metrics/metrics.go
new file mode 100644
index 0000000000..097fca9a55
--- /dev/null
+++ b/library/go/core/metrics/metrics.go
@@ -0,0 +1,163 @@
+// Package metrics provides interface collecting performance metrics.
+package metrics
+
+import (
+ "context"
+ "time"
+)
+
+// Gauge tracks single float64 value.
+type Gauge interface {
+	// Set replaces the current value.
+	Set(value float64)
+	// Add increments the current value by the (possibly negative) delta.
+	Add(value float64)
+}
+
+// FuncGauge is Gauge with value provided by callback function.
+type FuncGauge interface {
+	// Function returns the callback that supplies the gauge value.
+	Function() func() float64
+}
+
+// IntGauge tracks single int64 value.
+type IntGauge interface {
+	// Set replaces the current value.
+	Set(value int64)
+	// Add increments the current value by the (possibly negative) delta.
+	Add(value int64)
+}
+
+// FuncIntGauge is IntGauge with value provided by callback function.
+type FuncIntGauge interface {
+	// Function returns the callback that supplies the gauge value.
+	Function() func() int64
+}
+
+// Counter tracks monotonically increasing value.
+type Counter interface {
+	// Inc increments counter by 1.
+	Inc()
+
+	// Add adds delta to the counter. Delta must be >=0.
+	Add(delta int64)
+}
+
+// FuncCounter is Counter with value provided by callback function.
+type FuncCounter interface {
+	// Function returns the callback that supplies the counter value.
+	Function() func() int64
+}
+
+// Histogram tracks distribution of value.
+type Histogram interface {
+	RecordValue(value float64)
+}
+
+// Timer measures durations.
+type Timer interface {
+	RecordDuration(value time.Duration)
+}
+
+// DurationBuckets defines buckets of the duration histogram.
+type DurationBuckets interface {
+	// Size returns number of buckets.
+	Size() int
+
+	// MapDuration returns index of the bucket.
+	//
+	// index is integer in range [0, Size()).
+	MapDuration(d time.Duration) int
+
+	// UpperBound of the last bucket is always +Inf.
+	//
+	// bucketIndex is integer in range [0, Size()-1).
+	UpperBound(bucketIndex int) time.Duration
+}
+
+// Buckets defines intervals of the regular histogram.
+type Buckets interface {
+	// Size returns number of buckets.
+	Size() int
+
+	// MapValue returns index of the bucket.
+	//
+	// Index is integer in range [0, Size()).
+	MapValue(v float64) int
+
+	// UpperBound of the last bucket is always +Inf.
+	//
+	// bucketIndex is integer in range [0, Size()-1).
+	UpperBound(bucketIndex int) float64
+}
+
+// GaugeVec stores multiple dynamically created gauges.
+type GaugeVec interface {
+	// With returns the gauge for the given tag set, creating it on demand.
+	With(map[string]string) Gauge
+
+	// Reset deletes all metrics in vector.
+	Reset()
+}
+
+// IntGaugeVec stores multiple dynamically created gauges.
+type IntGaugeVec interface {
+	// With returns the gauge for the given tag set, creating it on demand.
+	With(map[string]string) IntGauge
+
+	// Reset deletes all metrics in vector.
+	Reset()
+}
+
+// CounterVec stores multiple dynamically created counters.
+type CounterVec interface {
+	// With returns the counter for the given tag set, creating it on demand.
+	With(map[string]string) Counter
+
+	// Reset deletes all metrics in vector.
+	Reset()
+}
+
+// TimerVec stores multiple dynamically created timers.
+type TimerVec interface {
+	// With returns the timer for the given tag set, creating it on demand.
+	With(map[string]string) Timer
+
+	// Reset deletes all metrics in vector.
+	Reset()
+}
+
+// HistogramVec stores multiple dynamically created histograms.
+type HistogramVec interface {
+	// With returns the histogram for the given tag set, creating it on demand.
+	With(map[string]string) Histogram
+
+	// Reset deletes all metrics in vector.
+	Reset()
+}
+
+// Registry creates profiling metrics.
+type Registry interface {
+	// WithTags creates new sub-scope, where each metric has tags attached to it.
+	WithTags(tags map[string]string) Registry
+	// WithPrefix creates new sub-scope, where each metric has prefix added to it name.
+	WithPrefix(prefix string) Registry
+
+	// ComposeName joins name parts using the registry's separator.
+	ComposeName(parts ...string) string
+
+	Counter(name string) Counter
+	CounterVec(name string, labels []string) CounterVec
+	FuncCounter(name string, function func() int64) FuncCounter
+
+	Gauge(name string) Gauge
+	GaugeVec(name string, labels []string) GaugeVec
+	FuncGauge(name string, function func() float64) FuncGauge
+
+	IntGauge(name string) IntGauge
+	IntGaugeVec(name string, labels []string) IntGaugeVec
+	FuncIntGauge(name string, function func() int64) FuncIntGauge
+
+	Timer(name string) Timer
+	TimerVec(name string, labels []string) TimerVec
+
+	Histogram(name string, buckets Buckets) Histogram
+	HistogramVec(name string, buckets Buckets, labels []string) HistogramVec
+
+	DurationHistogram(name string, buckets DurationBuckets) Timer
+	DurationHistogramVec(name string, buckets DurationBuckets, labels []string) TimerVec
+}
+
+// CollectPolicy defines how registered gauge metrics are updated via collect func.
+type CollectPolicy interface {
+	// RegisteredCounter wraps a counter callback according to the policy.
+	RegisteredCounter(counterFunc func() int64) func() int64
+	// RegisteredGauge wraps a gauge callback according to the policy.
+	RegisteredGauge(gaugeFunc func() float64) func() float64
+	// AddCollect registers a collect function invoked by the policy.
+	AddCollect(collect func(ctx context.Context))
+}
diff --git a/library/go/core/metrics/mock/counter.go b/library/go/core/metrics/mock/counter.go
new file mode 100644
index 0000000000..c3016ea1a9
--- /dev/null
+++ b/library/go/core/metrics/mock/counter.go
@@ -0,0 +1,35 @@
+package mock
+
+import (
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+var _ metrics.Counter = (*Counter)(nil)
+
+// Counter tracks monotonically increasing value.
+//
+// Value must be non-nil before use (NewRegistry-created counters initialize
+// it); a zero-value Counter will nil-panic in Add.
+type Counter struct {
+	Name  string
+	Tags  map[string]string
+	Value *atomic.Int64
+}
+
+// Inc increments counter by 1.
+func (c *Counter) Inc() {
+	c.Add(1)
+}
+
+// Add adds delta to the counter. Delta must be >=0.
+// NOTE(review): the >=0 contract is not enforced here.
+func (c *Counter) Add(delta int64) {
+	c.Value.Add(delta)
+}
+
+var _ metrics.FuncCounter = (*FuncCounter)(nil)
+
+// FuncCounter is a mock counter whose value comes from a user callback.
+type FuncCounter struct {
+	function func() int64
+}
+
+// Function returns the wrapped callback.
+func (c FuncCounter) Function() func() int64 {
+	return c.function
+}
diff --git a/library/go/core/metrics/mock/gauge.go b/library/go/core/metrics/mock/gauge.go
new file mode 100644
index 0000000000..58d2d29beb
--- /dev/null
+++ b/library/go/core/metrics/mock/gauge.go
@@ -0,0 +1,33 @@
+package mock
+
+import (
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+var _ metrics.Gauge = (*Gauge)(nil)
+
+// Gauge tracks single float64 value.
+//
+// Value must be non-nil before use (NewRegistry-created gauges initialize
+// it); a zero-value Gauge will nil-panic in Set/Add.
+type Gauge struct {
+	Name  string
+	Tags  map[string]string
+	Value *atomic.Float64
+}
+
+// Set replaces the stored value.
+func (g *Gauge) Set(value float64) {
+	g.Value.Store(value)
+}
+
+// Add increments the stored value by the given delta.
+func (g *Gauge) Add(value float64) {
+	g.Value.Add(value)
+}
+
+var _ metrics.FuncGauge = (*FuncGauge)(nil)
+
+// FuncGauge is a mock gauge whose value comes from a user callback.
+type FuncGauge struct {
+	function func() float64
+}
+
+// Function returns the wrapped callback.
+func (g FuncGauge) Function() func() float64 {
+	return g.function
+}
diff --git a/library/go/core/metrics/mock/histogram.go b/library/go/core/metrics/mock/histogram.go
new file mode 100644
index 0000000000..734d7b5f88
--- /dev/null
+++ b/library/go/core/metrics/mock/histogram.go
@@ -0,0 +1,40 @@
+package mock
+
+import (
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+var (
+	_ metrics.Histogram = (*Histogram)(nil)
+	_ metrics.Timer     = (*Histogram)(nil)
+)
+
+// Histogram is a mock histogram that counts observations per bucket.
+// BucketBounds must be sorted ascending (sort.SearchFloat64s requirement);
+// values above the last bound are counted in InfValue.
+type Histogram struct {
+	Name         string
+	Tags         map[string]string
+	BucketBounds []float64
+	BucketValues []int64
+	InfValue     *atomic.Int64
+	mutex        sync.Mutex // guards BucketValues; InfValue is atomic on its own
+}
+
+// RecordValue adds the observation to its bucket.
+//
+// SearchFloat64s returns the first index whose bound is >= value, so each
+// bound acts as an inclusive upper edge of its bucket.
+func (h *Histogram) RecordValue(value float64) {
+	boundIndex := sort.SearchFloat64s(h.BucketBounds, value)
+
+	if boundIndex < len(h.BucketValues) {
+		h.mutex.Lock()
+		h.BucketValues[boundIndex] += 1
+		h.mutex.Unlock()
+	} else {
+		h.InfValue.Inc()
+	}
+}
+
+// RecordDuration records the duration as seconds, letting a Histogram
+// double as a metrics.Timer.
+func (h *Histogram) RecordDuration(value time.Duration) {
+	h.RecordValue(value.Seconds())
+}
diff --git a/library/go/core/metrics/mock/int_gauge.go b/library/go/core/metrics/mock/int_gauge.go
new file mode 100644
index 0000000000..8955107da9
--- /dev/null
+++ b/library/go/core/metrics/mock/int_gauge.go
@@ -0,0 +1,33 @@
+package mock
+
+import (
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+var _ metrics.IntGauge = (*IntGauge)(nil)
+
+// IntGauge tracks single int64 value.
+//
+// Value must be non-nil before use (NewRegistry-created gauges initialize
+// it); a zero-value IntGauge will nil-panic in Set/Add.
+type IntGauge struct {
+	Name  string
+	Tags  map[string]string
+	Value *atomic.Int64
+}
+
+// Set replaces the stored value.
+func (g *IntGauge) Set(value int64) {
+	g.Value.Store(value)
+}
+
+// Add increments the stored value by the given delta.
+func (g *IntGauge) Add(value int64) {
+	g.Value.Add(value)
+}
+
+var _ metrics.FuncIntGauge = (*FuncIntGauge)(nil)
+
+// FuncIntGauge is a mock gauge whose value comes from a user callback.
+type FuncIntGauge struct {
+	function func() int64
+}
+
+// Function returns the wrapped callback.
+func (g FuncIntGauge) Function() func() int64 {
+	return g.function
+}
diff --git a/library/go/core/metrics/mock/registry.go b/library/go/core/metrics/mock/registry.go
new file mode 100644
index 0000000000..77f465f8ea
--- /dev/null
+++ b/library/go/core/metrics/mock/registry.go
@@ -0,0 +1,224 @@
+package mock
+
+import (
+ "sync"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/metricsutil"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/registryutil"
+ "go.uber.org/atomic"
+)
+
+var _ metrics.Registry = (*Registry)(nil)
+
+// Registry is a mock metrics.Registry that stores metrics in shared maps.
+// All pointer-typed fields (m, metrics, subregistries) are shared between a
+// registry and every sub-registry derived from it, so value receivers on the
+// methods below still mutate common state.
+type Registry struct {
+	separator                  string
+	prefix                     string
+	tags                       map[string]string
+	allowLoadRegisteredMetrics bool
+
+	subregistries map[string]*Registry
+	m             *sync.Mutex
+
+	metrics *sync.Map
+}
+
+// NewRegistry creates a mock registry; nil opts yields defaults ("." separator).
+//
+// NOTE(review): when opts is non-nil, separator is taken unconditionally from
+// opts.Separator — a zero rune produces a "\x00" separator. Confirm callers
+// always go through NewRegistryOpts, which sets '.'.
+func NewRegistry(opts *RegistryOpts) *Registry {
+	r := &Registry{
+		separator: ".",
+
+		subregistries: make(map[string]*Registry),
+		m:             new(sync.Mutex),
+
+		metrics: new(sync.Map),
+	}
+
+	if opts != nil {
+		r.separator = string(opts.Separator)
+		r.prefix = opts.Prefix
+		r.tags = opts.Tags
+		r.allowLoadRegisteredMetrics = opts.AllowLoadRegisteredMetrics
+	}
+
+	return r
+}
+
+// WithTags creates new sub-scope, where each metric has tags attached to it.
+func (r Registry) WithTags(tags map[string]string) metrics.Registry {
+	return r.newSubregistry(r.prefix, registryutil.MergeTags(r.tags, tags))
+}
+
+// WithPrefix creates new sub-scope, where each metric has prefix added to it name.
+func (r Registry) WithPrefix(prefix string) metrics.Registry {
+	return r.newSubregistry(registryutil.BuildFQName(r.separator, r.prefix, prefix), r.tags)
+}
+
+// ComposeName joins name parts using the registry separator.
+func (r Registry) ComposeName(parts ...string) string {
+	return registryutil.BuildFQName(r.separator, parts...)
+}
+
+// Counter registers and returns a counter with the given name.
+// Panics on duplicate registration unless allowLoadRegisteredMetrics is set,
+// in which case the previously registered counter is returned.
+func (r Registry) Counter(name string) metrics.Counter {
+	s := &Counter{
+		Name:  r.newMetricName(name),
+		Tags:  r.tags,
+		Value: new(atomic.Int64),
+	}
+
+	key := registryutil.BuildRegistryKey(s.Name, r.tags)
+	if val, loaded := r.metrics.LoadOrStore(key, s); loaded {
+		if r.allowLoadRegisteredMetrics {
+			return val.(*Counter)
+		}
+		panic("metric with key " + key + " already registered")
+	}
+	return s
+}
+
+// FuncCounter registers a callback-backed counter.
+// NOTE(review): unlike Counter, duplicates always panic here even when
+// allowLoadRegisteredMetrics is true — confirm this asymmetry is intended.
+func (r Registry) FuncCounter(name string, function func() int64) metrics.FuncCounter {
+	metricName := r.newMetricName(name)
+	key := registryutil.BuildRegistryKey(metricName, r.tags)
+	s := FuncCounter{function: function}
+	if _, loaded := r.metrics.LoadOrStore(key, s); loaded {
+		panic("metric with key " + key + " already registered")
+	}
+	return s
+}
+
+// Gauge registers and returns a gauge with the given name.
+// Duplicate handling mirrors Counter.
+func (r Registry) Gauge(name string) metrics.Gauge {
+	s := &Gauge{
+		Name:  r.newMetricName(name),
+		Tags:  r.tags,
+		Value: new(atomic.Float64),
+	}
+
+	key := registryutil.BuildRegistryKey(s.Name, r.tags)
+	if val, loaded := r.metrics.LoadOrStore(key, s); loaded {
+		if r.allowLoadRegisteredMetrics {
+			return val.(*Gauge)
+		}
+		panic("metric with key " + key + " already registered")
+	}
+	return s
+}
+
+// FuncGauge registers a callback-backed gauge (duplicates always panic,
+// same asymmetry as FuncCounter).
+func (r Registry) FuncGauge(name string, function func() float64) metrics.FuncGauge {
+	metricName := r.newMetricName(name)
+	key := registryutil.BuildRegistryKey(metricName, r.tags)
+	s := FuncGauge{function: function}
+	if _, loaded := r.metrics.LoadOrStore(key, s); loaded {
+		panic("metric with key " + key + " already registered")
+	}
+	return s
+}
+
+// IntGauge registers and returns an int gauge with the given name.
+// NOTE(review): pointer receiver here while sibling methods use value
+// receivers — behaviorally equivalent (all mutated state is behind
+// pointers) but inconsistent.
+func (r *Registry) IntGauge(name string) metrics.IntGauge {
+	s := &IntGauge{
+		Name:  r.newMetricName(name),
+		Tags:  r.tags,
+		Value: new(atomic.Int64),
+	}
+
+	key := registryutil.BuildRegistryKey(s.Name, r.tags)
+	if val, loaded := r.metrics.LoadOrStore(key, s); loaded {
+		if r.allowLoadRegisteredMetrics {
+			return val.(*IntGauge)
+		}
+		panic("metric with key " + key + " already registered")
+	}
+	return s
+}
+
+// FuncIntGauge registers a callback-backed int gauge (duplicates always panic).
+func (r *Registry) FuncIntGauge(name string, function func() int64) metrics.FuncIntGauge {
+	metricName := r.newMetricName(name)
+	key := registryutil.BuildRegistryKey(metricName, r.tags)
+	s := FuncIntGauge{function: function}
+	if _, loaded := r.metrics.LoadOrStore(key, s); loaded {
+		panic("metric with key " + key + " already registered")
+	}
+	return s
+}
+
+// Timer registers and returns a timer with the given name.
+// Duplicate handling mirrors Counter.
+func (r Registry) Timer(name string) metrics.Timer {
+	s := &Timer{
+		Name:  r.newMetricName(name),
+		Tags:  r.tags,
+		Value: new(atomic.Duration),
+	}
+
+	key := registryutil.BuildRegistryKey(s.Name, r.tags)
+	if val, loaded := r.metrics.LoadOrStore(key, s); loaded {
+		if r.allowLoadRegisteredMetrics {
+			return val.(*Timer)
+		}
+		panic("metric with key " + key + " already registered")
+	}
+	return s
+}
+
+// Histogram registers and returns a histogram with the given buckets.
+// Duplicate handling mirrors Counter.
+func (r Registry) Histogram(name string, buckets metrics.Buckets) metrics.Histogram {
+	s := &Histogram{
+		Name:         r.newMetricName(name),
+		Tags:         r.tags,
+		BucketBounds: metricsutil.BucketsBounds(buckets),
+		BucketValues: make([]int64, buckets.Size()),
+		InfValue:     new(atomic.Int64),
+	}
+
+	key := registryutil.BuildRegistryKey(s.Name, r.tags)
+	if val, loaded := r.metrics.LoadOrStore(key, s); loaded {
+		if r.allowLoadRegisteredMetrics {
+			return val.(*Histogram)
+		}
+		panic("metric with key " + key + " already registered")
+	}
+	return s
+}
+
+// DurationHistogram registers a histogram keyed by duration buckets and
+// exposes it as a metrics.Timer. Duplicate handling mirrors Counter.
+func (r Registry) DurationHistogram(name string, buckets metrics.DurationBuckets) metrics.Timer {
+	s := &Histogram{
+		Name:         r.newMetricName(name),
+		Tags:         r.tags,
+		BucketBounds: metricsutil.DurationBucketsBounds(buckets),
+		BucketValues: make([]int64, buckets.Size()),
+		InfValue:     new(atomic.Int64),
+	}
+
+	key := registryutil.BuildRegistryKey(s.Name, r.tags)
+	if val, loaded := r.metrics.LoadOrStore(key, s); loaded {
+		if r.allowLoadRegisteredMetrics {
+			return val.(*Histogram)
+		}
+		panic("metric with key " + key + " already registered")
+	}
+	return s
+}
+
+// newSubregistry returns the cached sub-registry for (prefix, tags) or
+// creates one sharing this registry's mutex, metric map and subregistry map.
+func (r *Registry) newSubregistry(prefix string, tags map[string]string) *Registry {
+	registryKey := registryutil.BuildRegistryKey(prefix, tags)
+
+	r.m.Lock()
+	defer r.m.Unlock()
+
+	if existing, ok := r.subregistries[registryKey]; ok {
+		return existing
+	}
+
+	subregistry := &Registry{
+		separator:                  r.separator,
+		prefix:                     prefix,
+		tags:                       tags,
+		allowLoadRegisteredMetrics: r.allowLoadRegisteredMetrics,
+
+		subregistries: r.subregistries,
+		m:             r.m,
+
+		metrics: r.metrics,
+	}
+
+	r.subregistries[registryKey] = subregistry
+	return subregistry
+}
+
+// newMetricName prepends the registry prefix to the metric name.
+func (r *Registry) newMetricName(name string) string {
+	return registryutil.BuildFQName(r.separator, r.prefix, name)
+}
diff --git a/library/go/core/metrics/mock/registry_opts.go b/library/go/core/metrics/mock/registry_opts.go
new file mode 100644
index 0000000000..1cc1c3970d
--- /dev/null
+++ b/library/go/core/metrics/mock/registry_opts.go
@@ -0,0 +1,52 @@
+package mock
+
+import (
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/registryutil"
+)
+
+// RegistryOpts configures a mock Registry. Construct via NewRegistryOpts
+// to get usable defaults; see notes on AddTags for the zero value.
+type RegistryOpts struct {
+	Separator                  rune
+	Prefix                     string
+	Tags                       map[string]string
+	AllowLoadRegisteredMetrics bool
+}
+
+// NewRegistryOpts returns new initialized instance of RegistryOpts
+func NewRegistryOpts() *RegistryOpts {
+	return &RegistryOpts{
+		Separator: '.',
+		Tags:      make(map[string]string),
+	}
+}
+
+// SetTags overrides existing tags
+func (o *RegistryOpts) SetTags(tags map[string]string) *RegistryOpts {
+	o.Tags = tags
+	return o
+}
+
+// AddTags merges given tags with existing
+//
+// NOTE(review): writes into o.Tags directly — panics on a zero-value
+// RegistryOpts whose Tags map is nil; NewRegistryOpts initializes it.
+func (o *RegistryOpts) AddTags(tags map[string]string) *RegistryOpts {
+	for k, v := range tags {
+		o.Tags[k] = v
+	}
+	return o
+}
+
+// SetPrefix overrides existing prefix
+func (o *RegistryOpts) SetPrefix(prefix string) *RegistryOpts {
+	o.Prefix = prefix
+	return o
+}
+
+// AppendPrefix adds given prefix as postfix to existing using separator
+func (o *RegistryOpts) AppendPrefix(prefix string) *RegistryOpts {
+	o.Prefix = registryutil.BuildFQName(string(o.Separator), o.Prefix, prefix)
+	return o
+}
+
+// SetSeparator overrides existing separator
+func (o *RegistryOpts) SetSeparator(separator rune) *RegistryOpts {
+	o.Separator = separator
+	return o
+}
diff --git a/library/go/core/metrics/mock/timer.go b/library/go/core/metrics/mock/timer.go
new file mode 100644
index 0000000000..3ea3629ca9
--- /dev/null
+++ b/library/go/core/metrics/mock/timer.go
@@ -0,0 +1,21 @@
+package mock
+
+import (
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+var _ metrics.Timer = (*Timer)(nil)
+
+// Timer measures gauge duration.
+//
+// Only the most recent duration is retained (Store overwrites); this mock
+// does not aggregate. Value must be non-nil before use.
+type Timer struct {
+	Name  string
+	Tags  map[string]string
+	Value *atomic.Duration
+}
+
+// RecordDuration stores the latest observed duration.
+func (t *Timer) RecordDuration(value time.Duration) {
+	t.Value.Store(value)
+}
diff --git a/library/go/core/metrics/mock/vec.go b/library/go/core/metrics/mock/vec.go
new file mode 100644
index 0000000000..f1cde3d47c
--- /dev/null
+++ b/library/go/core/metrics/mock/vec.go
@@ -0,0 +1,256 @@
+package mock
+
+import (
+ "sync"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/registryutil"
+)
+
+// MetricsVector is the untyped backing store shared by all typed vectors below.
+type MetricsVector interface {
+	// With returns the metric for the given tag set, creating it on demand.
+	With(map[string]string) interface{}
+
+	// Reset deletes all metrics in vector.
+	Reset()
+}
+
+// Vector is base implementation of vector of metrics of any supported type
+type Vector struct {
+	Labels    []string
+	Mtx       sync.RWMutex // Protects metrics.
+	Metrics   map[uint64]interface{}
+	NewMetric func(map[string]string) interface{}
+}
+
+// With returns the metric keyed by the hash of tags over v.Labels,
+// creating it via NewMetric on first use.
+//
+// Uses the read-lock fast path first, then re-checks under the write lock
+// (double-checked locking) so concurrent callers create the metric once.
+// Panics if tags do not exactly match the vector labels (VectorHash error).
+func (v *Vector) With(tags map[string]string) interface{} {
+	hv, err := registryutil.VectorHash(tags, v.Labels)
+	if err != nil {
+		panic(err)
+	}
+
+	v.Mtx.RLock()
+	metric, ok := v.Metrics[hv]
+	v.Mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	v.Mtx.Lock()
+	defer v.Mtx.Unlock()
+
+	// Re-check: another goroutine may have created the metric between
+	// releasing the read lock and acquiring the write lock.
+	metric, ok = v.Metrics[hv]
+	if !ok {
+		metric = v.NewMetric(tags)
+		v.Metrics[hv] = metric
+	}
+
+	return metric
+}
+
+// Reset deletes all metrics in this vector.
+func (v *Vector) Reset() {
+	v.Mtx.Lock()
+	defer v.Mtx.Unlock()
+
+	// Delete-in-range is safe in Go and keeps the map's allocated buckets
+	// (equivalent to clear(m) in Go 1.21+).
+	for h := range v.Metrics {
+		delete(v.Metrics, h)
+	}
+}
+
+var _ metrics.CounterVec = (*CounterVec)(nil)
+
+// CounterVec stores counters and
+// implements metrics.CounterVec interface
+type CounterVec struct {
+	Vec MetricsVector
+}
+
+// CounterVec creates a new counters vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) CounterVec(name string, labels []string) metrics.CounterVec {
+	return &CounterVec{
+		Vec: &Vector{
+			// Copy labels so later caller mutations cannot change hashing.
+			Labels:  append([]string(nil), labels...),
+			Metrics: make(map[uint64]interface{}),
+			NewMetric: func(tags map[string]string) interface{} {
+				return r.WithTags(tags).Counter(name)
+			},
+		},
+	}
+}
+
+// With creates new or returns existing counter with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *CounterVec) With(tags map[string]string) metrics.Counter {
+	return v.Vec.With(tags).(*Counter)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *CounterVec) Reset() {
+	v.Vec.Reset()
+}
+
+var _ metrics.GaugeVec = new(GaugeVec)
+
+// GaugeVec stores gauges and
+// implements metrics.GaugeVec interface
+type GaugeVec struct {
+	Vec MetricsVector
+}
+
+// GaugeVec creates a new gauges vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) GaugeVec(name string, labels []string) metrics.GaugeVec {
+	return &GaugeVec{
+		Vec: &Vector{
+			Labels:  append([]string(nil), labels...),
+			Metrics: make(map[uint64]interface{}),
+			NewMetric: func(tags map[string]string) interface{} {
+				return r.WithTags(tags).Gauge(name)
+			},
+		},
+	}
+}
+
+// With creates new or returns existing gauge with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *GaugeVec) With(tags map[string]string) metrics.Gauge {
+	return v.Vec.With(tags).(*Gauge)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *GaugeVec) Reset() {
+	v.Vec.Reset()
+}
+
+var _ metrics.IntGaugeVec = new(IntGaugeVec)
+
+// IntGaugeVec stores gauges and
+// implements metrics.IntGaugeVec interface
+type IntGaugeVec struct {
+	Vec MetricsVector
+}
+
+// IntGaugeVec creates a new gauges vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) IntGaugeVec(name string, labels []string) metrics.IntGaugeVec {
+	return &IntGaugeVec{
+		Vec: &Vector{
+			Labels:  append([]string(nil), labels...),
+			Metrics: make(map[uint64]interface{}),
+			NewMetric: func(tags map[string]string) interface{} {
+				return r.WithTags(tags).IntGauge(name)
+			},
+		},
+	}
+}
+
+// With creates new or returns existing gauge with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *IntGaugeVec) With(tags map[string]string) metrics.IntGauge {
+	return v.Vec.With(tags).(*IntGauge)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *IntGaugeVec) Reset() {
+	v.Vec.Reset()
+}
+
+var _ metrics.TimerVec = new(TimerVec)
+
+// TimerVec stores timers and
+// implements metrics.TimerVec interface
+type TimerVec struct {
+	Vec MetricsVector
+}
+
+// TimerVec creates a new timers vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) TimerVec(name string, labels []string) metrics.TimerVec {
+	return &TimerVec{
+		Vec: &Vector{
+			Labels:  append([]string(nil), labels...),
+			Metrics: make(map[uint64]interface{}),
+			NewMetric: func(tags map[string]string) interface{} {
+				return r.WithTags(tags).Timer(name)
+			},
+		},
+	}
+}
+
+// With creates new or returns existing timer with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *TimerVec) With(tags map[string]string) metrics.Timer {
+	return v.Vec.With(tags).(*Timer)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *TimerVec) Reset() {
+	v.Vec.Reset()
+}
+
+var _ metrics.HistogramVec = (*HistogramVec)(nil)
+
+// HistogramVec stores histograms and
+// implements metrics.HistogramVec interface
+type HistogramVec struct {
+	Vec MetricsVector
+}
+
+// HistogramVec creates a new histograms vector with given metric name and buckets and
+// partitioned by the given label names.
+func (r *Registry) HistogramVec(name string, buckets metrics.Buckets, labels []string) metrics.HistogramVec {
+	return &HistogramVec{
+		Vec: &Vector{
+			Labels:  append([]string(nil), labels...),
+			Metrics: make(map[uint64]interface{}),
+			NewMetric: func(tags map[string]string) interface{} {
+				return r.WithTags(tags).Histogram(name, buckets)
+			},
+		},
+	}
+}
+
+// With creates new or returns existing histogram with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *HistogramVec) With(tags map[string]string) metrics.Histogram {
+	return v.Vec.With(tags).(*Histogram)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *HistogramVec) Reset() {
+	v.Vec.Reset()
+}
+
+var _ metrics.TimerVec = (*DurationHistogramVec)(nil)
+
+// DurationHistogramVec stores duration histograms and
+// implements metrics.TimerVec interface
+type DurationHistogramVec struct {
+	Vec MetricsVector
+}
+
+// DurationHistogramVec creates a new duration histograms vector with given metric name and buckets and
+// partitioned by the given label names.
+func (r *Registry) DurationHistogramVec(name string, buckets metrics.DurationBuckets, labels []string) metrics.TimerVec {
+	return &DurationHistogramVec{
+		Vec: &Vector{
+			Labels:  append([]string(nil), labels...),
+			Metrics: make(map[uint64]interface{}),
+			NewMetric: func(tags map[string]string) interface{} {
+				return r.WithTags(tags).DurationHistogram(name, buckets)
+			},
+		},
+	}
+}
+
+// With creates new or returns existing duration histogram with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *DurationHistogramVec) With(tags map[string]string) metrics.Timer {
+	// Histogram implements metrics.Timer via RecordDuration.
+	return v.Vec.With(tags).(*Histogram)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *DurationHistogramVec) Reset() {
+	v.Vec.Reset()
+}
diff --git a/library/go/core/metrics/mock/ya.make b/library/go/core/metrics/mock/ya.make
new file mode 100644
index 0000000000..0ddaf2285b
--- /dev/null
+++ b/library/go/core/metrics/mock/ya.make
@@ -0,0 +1,14 @@
+GO_LIBRARY()
+
+SRCS(
+ counter.go
+ gauge.go
+ int_gauge.go
+ histogram.go
+ registry.go
+ registry_opts.go
+ timer.go
+ vec.go
+)
+
+END()
diff --git a/library/go/core/metrics/nop/counter.go b/library/go/core/metrics/nop/counter.go
new file mode 100644
index 0000000000..65a36910da
--- /dev/null
+++ b/library/go/core/metrics/nop/counter.go
@@ -0,0 +1,31 @@
+package nop
+
+import "github.com/ydb-platform/ydb/library/go/core/metrics"
+
+var _ metrics.Counter = (*Counter)(nil)
+
+// Counter is a no-op metrics.Counter: all operations are discarded.
+type Counter struct{}
+
+func (Counter) Inc() {}
+
+func (Counter) Add(_ int64) {}
+
+var _ metrics.CounterVec = (*CounterVec)(nil)
+
+// CounterVec is a no-op metrics.CounterVec.
+type CounterVec struct{}
+
+// With ignores tags and returns a no-op counter.
+func (t CounterVec) With(_ map[string]string) metrics.Counter {
+	return Counter{}
+}
+
+func (t CounterVec) Reset() {}
+
+var _ metrics.FuncCounter = (*FuncCounter)(nil)
+
+// FuncCounter retains the callback but never invokes it.
+type FuncCounter struct {
+	function func() int64
+}
+
+func (c FuncCounter) Function() func() int64 {
+	return c.function
+}
diff --git a/library/go/core/metrics/nop/gauge.go b/library/go/core/metrics/nop/gauge.go
new file mode 100644
index 0000000000..9ab9ff6d77
--- /dev/null
+++ b/library/go/core/metrics/nop/gauge.go
@@ -0,0 +1,31 @@
+package nop
+
+import "github.com/ydb-platform/ydb/library/go/core/metrics"
+
+var _ metrics.Gauge = (*Gauge)(nil)
+
+// Gauge is a no-op metrics.Gauge: all operations are discarded.
+type Gauge struct{}
+
+func (Gauge) Set(_ float64) {}
+
+func (Gauge) Add(_ float64) {}
+
+var _ metrics.GaugeVec = (*GaugeVec)(nil)
+
+// GaugeVec is a no-op metrics.GaugeVec.
+type GaugeVec struct{}
+
+// With ignores tags and returns a no-op gauge.
+func (t GaugeVec) With(_ map[string]string) metrics.Gauge {
+	return Gauge{}
+}
+
+func (t GaugeVec) Reset() {}
+
+var _ metrics.FuncGauge = (*FuncGauge)(nil)
+
+// FuncGauge retains the callback but never invokes it.
+type FuncGauge struct {
+	function func() float64
+}
+
+func (g FuncGauge) Function() func() float64 {
+	return g.function
+}
diff --git a/library/go/core/metrics/nop/histogram.go b/library/go/core/metrics/nop/histogram.go
new file mode 100644
index 0000000000..bde571323c
--- /dev/null
+++ b/library/go/core/metrics/nop/histogram.go
@@ -0,0 +1,38 @@
+package nop
+
+import (
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+var (
+	_ metrics.Histogram = (*Histogram)(nil)
+	_ metrics.Timer     = (*Histogram)(nil)
+)
+
+// Histogram is a no-op histogram that also satisfies metrics.Timer.
+type Histogram struct{}
+
+func (Histogram) RecordValue(_ float64) {}
+
+func (Histogram) RecordDuration(_ time.Duration) {}
+
+var _ metrics.HistogramVec = (*HistogramVec)(nil)
+
+// HistogramVec is a no-op metrics.HistogramVec.
+type HistogramVec struct{}
+
+// With ignores tags and returns a no-op histogram.
+func (t HistogramVec) With(_ map[string]string) metrics.Histogram {
+	return Histogram{}
+}
+
+func (t HistogramVec) Reset() {}
+
+var _ metrics.TimerVec = (*DurationHistogramVec)(nil)
+
+// DurationHistogramVec is a no-op metrics.TimerVec.
+type DurationHistogramVec struct{}
+
+// With ignores tags and returns a no-op timer (backed by Histogram).
+func (t DurationHistogramVec) With(_ map[string]string) metrics.Timer {
+	return Histogram{}
+}
+
+func (t DurationHistogramVec) Reset() {}
diff --git a/library/go/core/metrics/nop/int_gauge.go b/library/go/core/metrics/nop/int_gauge.go
new file mode 100644
index 0000000000..226059a79d
--- /dev/null
+++ b/library/go/core/metrics/nop/int_gauge.go
@@ -0,0 +1,31 @@
+package nop
+
+import "github.com/ydb-platform/ydb/library/go/core/metrics"
+
+var _ metrics.IntGauge = (*IntGauge)(nil)
+
+// IntGauge is a no-op metrics.IntGauge: all operations are discarded.
+type IntGauge struct{}
+
+func (IntGauge) Set(_ int64) {}
+
+func (IntGauge) Add(_ int64) {}
+
+var _ metrics.IntGaugeVec = (*IntGaugeVec)(nil)
+
+// IntGaugeVec is a no-op metrics.IntGaugeVec.
+type IntGaugeVec struct{}
+
+// With ignores tags and returns a no-op gauge.
+func (t IntGaugeVec) With(_ map[string]string) metrics.IntGauge {
+	return IntGauge{}
+}
+
+func (t IntGaugeVec) Reset() {}
+
+var _ metrics.FuncIntGauge = (*FuncIntGauge)(nil)
+
+// FuncIntGauge retains the callback but never invokes it.
+type FuncIntGauge struct {
+	function func() int64
+}
+
+func (g FuncIntGauge) Function() func() int64 {
+	return g.function
+}
diff --git a/library/go/core/metrics/nop/registry.go b/library/go/core/metrics/nop/registry.go
new file mode 100644
index 0000000000..97ed977ed7
--- /dev/null
+++ b/library/go/core/metrics/nop/registry.go
@@ -0,0 +1,79 @@
+package nop
+
+import "github.com/ydb-platform/ydb/library/go/core/metrics"
+
+var _ metrics.Registry = (*Registry)(nil)
+
+// Registry is a no-op metrics.Registry: every factory returns a no-op metric.
+type Registry struct{}
+
+// ComposeName discards its parts and returns "".
+// NOTE(review): real registries join parts with a separator; confirm
+// callers never depend on the composed name of a nop registry.
+func (r Registry) ComposeName(parts ...string) string {
+	return ""
+}
+
+func (r Registry) WithTags(_ map[string]string) metrics.Registry {
+	return Registry{}
+}
+
+func (r Registry) WithPrefix(_ string) metrics.Registry {
+	return Registry{}
+}
+
+func (r Registry) Counter(_ string) metrics.Counter {
+	return Counter{}
+}
+
+// FuncCounter keeps the callback (never invoked by the nop implementation).
+func (r Registry) FuncCounter(_ string, function func() int64) metrics.FuncCounter {
+	return FuncCounter{function: function}
+}
+
+func (r Registry) Gauge(_ string) metrics.Gauge {
+	return Gauge{}
+}
+
+func (r Registry) FuncGauge(_ string, function func() float64) metrics.FuncGauge {
+	return FuncGauge{function: function}
+}
+
+func (r Registry) IntGauge(_ string) metrics.IntGauge {
+	return IntGauge{}
+}
+
+func (r Registry) FuncIntGauge(_ string, function func() int64) metrics.FuncIntGauge {
+	return FuncIntGauge{function: function}
+}
+
+func (r Registry) Timer(_ string) metrics.Timer {
+	return Timer{}
+}
+
+func (r Registry) Histogram(_ string, _ metrics.Buckets) metrics.Histogram {
+	return Histogram{}
+}
+
+func (r Registry) DurationHistogram(_ string, _ metrics.DurationBuckets) metrics.Timer {
+	return Histogram{}
+}
+
+func (r Registry) CounterVec(_ string, _ []string) metrics.CounterVec {
+	return CounterVec{}
+}
+
+func (r Registry) GaugeVec(_ string, _ []string) metrics.GaugeVec {
+	return GaugeVec{}
+}
+
+func (r Registry) IntGaugeVec(_ string, _ []string) metrics.IntGaugeVec {
+	return IntGaugeVec{}
+}
+
+func (r Registry) TimerVec(_ string, _ []string) metrics.TimerVec {
+	return TimerVec{}
+}
+
+func (r Registry) HistogramVec(_ string, _ metrics.Buckets, _ []string) metrics.HistogramVec {
+	return HistogramVec{}
+}
+
+func (r Registry) DurationHistogramVec(_ string, _ metrics.DurationBuckets, _ []string) metrics.TimerVec {
+	return DurationHistogramVec{}
+}
diff --git a/library/go/core/metrics/nop/timer.go b/library/go/core/metrics/nop/timer.go
new file mode 100644
index 0000000000..61906032a2
--- /dev/null
+++ b/library/go/core/metrics/nop/timer.go
@@ -0,0 +1,23 @@
+package nop
+
+import (
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+var _ metrics.Timer = (*Timer)(nil)
+
+// Timer is a no-op metrics.Timer: observations are discarded.
+type Timer struct{}
+
+func (Timer) RecordDuration(_ time.Duration) {}
+
+var _ metrics.TimerVec = (*TimerVec)(nil)
+
+// TimerVec is a no-op metrics.TimerVec.
+type TimerVec struct{}
+
+// With ignores tags and returns a no-op timer.
+func (t TimerVec) With(_ map[string]string) metrics.Timer {
+	return Timer{}
+}
+
+func (t TimerVec) Reset() {}
diff --git a/library/go/core/metrics/nop/ya.make b/library/go/core/metrics/nop/ya.make
new file mode 100644
index 0000000000..279bc22ef4
--- /dev/null
+++ b/library/go/core/metrics/nop/ya.make
@@ -0,0 +1,12 @@
+GO_LIBRARY()
+
+SRCS(
+ counter.go
+ gauge.go
+ int_gauge.go
+ histogram.go
+ registry.go
+ timer.go
+)
+
+END()
diff --git a/library/go/core/metrics/prometheus/counter.go b/library/go/core/metrics/prometheus/counter.go
new file mode 100644
index 0000000000..1a07063f30
--- /dev/null
+++ b/library/go/core/metrics/prometheus/counter.go
@@ -0,0 +1,34 @@
+package prometheus
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+var _ metrics.Counter = (*Counter)(nil)
+
+// Counter tracks monotonically increasing value.
+type Counter struct {
+ cnt prometheus.Counter
+}
+
+// Inc increments counter by 1.
+func (c Counter) Inc() {
+ c.cnt.Inc()
+}
+
+// Add adds delta to the counter. Delta must be >=0.
+func (c Counter) Add(delta int64) {
+ c.cnt.Add(float64(delta))
+}
+
+var _ metrics.FuncCounter = (*FuncCounter)(nil)
+
+// FuncCounter is a counter whose value is supplied by a callback.
+// cnt holds the registered prometheus collector; function is kept so it
+// can be handed back via Function().
+type FuncCounter struct {
+ cnt prometheus.CounterFunc
+ function func() int64
+}
+
+// Function returns the callback that supplies the counter value.
+func (c FuncCounter) Function() func() int64 {
+ return c.function
+}
diff --git a/library/go/core/metrics/prometheus/counter_test.go b/library/go/core/metrics/prometheus/counter_test.go
new file mode 100644
index 0000000000..04f0c894f8
--- /dev/null
+++ b/library/go/core/metrics/prometheus/counter_test.go
@@ -0,0 +1,38 @@
+package prometheus
+
+import (
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestCounter_Add checks that Add(delta) is reflected in the written sample.
+func TestCounter_Add(t *testing.T) {
+ c := &Counter{cnt: prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "test_counter_add",
+ })}
+
+ var expectValue int64 = 42
+ c.Add(expectValue)
+
+ var res dto.Metric
+ err := c.cnt.Write(&res)
+
+ assert.NoError(t, err)
+ assert.Equal(t, expectValue, int64(res.GetCounter().GetValue()))
+}
+
+// TestCounter_Inc checks that each Inc bumps the counter by exactly 1.
+func TestCounter_Inc(t *testing.T) {
+ c := &Counter{cnt: prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "test_counter_inc",
+ })}
+
+ var res dto.Metric
+ for i := 1; i <= 10; i++ {
+ c.Inc()
+ err := c.cnt.Write(&res)
+ assert.NoError(t, err)
+ assert.Equal(t, int64(i), int64(res.GetCounter().GetValue()))
+ }
+}
diff --git a/library/go/core/metrics/prometheus/gauge.go b/library/go/core/metrics/prometheus/gauge.go
new file mode 100644
index 0000000000..8683755561
--- /dev/null
+++ b/library/go/core/metrics/prometheus/gauge.go
@@ -0,0 +1,32 @@
+package prometheus
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+var _ metrics.Gauge = (*Gauge)(nil)
+
+// Gauge tracks single float64 value.
+type Gauge struct {
+ gg prometheus.Gauge
+}
+
+// Set replaces the current gauge value.
+func (g Gauge) Set(value float64) {
+ g.gg.Set(value)
+}
+
+// Add adds value to the gauge; value may be negative.
+func (g Gauge) Add(value float64) {
+ g.gg.Add(value)
+}
+
+var _ metrics.FuncGauge = (*FuncGauge)(nil)
+
+// FuncGauge is a gauge whose value is supplied by a callback.
+// ff holds the registered prometheus collector; function is kept so it
+// can be handed back via Function().
+type FuncGauge struct {
+ ff prometheus.GaugeFunc
+ function func() float64
+}
+
+// Function returns the callback that supplies the gauge value.
+func (g FuncGauge) Function() func() float64 {
+ return g.function
+}
diff --git a/library/go/core/metrics/prometheus/gauge_test.go b/library/go/core/metrics/prometheus/gauge_test.go
new file mode 100644
index 0000000000..aebb7586c1
--- /dev/null
+++ b/library/go/core/metrics/prometheus/gauge_test.go
@@ -0,0 +1,39 @@
+package prometheus
+
+import (
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestGauge_Add checks that Add(value) is reflected in the written sample.
+func TestGauge_Add(t *testing.T) {
+ g := &Gauge{gg: prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "test_gauge_add",
+ })}
+
+ var expectValue float64 = 42
+ g.Add(expectValue)
+
+ var res dto.Metric
+ err := g.gg.Write(&res)
+
+ assert.NoError(t, err)
+ assert.Equal(t, expectValue, res.GetGauge().GetValue())
+}
+
+// TestGauge_Set checks that Set(value) overwrites the gauge value.
+func TestGauge_Set(t *testing.T) {
+ g := &Gauge{gg: prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "test_gauge_set",
+ })}
+
+ var expectValue float64 = 42
+ g.Set(expectValue)
+
+ var res dto.Metric
+ err := g.gg.Write(&res)
+
+ assert.NoError(t, err)
+ assert.Equal(t, expectValue, res.GetGauge().GetValue())
+}
diff --git a/library/go/core/metrics/prometheus/gotest/ya.make b/library/go/core/metrics/prometheus/gotest/ya.make
new file mode 100644
index 0000000000..466256dcaa
--- /dev/null
+++ b/library/go/core/metrics/prometheus/gotest/ya.make
@@ -0,0 +1,3 @@
+GO_TEST_FOR(library/go/core/metrics/prometheus)
+
+END()
diff --git a/library/go/core/metrics/prometheus/histogram.go b/library/go/core/metrics/prometheus/histogram.go
new file mode 100644
index 0000000000..bd5e0dca66
--- /dev/null
+++ b/library/go/core/metrics/prometheus/histogram.go
@@ -0,0 +1,22 @@
+package prometheus
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+var _ metrics.Histogram = (*Histogram)(nil)
+
+// Histogram wraps a prometheus observer. It satisfies both
+// metrics.Histogram (RecordValue) and metrics.Timer (RecordDuration).
+type Histogram struct {
+ hm prometheus.Observer
+}
+
+// RecordValue records the given value into the histogram.
+func (h Histogram) RecordValue(value float64) {
+ h.hm.Observe(value)
+}
+
+// RecordDuration records the duration converted to seconds.
+func (h Histogram) RecordDuration(value time.Duration) {
+ h.hm.Observe(value.Seconds())
+}
diff --git a/library/go/core/metrics/prometheus/histogram_test.go b/library/go/core/metrics/prometheus/histogram_test.go
new file mode 100644
index 0000000000..0dec46589c
--- /dev/null
+++ b/library/go/core/metrics/prometheus/histogram_test.go
@@ -0,0 +1,91 @@
+package prometheus
+
+import (
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/ptr"
+ "google.golang.org/protobuf/testing/protocmp"
+)
+
+func TestHistogram_RecordValue(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+
+ h := rg.Histogram("test_histogram_record_value",
+ metrics.NewBuckets(0.1, 1.0, 15.47, 42.0, 128.256),
+ )
+
+ for _, value := range []float64{0.5, 0.7, 34.1234, 127} {
+ h.RecordValue(value)
+ }
+
+ expectBuckets := []*dto.Bucket{
+ {CumulativeCount: ptr.Uint64(0), UpperBound: ptr.Float64(0.1)},
+ {CumulativeCount: ptr.Uint64(2), UpperBound: ptr.Float64(1.0)},
+ {CumulativeCount: ptr.Uint64(2), UpperBound: ptr.Float64(15.47)},
+ {CumulativeCount: ptr.Uint64(3), UpperBound: ptr.Float64(42.0)},
+ {CumulativeCount: ptr.Uint64(4), UpperBound: ptr.Float64(128.256)},
+ }
+
+ gathered, err := rg.Gather()
+ require.NoError(t, err)
+
+ resBuckets := gathered[0].Metric[0].GetHistogram().GetBucket()
+
+ cmpOpts := []cmp.Option{
+ cmpopts.IgnoreUnexported(),
+ protocmp.Transform(),
+ }
+ assert.True(t, cmp.Equal(expectBuckets, resBuckets, cmpOpts...), cmp.Diff(expectBuckets, resBuckets, cmpOpts...))
+}
+
+func TestDurationHistogram_RecordDuration(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+
+ ht := rg.DurationHistogram("test_histogram_record_value",
+ metrics.NewDurationBuckets(
+ 1*time.Millisecond, // 0.1
+ 1*time.Second, // 1.0
+ 15*time.Second+470*time.Millisecond, // 15.47
+ 42*time.Second, // 42.0
+ 128*time.Second+256*time.Millisecond, // 128.256
+ ),
+ )
+
+ values := []time.Duration{
+ 500 * time.Millisecond,
+ 700 * time.Millisecond,
+ 34*time.Second + 1234*time.Millisecond,
+ 127 * time.Second,
+ }
+
+ for _, value := range values {
+ ht.RecordDuration(value)
+ }
+
+ expectBuckets := []*dto.Bucket{
+ {CumulativeCount: ptr.Uint64(0), UpperBound: ptr.Float64(0.001)},
+ {CumulativeCount: ptr.Uint64(2), UpperBound: ptr.Float64(1)},
+ {CumulativeCount: ptr.Uint64(2), UpperBound: ptr.Float64(15.47)},
+ {CumulativeCount: ptr.Uint64(3), UpperBound: ptr.Float64(42)},
+ {CumulativeCount: ptr.Uint64(4), UpperBound: ptr.Float64(128.256)},
+ }
+
+ gathered, err := rg.Gather()
+ require.NoError(t, err)
+
+ resBuckets := gathered[0].Metric[0].GetHistogram().GetBucket()
+
+ cmpOpts := []cmp.Option{
+ cmpopts.IgnoreUnexported(),
+ protocmp.Transform(),
+ }
+
+ assert.True(t, cmp.Equal(expectBuckets, resBuckets, cmpOpts...), cmp.Diff(expectBuckets, resBuckets, cmpOpts...))
+}
diff --git a/library/go/core/metrics/prometheus/int_gauge.go b/library/go/core/metrics/prometheus/int_gauge.go
new file mode 100644
index 0000000000..813b87828c
--- /dev/null
+++ b/library/go/core/metrics/prometheus/int_gauge.go
@@ -0,0 +1,32 @@
+package prometheus
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+var _ metrics.IntGauge = (*IntGauge)(nil)
+
+// IntGauge tracks single int64 value.
+// It embeds the float64 metrics.Gauge and converts values on the way in.
+type IntGauge struct {
+ metrics.Gauge
+}
+
+// Set replaces the current gauge value.
+func (i IntGauge) Set(value int64) {
+ i.Gauge.Set(float64(value))
+}
+
+// Add adds value to the gauge; value may be negative.
+func (i IntGauge) Add(value int64) {
+ i.Gauge.Add(float64(value))
+}
+
+var _ metrics.FuncIntGauge = (*FuncIntGauge)(nil)
+
+// FuncIntGauge is an int gauge whose value is supplied by a callback.
+// ff holds the registered prometheus collector; function is kept so it
+// can be handed back via Function().
+type FuncIntGauge struct {
+ ff prometheus.GaugeFunc
+ function func() int64
+}
+
+// Function returns the callback that supplies the gauge value.
+func (g FuncIntGauge) Function() func() int64 {
+ return g.function
+}
diff --git a/library/go/core/metrics/prometheus/registry.go b/library/go/core/metrics/prometheus/registry.go
new file mode 100644
index 0000000000..bad45fe617
--- /dev/null
+++ b/library/go/core/metrics/prometheus/registry.go
@@ -0,0 +1,254 @@
+package prometheus
+
+import (
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/metricsutil"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/registryutil"
+ "github.com/ydb-platform/ydb/library/go/core/xerrors"
+)
+
+var _ metrics.Registry = (*Registry)(nil)
+
+// Registry is a metrics.Registry implementation backed by a
+// prometheus.Registry. Sub-scopes created with WithTags/WithPrefix share
+// the underlying prometheus registry with their parent.
+type Registry struct {
+ rg *prometheus.Registry
+
+ m *sync.Mutex // guards subregistries; shared with all sub-scopes
+ subregistries map[string]*Registry // cache keyed by prefix+tags
+
+ tags map[string]string // tags attached to every metric of this scope
+ prefix string // prefix prepended to every metric name
+ nameSanitizer func(string) string // optional metric-name rewriter
+}
+
+// NewRegistry creates new Prometheus backed registry.
+//
+// opts may be nil, in which case an empty registry with default settings
+// is returned.
+func NewRegistry(opts *RegistryOpts) *Registry {
+ r := &Registry{
+ rg: prometheus.NewRegistry(),
+ m: new(sync.Mutex),
+ subregistries: make(map[string]*Registry),
+ tags: make(map[string]string),
+ }
+
+ if opts != nil {
+ r.prefix = opts.Prefix
+ r.tags = opts.Tags
+ if opts.rg != nil {
+ r.rg = opts.rg
+ }
+ // Install the name sanitizer before running collectors so that
+ // metrics registered by collectors also get sanitized names.
+ // (Previously collectors ran first and bypassed the sanitizer.)
+ if opts.NameSanitizer != nil {
+ r.nameSanitizer = opts.NameSanitizer
+ }
+ for _, collector := range opts.Collectors {
+ collector(r)
+ }
+ }
+
+ return r
+}
+
+// WithTags creates new sub-scope, where each metric has tags attached to it.
+// New tags are merged over the current scope's tags; repeated calls with the
+// same prefix/tags pair return the same cached sub-registry.
+func (r Registry) WithTags(tags map[string]string) metrics.Registry {
+ return r.newSubregistry(r.prefix, registryutil.MergeTags(r.tags, tags))
+}
+
+// WithPrefix creates new sub-scope, where each metric has prefix added to it name.
+// The new prefix is appended to the current one with "_".
+func (r Registry) WithPrefix(prefix string) metrics.Registry {
+ return r.newSubregistry(registryutil.BuildFQName("_", r.prefix, prefix), r.tags)
+}
+
+// ComposeName builds FQ name with appropriate separator.
+func (r Registry) ComposeName(parts ...string) string {
+ return registryutil.BuildFQName("_", parts...)
+}
+
+// Counter registers (or reuses) a prometheus counter with this scope's
+// prefix and tags. On a duplicate registration the previously registered
+// collector is returned; any other registration error panics.
+func (r Registry) Counter(name string) metrics.Counter {
+ name = r.sanitizeName(name)
+ cnt := prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ })
+
+ if err := r.rg.Register(cnt); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &Counter{cnt: existErr.ExistingCollector.(prometheus.Counter)}
+ }
+ panic(err)
+ }
+
+ return &Counter{cnt: cnt}
+}
+
+// FuncCounter registers a counter whose value is produced by function on
+// each scrape (the int64 result is converted to float64).
+//
+// NOTE(review): unlike Counter, a duplicate registration is not recovered
+// via AlreadyRegisteredError here — any registration error panics.
+func (r Registry) FuncCounter(name string, function func() int64) metrics.FuncCounter {
+ name = r.sanitizeName(name)
+ cnt := prometheus.NewCounterFunc(prometheus.CounterOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ }, func() float64 {
+ return float64(function())
+ })
+
+ if err := r.rg.Register(cnt); err != nil {
+ panic(err)
+ }
+
+ return &FuncCounter{
+ cnt: cnt,
+ function: function,
+ }
+}
+
+// Gauge registers (or reuses) a prometheus gauge with this scope's prefix
+// and tags. On a duplicate registration the previously registered collector
+// is returned; any other registration error panics.
+func (r Registry) Gauge(name string) metrics.Gauge {
+ name = r.sanitizeName(name)
+ gg := prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ })
+
+ if err := r.rg.Register(gg); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &Gauge{gg: existErr.ExistingCollector.(prometheus.Gauge)}
+ }
+ panic(err)
+ }
+
+ return &Gauge{gg: gg}
+}
+
+// FuncGauge registers a gauge whose value is produced by function on each
+// scrape.
+//
+// NOTE(review): unlike Gauge, a duplicate registration is not recovered
+// via AlreadyRegisteredError here — any registration error panics.
+func (r Registry) FuncGauge(name string, function func() float64) metrics.FuncGauge {
+ name = r.sanitizeName(name)
+ ff := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ }, function)
+ if err := r.rg.Register(ff); err != nil {
+ panic(err)
+ }
+ return &FuncGauge{
+ ff: ff,
+ function: function,
+ }
+}
+
+// IntGauge registers an int gauge implemented on top of the float64 Gauge.
+func (r Registry) IntGauge(name string) metrics.IntGauge {
+ return &IntGauge{Gauge: r.Gauge(name)}
+}
+
+// FuncIntGauge registers a gauge whose value is produced by function on
+// each scrape (the int64 result is converted to float64).
+//
+// NOTE(review): a duplicate registration is not recovered via
+// AlreadyRegisteredError here — any registration error panics.
+func (r Registry) FuncIntGauge(name string, function func() int64) metrics.FuncIntGauge {
+ name = r.sanitizeName(name)
+ ff := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ }, func() float64 { return float64(function()) })
+ if err := r.rg.Register(ff); err != nil {
+ panic(err)
+ }
+ return &FuncIntGauge{
+ ff: ff,
+ function: function,
+ }
+}
+
+// Timer registers a timer backed by a plain gauge (see Timer.RecordDuration),
+// so only the most recently recorded duration (in seconds) is exported.
+// On a duplicate registration the previously registered collector is
+// returned; any other registration error panics.
+func (r Registry) Timer(name string) metrics.Timer {
+ name = r.sanitizeName(name)
+ gg := prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ })
+
+ if err := r.rg.Register(gg); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &Timer{gg: existErr.ExistingCollector.(prometheus.Gauge)}
+ }
+ panic(err)
+ }
+
+ return &Timer{gg: gg}
+}
+
+// Histogram registers a histogram with the given value buckets.
+// On a duplicate registration the previously registered collector is
+// returned; any other registration error panics.
+func (r Registry) Histogram(name string, buckets metrics.Buckets) metrics.Histogram {
+ name = r.sanitizeName(name)
+ hm := prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ Buckets: metricsutil.BucketsBounds(buckets),
+ })
+
+ if err := r.rg.Register(hm); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &Histogram{hm: existErr.ExistingCollector.(prometheus.Observer)}
+ }
+ panic(err)
+ }
+
+ return &Histogram{hm: hm}
+}
+
+// DurationHistogram registers a histogram that records durations in
+// seconds, exposed through the metrics.Timer interface.
+// On a duplicate registration the previously registered collector is
+// returned; any other registration error panics.
+func (r Registry) DurationHistogram(name string, buckets metrics.DurationBuckets) metrics.Timer {
+ name = r.sanitizeName(name)
+ hm := prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ Buckets: metricsutil.DurationBucketsBounds(buckets),
+ })
+
+ if err := r.rg.Register(hm); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ // Assert to prometheus.Observer — not prometheus.Histogram — for
+ // consistency with Histogram above: Observer is all the wrapper
+ // stores, and the wider assertion cannot spuriously fail.
+ return &Histogram{hm: existErr.ExistingCollector.(prometheus.Observer)}
+ }
+ panic(err)
+ }
+
+ return &Histogram{hm: hm}
+}
+
+// Gather returns raw collected Prometheus metrics.
+func (r Registry) Gather() ([]*dto.MetricFamily, error) {
+ return r.rg.Gather()
+}
+
+// newSubregistry returns the sub-registry for the given prefix/tags pair,
+// creating it on first use. Sub-registries share the prometheus registry,
+// the mutex and the cache map with their parent, so the same prefix/tags
+// combination always yields the same *Registry instance.
+func (r *Registry) newSubregistry(prefix string, tags map[string]string) *Registry {
+ registryKey := registryutil.BuildRegistryKey(prefix, tags)
+
+ r.m.Lock()
+ defer r.m.Unlock()
+
+ if old, ok := r.subregistries[registryKey]; ok {
+ return old
+ }
+
+ subregistry := &Registry{
+ rg: r.rg,
+ m: r.m,
+ subregistries: r.subregistries,
+ tags: tags,
+ prefix: prefix,
+ nameSanitizer: r.nameSanitizer,
+ }
+
+ r.subregistries[registryKey] = subregistry
+ return subregistry
+}
+
+// sanitizeName applies the configured name sanitizer, if any.
+func (r *Registry) sanitizeName(name string) string {
+ if r.nameSanitizer == nil {
+ return name
+ }
+ return r.nameSanitizer(name)
+}
diff --git a/library/go/core/metrics/prometheus/registry_opts.go b/library/go/core/metrics/prometheus/registry_opts.go
new file mode 100644
index 0000000000..fedb019d85
--- /dev/null
+++ b/library/go/core/metrics/prometheus/registry_opts.go
@@ -0,0 +1,84 @@
+package prometheus
+
+import (
+ "context"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/collect"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/registryutil"
+)
+
+// RegistryOpts holds construction-time options for NewRegistry.
+type RegistryOpts struct {
+ Prefix string // prefix prepended to every metric name
+ Tags map[string]string // tags attached to every metric
+ rg *prometheus.Registry // optional externally supplied prometheus registry
+ Collectors []func(metrics.Registry) // collectors run against the new registry
+ NameSanitizer func(string) string // optional metric-name rewriter
+}
+
+// NewRegistryOpts returns new initialized instance of RegistryOpts.
+func NewRegistryOpts() *RegistryOpts {
+ return &RegistryOpts{
+ Tags: make(map[string]string),
+ }
+}
+
+// SetTags overrides existing tags.
+func (o *RegistryOpts) SetTags(tags map[string]string) *RegistryOpts {
+ o.Tags = tags
+ return o
+}
+
+// AddTags merges given tags with existing.
+// On key collision the new value wins.
+func (o *RegistryOpts) AddTags(tags map[string]string) *RegistryOpts {
+ for k, v := range tags {
+ o.Tags[k] = v
+ }
+ return o
+}
+
+// SetPrefix overrides existing prefix.
+func (o *RegistryOpts) SetPrefix(prefix string) *RegistryOpts {
+ o.Prefix = prefix
+ return o
+}
+
+// AppendPrefix adds given prefix as postfix to existing using separator.
+func (o *RegistryOpts) AppendPrefix(prefix string) *RegistryOpts {
+ o.Prefix = registryutil.BuildFQName("_", o.Prefix, prefix)
+ return o
+}
+
+// SetRegistry sets the given prometheus registry for further usage instead
+// of creating a new one.
+//
+// This is primarily used to unite externally defined metrics with metrics kept
+// in the core registry.
+func (o *RegistryOpts) SetRegistry(rg *prometheus.Registry) *RegistryOpts {
+ o.rg = rg
+ return o
+}
+
+// AddCollectors adds collectors that handle their metrics automatically (e.g. system metrics).
+// The given ctx and policy are captured and passed to each collector when
+// the registry is constructed; calling with no collectors is a no-op.
+func (o *RegistryOpts) AddCollectors(
+ ctx context.Context, c metrics.CollectPolicy, collectors ...collect.Func,
+) *RegistryOpts {
+ if len(collectors) == 0 {
+ return o
+ }
+
+ o.Collectors = append(o.Collectors, func(r metrics.Registry) {
+ for _, collector := range collectors {
+ collector(ctx, r, c)
+ }
+ })
+ return o
+}
+
+// SetNameSanitizer sets a function which will be called for each metric's name.
+// It allows to alter names, for example to replace invalid characters.
+func (o *RegistryOpts) SetNameSanitizer(v func(string) string) *RegistryOpts {
+ o.NameSanitizer = v
+ return o
+}
diff --git a/library/go/core/metrics/prometheus/registry_test.go b/library/go/core/metrics/prometheus/registry_test.go
new file mode 100644
index 0000000000..73d071a8de
--- /dev/null
+++ b/library/go/core/metrics/prometheus/registry_test.go
@@ -0,0 +1,481 @@
+package prometheus
+
+import (
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/ptr"
+ "github.com/ydb-platform/ydb/library/go/test/assertpb"
+ "google.golang.org/protobuf/testing/protocmp"
+)
+
+func TestNewRegistry(t *testing.T) {
+ expected := &Registry{
+ rg: prometheus.NewRegistry(),
+ m: new(sync.Mutex),
+ subregistries: make(map[string]*Registry),
+ tags: map[string]string{},
+ prefix: "",
+ }
+
+ r := NewRegistry(nil)
+ assert.IsType(t, expected, r)
+ assert.Equal(t, expected, r)
+}
+
+func TestRegistry_Subregisters(t *testing.T) {
+ r := NewRegistry(nil)
+ sr1 := r.WithPrefix("subregister1").
+ WithTags(map[string]string{"ololo": "trololo"})
+ sr2 := sr1.WithPrefix("subregister2").
+ WithTags(map[string]string{"shimba": "boomba"})
+
+ // check global subregistries map
+ expectedMap := map[string]*Registry{
+ "\"subregister1\"{}": {
+ rg: r.rg,
+ m: r.m,
+ subregistries: r.subregistries,
+ prefix: "subregister1",
+ tags: make(map[string]string),
+ },
+ "\"subregister1\"{ololo=trololo}": {
+ rg: r.rg,
+ m: r.m,
+ subregistries: r.subregistries,
+ tags: map[string]string{"ololo": "trololo"},
+ prefix: "subregister1",
+ },
+ "\"subregister1_subregister2\"{ololo=trololo}": {
+ rg: r.rg,
+ m: r.m,
+ subregistries: r.subregistries,
+ tags: map[string]string{"ololo": "trololo"},
+ prefix: "subregister1_subregister2",
+ },
+ "\"subregister1_subregister2\"{ololo=trololo,shimba=boomba}": {
+ rg: r.rg,
+ m: r.m,
+ subregistries: r.subregistries,
+ tags: map[string]string{"ololo": "trololo", "shimba": "boomba"},
+ prefix: "subregister1_subregister2",
+ },
+ }
+
+ assert.EqualValues(t, expectedMap, r.subregistries)
+
+ // top-register write
+ rCnt := r.Counter("subregisters_count")
+ rCnt.Add(2)
+
+ // sub-register write
+ srTm := sr1.Timer("mytimer")
+ srTm.RecordDuration(42 * time.Second)
+
+ // sub-sub-register write
+ srHm := sr2.Histogram("myhistogram", metrics.NewBuckets(1, 2, 3))
+ srHm.RecordValue(1.5)
+
+ mr, err := r.Gather()
+ assert.NoError(t, err)
+
+ assert.IsType(t, mr, []*dto.MetricFamily{})
+
+ expected := []*dto.MetricFamily{
+ {
+ Name: ptr.String("subregister1_mytimer"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_GAUGE),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {Name: ptr.String("ololo"), Value: ptr.String("trololo")},
+ },
+ Gauge: &dto.Gauge{Value: ptr.Float64(42)},
+ },
+ },
+ },
+ {
+ Name: ptr.String("subregister1_subregister2_myhistogram"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_HISTOGRAM),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {Name: ptr.String("ololo"), Value: ptr.String("trololo")},
+ {Name: ptr.String("shimba"), Value: ptr.String("boomba")},
+ },
+ Histogram: &dto.Histogram{
+ SampleCount: ptr.Uint64(1),
+ SampleSum: ptr.Float64(1.5),
+ Bucket: []*dto.Bucket{
+ {CumulativeCount: ptr.Uint64(0), UpperBound: ptr.Float64(1)},
+ {CumulativeCount: ptr.Uint64(1), UpperBound: ptr.Float64(2)},
+ {CumulativeCount: ptr.Uint64(1), UpperBound: ptr.Float64(3)},
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: ptr.String("subregisters_count"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_COUNTER),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Counter: &dto.Counter{Value: ptr.Float64(2)},
+ },
+ },
+ },
+ }
+
+ cmpOpts := []cmp.Option{
+ protocmp.Transform(),
+ }
+ assert.True(t, cmp.Equal(expected, mr, cmpOpts...), cmp.Diff(expected, mr, cmpOpts...))
+}
+
+func TestRegistry_Counter(t *testing.T) {
+ r := NewRegistry(nil)
+ sr := r.WithPrefix("myprefix").
+ WithTags(map[string]string{"ololo": "trololo"})
+
+ // must panic on empty name
+ assert.Panics(t, func() { r.Counter("") })
+
+ srCnt := sr.Counter("mycounter")
+ srCnt.Add(42)
+
+ mr, err := r.Gather()
+ assert.NoError(t, err)
+
+ assert.IsType(t, mr, []*dto.MetricFamily{})
+
+ expected := []*dto.MetricFamily{
+ {
+ Name: ptr.String("myprefix_mycounter"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_COUNTER),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {Name: ptr.String("ololo"), Value: ptr.String("trololo")},
+ },
+ Counter: &dto.Counter{Value: ptr.Float64(42)},
+ },
+ },
+ },
+ }
+ cmpOpts := []cmp.Option{
+ protocmp.Transform(),
+ }
+ assert.True(t, cmp.Equal(expected, mr, cmpOpts...), cmp.Diff(expected, mr, cmpOpts...))
+}
+
+func TestRegistry_DurationHistogram(t *testing.T) {
+ r := NewRegistry(nil)
+ sr := r.WithPrefix("myprefix").
+ WithTags(map[string]string{"ololo": "trololo"})
+
+ // must panic on empty name
+ assert.Panics(t, func() { r.DurationHistogram("", nil) })
+
+ cnt := sr.DurationHistogram("myhistogram", metrics.NewDurationBuckets(
+ 1*time.Second, 3*time.Second, 5*time.Second,
+ ))
+
+ cnt.RecordDuration(2 * time.Second)
+ cnt.RecordDuration(4 * time.Second)
+
+ mr, err := r.Gather()
+ assert.NoError(t, err)
+
+ assert.IsType(t, mr, []*dto.MetricFamily{})
+
+ expected := []*dto.MetricFamily{
+ {
+ Name: ptr.String("myprefix_myhistogram"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_HISTOGRAM),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{{Name: ptr.String("ololo"), Value: ptr.String("trololo")}},
+ Histogram: &dto.Histogram{
+ SampleCount: ptr.Uint64(2),
+ SampleSum: ptr.Float64(6),
+ Bucket: []*dto.Bucket{
+ {CumulativeCount: ptr.Uint64(0), UpperBound: ptr.Float64(1)},
+ {CumulativeCount: ptr.Uint64(1), UpperBound: ptr.Float64(3)},
+ {CumulativeCount: ptr.Uint64(2), UpperBound: ptr.Float64(5)},
+ },
+ },
+ },
+ },
+ },
+ }
+ assertpb.Equal(t, expected, mr)
+}
+
+func TestRegistry_Gauge(t *testing.T) {
+ r := NewRegistry(nil)
+ sr := r.WithPrefix("myprefix").
+ WithTags(map[string]string{"ololo": "trololo"})
+
+ // must panic on empty name
+ assert.Panics(t, func() { r.Gauge("") })
+
+ cnt := sr.Gauge("mygauge")
+ cnt.Add(42)
+
+ mr, err := r.Gather()
+ assert.NoError(t, err)
+
+ assert.IsType(t, mr, []*dto.MetricFamily{})
+
+ expected := []*dto.MetricFamily{
+ {
+ Name: ptr.String("myprefix_mygauge"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_GAUGE),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{{Name: ptr.String("ololo"), Value: ptr.String("trololo")}},
+ Gauge: &dto.Gauge{Value: ptr.Float64(42)},
+ },
+ },
+ },
+ }
+ assertpb.Equal(t, expected, mr)
+}
+
+func TestRegistry_Histogram(t *testing.T) {
+ r := NewRegistry(nil)
+ sr := r.WithPrefix("myprefix").
+ WithTags(map[string]string{"ololo": "trololo"})
+
+ // must panic on empty name
+ assert.Panics(t, func() { r.Histogram("", nil) })
+
+ cnt := sr.Histogram("myhistogram", metrics.NewBuckets(1, 3, 5))
+
+ cnt.RecordValue(2)
+ cnt.RecordValue(4)
+
+ mr, err := r.Gather()
+ assert.NoError(t, err)
+
+ assert.IsType(t, mr, []*dto.MetricFamily{})
+
+ expected := []*dto.MetricFamily{
+ {
+ Name: ptr.String("myprefix_myhistogram"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_HISTOGRAM),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{{Name: ptr.String("ololo"), Value: ptr.String("trololo")}},
+ Histogram: &dto.Histogram{
+ SampleCount: ptr.Uint64(2),
+ SampleSum: ptr.Float64(6),
+ Bucket: []*dto.Bucket{
+ {CumulativeCount: ptr.Uint64(0), UpperBound: ptr.Float64(1)},
+ {CumulativeCount: ptr.Uint64(1), UpperBound: ptr.Float64(3)},
+ {CumulativeCount: ptr.Uint64(2), UpperBound: ptr.Float64(5)},
+ },
+ },
+ },
+ },
+ },
+ }
+ assertpb.Equal(t, expected, mr)
+}
+
+func TestRegistry_Timer(t *testing.T) {
+ r := NewRegistry(nil)
+ sr := r.WithPrefix("myprefix").
+ WithTags(map[string]string{"ololo": "trololo"})
+
+ // must panic on empty name
+ assert.Panics(t, func() { r.Timer("") })
+
+ cnt := sr.Timer("mytimer")
+ cnt.RecordDuration(42 * time.Second)
+
+ mr, err := r.Gather()
+ assert.NoError(t, err)
+
+ assert.IsType(t, mr, []*dto.MetricFamily{})
+
+ expected := []*dto.MetricFamily{
+ {
+ Name: ptr.String("myprefix_mytimer"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_GAUGE),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{{Name: ptr.String("ololo"), Value: ptr.String("trololo")}},
+ Gauge: &dto.Gauge{Value: ptr.Float64(42)},
+ },
+ },
+ },
+ }
+ assertpb.Equal(t, expected, mr)
+}
+
+func TestRegistry_WithPrefix(t *testing.T) {
+ testCases := []struct {
+ r metrics.Registry
+ expected string
+ }{
+ {
+ r: func() metrics.Registry {
+ return NewRegistry(nil)
+ }(),
+ expected: "",
+ },
+ {
+ r: func() metrics.Registry {
+ return NewRegistry(nil).WithPrefix("myprefix")
+ }(),
+ expected: "myprefix",
+ },
+ {
+ r: func() metrics.Registry {
+ return NewRegistry(nil).WithPrefix("__myprefix_")
+ }(),
+ expected: "myprefix",
+ },
+ {
+ r: func() metrics.Registry {
+ return NewRegistry(nil).WithPrefix("__myprefix_").WithPrefix("mysubprefix______")
+ }(),
+ expected: "myprefix_mysubprefix",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run("", func(t *testing.T) {
+ assert.Equal(t, tc.expected, tc.r.(*Registry).prefix)
+ })
+ }
+}
+
+func TestRegistry_WithTags(t *testing.T) {
+ testCases := []struct {
+ r metrics.Registry
+ expected map[string]string
+ }{
+ {
+ r: func() metrics.Registry {
+ return NewRegistry(nil)
+ }(),
+ expected: map[string]string{},
+ },
+ {
+ r: func() metrics.Registry {
+ return NewRegistry(nil).WithTags(map[string]string{"shimba": "boomba"})
+ }(),
+ expected: map[string]string{"shimba": "boomba"},
+ },
+ {
+ r: func() metrics.Registry {
+ return NewRegistry(nil).
+ WithTags(map[string]string{"shimba": "boomba"}).
+ WithTags(map[string]string{"looken": "tooken"})
+ }(),
+ expected: map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ },
+ },
+ {
+ r: func() metrics.Registry {
+ return NewRegistry(nil).
+ WithTags(map[string]string{"shimba": "boomba"}).
+ WithTags(map[string]string{"looken": "tooken"}).
+ WithTags(map[string]string{"shimba": "cooken"})
+ }(),
+ expected: map[string]string{
+ "shimba": "cooken",
+ "looken": "tooken",
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run("", func(t *testing.T) {
+ assert.Equal(t, tc.expected, tc.r.(*Registry).tags)
+ })
+ }
+}
+
+func TestRegistry_WithTags_NoPanic(t *testing.T) {
+ _ = NewRegistry(nil).WithTags(map[string]string{"foo": "bar"})
+ _ = NewRegistry(nil).WithTags(map[string]string{"foo": "bar"})
+}
+
+func TestRegistry_Counter_NoPanic(t *testing.T) {
+ r := NewRegistry(nil)
+ sr := r.WithPrefix("myprefix").
+ WithTags(map[string]string{"ololo": "trololo"})
+ cntrRaz := sr.Counter("mycounter").(*Counter)
+ cntrDvaz := sr.Counter("mycounter").(*Counter)
+ assert.Equal(t, cntrRaz.cnt, cntrDvaz.cnt)
+ cntrRaz.Add(100)
+ cntrDvaz.Add(100)
+ mr, err := r.Gather()
+ assert.NoError(t, err)
+ expected := []*dto.MetricFamily{
+ {
+ Name: ptr.String("myprefix_mycounter"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_COUNTER),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{{Name: ptr.String("ololo"), Value: ptr.String("trololo")}},
+ Counter: &dto.Counter{Value: ptr.Float64(200)},
+ },
+ },
+ },
+ }
+ assertpb.Equal(t, expected, mr)
+}
+
+func TestRegistry_NameSanitizer(t *testing.T) {
+ testCases := []struct {
+ opts *RegistryOpts
+ name string
+ want string
+ }{
+ {
+ opts: nil,
+ name: "some_name",
+ want: "some_name",
+ },
+ {
+ opts: NewRegistryOpts().SetNameSanitizer(func(s string) string {
+ return strings.ReplaceAll(s, "/", "_")
+ }),
+ name: "other/name",
+ want: "other_name",
+ },
+ }
+
+ for _, tc := range testCases {
+ r := NewRegistry(tc.opts)
+ _ = r.Counter(tc.name)
+ mfs, err := r.Gather()
+ assert.NoError(t, err)
+ assert.NotEmpty(t, mfs)
+
+ assert.Equal(t, tc.want, *mfs[0].Name)
+ }
+}
diff --git a/library/go/core/metrics/prometheus/timer.go b/library/go/core/metrics/prometheus/timer.go
new file mode 100644
index 0000000000..3350e5a61d
--- /dev/null
+++ b/library/go/core/metrics/prometheus/timer.go
@@ -0,0 +1,19 @@
+package prometheus
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+var _ metrics.Timer = (*Timer)(nil)
+
+// Timer measures gauge duration.
+// It is backed by a prometheus gauge that holds the last recorded
+// duration, expressed in seconds.
+type Timer struct {
+ gg prometheus.Gauge
+}
+
+// RecordDuration stores value (as seconds) in the gauge, overwriting any
+// previous observation.
+func (t Timer) RecordDuration(value time.Duration) {
+ t.gg.Set(value.Seconds())
+}
diff --git a/library/go/core/metrics/prometheus/timer_test.go b/library/go/core/metrics/prometheus/timer_test.go
new file mode 100644
index 0000000000..a520b6f477
--- /dev/null
+++ b/library/go/core/metrics/prometheus/timer_test.go
@@ -0,0 +1,24 @@
+package prometheus
+
+import (
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestTimer_RecordDuration checks that the recorded duration is written
+// to the backing gauge in seconds.
+func TestTimer_RecordDuration(t *testing.T) {
+ g := &Timer{gg: prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "test_timer_record_duration",
+ })}
+
+ g.RecordDuration(42 * time.Second)
+
+ var res dto.Metric
+ err := g.gg.Write(&res)
+
+ assert.NoError(t, err)
+ assert.Equal(t, float64(42), res.GetGauge().GetValue())
+}
diff --git a/library/go/core/metrics/prometheus/vec.go b/library/go/core/metrics/prometheus/vec.go
new file mode 100644
index 0000000000..731c7b752a
--- /dev/null
+++ b/library/go/core/metrics/prometheus/vec.go
@@ -0,0 +1,248 @@
+package prometheus
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/metricsutil"
+ "github.com/ydb-platform/ydb/library/go/core/xerrors"
+)
+
+var _ metrics.CounterVec = (*CounterVec)(nil)
+
+// CounterVec wraps prometheus.CounterVec
+// and implements metrics.CounterVec interface.
+type CounterVec struct {
+ vec *prometheus.CounterVec
+}
+
+// CounterVec creates a new counters vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) CounterVec(name string, labels []string) metrics.CounterVec {
+ name = r.sanitizeName(name)
+ vec := prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ }, labels)
+
+ if err := r.rg.Register(vec); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &CounterVec{vec: existErr.ExistingCollector.(*prometheus.CounterVec)}
+ }
+ panic(err)
+ }
+
+ return &CounterVec{vec: vec}
+}
+
+// With creates new or returns existing counter with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *CounterVec) With(tags map[string]string) metrics.Counter {
+ return &Counter{cnt: v.vec.With(tags)}
+}
+
+// Reset deletes all metrics in this vector.
+func (v *CounterVec) Reset() {
+ v.vec.Reset()
+}
+
+var _ metrics.GaugeVec = (*GaugeVec)(nil)
+
+// GaugeVec wraps prometheus.GaugeVec
+// and implements metrics.GaugeVec interface.
+type GaugeVec struct {
+ vec *prometheus.GaugeVec
+}
+
+// GaugeVec creates a new gauges vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) GaugeVec(name string, labels []string) metrics.GaugeVec {
+ name = r.sanitizeName(name)
+ vec := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ }, labels)
+
+ if err := r.rg.Register(vec); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &GaugeVec{vec: existErr.ExistingCollector.(*prometheus.GaugeVec)}
+ }
+ panic(err)
+ }
+
+ return &GaugeVec{vec: vec}
+}
+
+// With creates new or returns existing gauge with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *GaugeVec) With(tags map[string]string) metrics.Gauge {
+ return &Gauge{gg: v.vec.With(tags)}
+}
+
+// Reset deletes all metrics in this vector.
+func (v *GaugeVec) Reset() {
+ v.vec.Reset()
+}
+
+// IntGaugeVec wraps prometheus.GaugeVec
+// and implements metrics.IntGaugeVec interface.
+type IntGaugeVec struct {
+ vec *prometheus.GaugeVec
+}
+
+// IntGaugeVec creates a new gauges vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) IntGaugeVec(name string, labels []string) metrics.IntGaugeVec {
+ name = r.sanitizeName(name)
+ vec := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ }, labels)
+
+ if err := r.rg.Register(vec); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &IntGaugeVec{vec: existErr.ExistingCollector.(*prometheus.GaugeVec)}
+ }
+ panic(err)
+ }
+
+ return &IntGaugeVec{vec: vec}
+}
+
+// With creates new or returns existing gauge with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *IntGaugeVec) With(tags map[string]string) metrics.IntGauge {
+ return &IntGauge{Gauge{gg: v.vec.With(tags)}}
+}
+
+// Reset deletes all metrics in this vector.
+func (v *IntGaugeVec) Reset() {
+ v.vec.Reset()
+}
+
+var _ metrics.TimerVec = (*TimerVec)(nil)
+
+// TimerVec wraps prometheus.GaugeVec
+// and implements metrics.TimerVec interface.
+type TimerVec struct {
+ vec *prometheus.GaugeVec
+}
+
+// TimerVec creates a new timers vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) TimerVec(name string, labels []string) metrics.TimerVec {
+ name = r.sanitizeName(name)
+ vec := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ }, labels)
+
+ if err := r.rg.Register(vec); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &TimerVec{vec: existErr.ExistingCollector.(*prometheus.GaugeVec)}
+ }
+ panic(err)
+ }
+
+ return &TimerVec{vec: vec}
+}
+
+// With creates new or returns existing timer with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *TimerVec) With(tags map[string]string) metrics.Timer {
+ return &Timer{gg: v.vec.With(tags)}
+}
+
+// Reset deletes all metrics in this vector.
+func (v *TimerVec) Reset() {
+ v.vec.Reset()
+}
+
+var _ metrics.HistogramVec = (*HistogramVec)(nil)
+
+// HistogramVec wraps prometheus.HistogramVec
+// and implements metrics.HistogramVec interface.
+type HistogramVec struct {
+ vec *prometheus.HistogramVec
+}
+
+// HistogramVec creates a new histograms vector with given metric name and buckets and
+// partitioned by the given label names.
+func (r *Registry) HistogramVec(name string, buckets metrics.Buckets, labels []string) metrics.HistogramVec {
+ name = r.sanitizeName(name)
+ vec := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ Buckets: metricsutil.BucketsBounds(buckets),
+ }, labels)
+
+ if err := r.rg.Register(vec); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &HistogramVec{vec: existErr.ExistingCollector.(*prometheus.HistogramVec)}
+ }
+ panic(err)
+ }
+
+ return &HistogramVec{vec: vec}
+}
+
+// With creates new or returns existing histogram with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *HistogramVec) With(tags map[string]string) metrics.Histogram {
+ return &Histogram{hm: v.vec.With(tags)}
+}
+
+// Reset deletes all metrics in this vector.
+func (v *HistogramVec) Reset() {
+ v.vec.Reset()
+}
+
+var _ metrics.TimerVec = (*DurationHistogramVec)(nil)
+
+// DurationHistogramVec wraps prometheus.HistogramVec
+// and implements metrics.TimerVec interface.
+type DurationHistogramVec struct {
+ vec *prometheus.HistogramVec
+}
+
+// DurationHistogramVec creates a new duration histograms vector with given metric name and buckets and
+// partitioned by the given label names.
+func (r *Registry) DurationHistogramVec(name string, buckets metrics.DurationBuckets, labels []string) metrics.TimerVec {
+ name = r.sanitizeName(name)
+ vec := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: r.prefix,
+ Name: name,
+ ConstLabels: r.tags,
+ Buckets: metricsutil.DurationBucketsBounds(buckets),
+ }, labels)
+
+ if err := r.rg.Register(vec); err != nil {
+ var existErr prometheus.AlreadyRegisteredError
+ if xerrors.As(err, &existErr) {
+ return &DurationHistogramVec{vec: existErr.ExistingCollector.(*prometheus.HistogramVec)}
+ }
+ panic(err)
+ }
+
+ return &DurationHistogramVec{vec: vec}
+}
+
+// With creates new or returns existing duration histogram with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *DurationHistogramVec) With(tags map[string]string) metrics.Timer {
+ return &Histogram{hm: v.vec.With(tags)}
+}
+
+// Reset deletes all metrics in this vector.
+func (v *DurationHistogramVec) Reset() {
+ v.vec.Reset()
+}
diff --git a/library/go/core/metrics/prometheus/vec_test.go b/library/go/core/metrics/prometheus/vec_test.go
new file mode 100644
index 0000000000..ccf088c17a
--- /dev/null
+++ b/library/go/core/metrics/prometheus/vec_test.go
@@ -0,0 +1,137 @@
+package prometheus
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+func TestCounterVec(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ vec := rg.CounterVec("ololo", []string{"shimba", "looken"})
+ mt := vec.With(map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ })
+
+ assert.IsType(t, &CounterVec{}, vec)
+ assert.IsType(t, &Counter{}, mt)
+
+ vec.Reset()
+
+ metrics, err := rg.Gather()
+ assert.NoError(t, err)
+ assert.Empty(t, metrics)
+}
+
+func TestCounterVec_RegisterAgain(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ vec1 := rg.CounterVec("ololo", []string{"shimba", "looken"}).(*CounterVec)
+ vec2 := rg.CounterVec("ololo", []string{"shimba", "looken"}).(*CounterVec)
+ assert.Same(t, vec1.vec, vec2.vec)
+}
+
+func TestGaugeVec(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ vec := rg.GaugeVec("ololo", []string{"shimba", "looken"})
+ mt := vec.With(map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ })
+
+ assert.IsType(t, &GaugeVec{}, vec)
+ assert.IsType(t, &Gauge{}, mt)
+
+ vec.Reset()
+
+ metrics, err := rg.Gather()
+ assert.NoError(t, err)
+ assert.Empty(t, metrics)
+}
+
+func TestGaugeVec_RegisterAgain(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ vec1 := rg.GaugeVec("ololo", []string{"shimba", "looken"}).(*GaugeVec)
+ vec2 := rg.GaugeVec("ololo", []string{"shimba", "looken"}).(*GaugeVec)
+ assert.Same(t, vec1.vec, vec2.vec)
+}
+
+func TestTimerVec(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ vec := rg.TimerVec("ololo", []string{"shimba", "looken"})
+ mt := vec.With(map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ })
+
+ assert.IsType(t, &TimerVec{}, vec)
+ assert.IsType(t, &Timer{}, mt)
+
+ vec.Reset()
+
+ metrics, err := rg.Gather()
+ assert.NoError(t, err)
+ assert.Empty(t, metrics)
+}
+
+func TestTimerVec_RegisterAgain(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ vec1 := rg.TimerVec("ololo", []string{"shimba", "looken"}).(*TimerVec)
+ vec2 := rg.TimerVec("ololo", []string{"shimba", "looken"}).(*TimerVec)
+ assert.Same(t, vec1.vec, vec2.vec)
+}
+
+func TestHistogramVec(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ buckets := metrics.NewBuckets(1, 2, 3)
+ vec := rg.HistogramVec("ololo", buckets, []string{"shimba", "looken"})
+ mt := vec.With(map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ })
+
+ assert.IsType(t, &HistogramVec{}, vec)
+ assert.IsType(t, &Histogram{}, mt)
+
+ vec.Reset()
+
+ metrics, err := rg.Gather()
+ assert.NoError(t, err)
+ assert.Empty(t, metrics)
+}
+
+func TestHistogramVec_RegisterAgain(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ buckets := metrics.NewBuckets(1, 2, 3)
+ vec1 := rg.HistogramVec("ololo", buckets, []string{"shimba", "looken"}).(*HistogramVec)
+ vec2 := rg.HistogramVec("ololo", buckets, []string{"shimba", "looken"}).(*HistogramVec)
+ assert.Same(t, vec1.vec, vec2.vec)
+}
+
+func TestDurationHistogramVec(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ buckets := metrics.NewDurationBuckets(1, 2, 3)
+ vec := rg.DurationHistogramVec("ololo", buckets, []string{"shimba", "looken"})
+ mt := vec.With(map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ })
+
+ assert.IsType(t, &DurationHistogramVec{}, vec)
+ assert.IsType(t, &Histogram{}, mt)
+
+ vec.Reset()
+
+ metrics, err := rg.Gather()
+ assert.NoError(t, err)
+ assert.Empty(t, metrics)
+}
+
+func TestDurationHistogramVec_RegisterAgain(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ buckets := metrics.NewDurationBuckets(1, 2, 3)
+ vec1 := rg.DurationHistogramVec("ololo", buckets, []string{"shimba", "looken"}).(*DurationHistogramVec)
+ vec2 := rg.DurationHistogramVec("ololo", buckets, []string{"shimba", "looken"}).(*DurationHistogramVec)
+ assert.Same(t, vec1.vec, vec2.vec)
+}
diff --git a/library/go/core/metrics/prometheus/ya.make b/library/go/core/metrics/prometheus/ya.make
new file mode 100644
index 0000000000..b012835f4b
--- /dev/null
+++ b/library/go/core/metrics/prometheus/ya.make
@@ -0,0 +1,25 @@
+GO_LIBRARY()
+
+SRCS(
+ counter.go
+ gauge.go
+ int_gauge.go
+ histogram.go
+ registry.go
+ registry_opts.go
+ timer.go
+ vec.go
+)
+
+GO_TEST_SRCS(
+ counter_test.go
+ gauge_test.go
+ histogram_test.go
+ registry_test.go
+ timer_test.go
+ vec_test.go
+)
+
+END()
+
+RECURSE(gotest)
diff --git a/library/go/core/metrics/solomon/converter.go b/library/go/core/metrics/solomon/converter.go
new file mode 100644
index 0000000000..6976b223ba
--- /dev/null
+++ b/library/go/core/metrics/solomon/converter.go
@@ -0,0 +1,114 @@
+package solomon
+
+import (
+ "fmt"
+
+ dto "github.com/prometheus/client_model/go"
+ "go.uber.org/atomic"
+)
+
+// PrometheusMetrics converts Prometheus metrics to Solomon metrics.
+func PrometheusMetrics(metrics []*dto.MetricFamily) (*Metrics, error) {
+ s := &Metrics{
+ metrics: make([]Metric, 0, len(metrics)),
+ }
+
+ if len(metrics) == 0 {
+ return s, nil
+ }
+
+ for _, mf := range metrics {
+ if len(mf.Metric) == 0 {
+ continue
+ }
+
+ for _, metric := range mf.Metric {
+
+ tags := make(map[string]string, len(metric.Label))
+ for _, label := range metric.Label {
+ tags[label.GetName()] = label.GetValue()
+ }
+
+ switch *mf.Type {
+ case dto.MetricType_COUNTER:
+ s.metrics = append(s.metrics, &Counter{
+ name: mf.GetName(),
+ metricType: typeCounter,
+ tags: tags,
+ value: *atomic.NewInt64(int64(metric.Counter.GetValue())),
+ })
+ case dto.MetricType_GAUGE:
+ s.metrics = append(s.metrics, &Gauge{
+ name: mf.GetName(),
+ metricType: typeGauge,
+ tags: tags,
+ value: *atomic.NewFloat64(metric.Gauge.GetValue()),
+ })
+ case dto.MetricType_HISTOGRAM:
+ bounds := make([]float64, 0, len(metric.Histogram.Bucket))
+ values := make([]int64, 0, len(metric.Histogram.Bucket))
+
+ var prevValuesSum int64
+
+ for _, bucket := range metric.Histogram.Bucket {
+ // prometheus uses cumulative buckets where solomon uses instant
+ bucketValue := int64(bucket.GetCumulativeCount())
+ bucketValue -= prevValuesSum
+ prevValuesSum += bucketValue
+
+ bounds = append(bounds, bucket.GetUpperBound())
+ values = append(values, bucketValue)
+ }
+
+ s.metrics = append(s.metrics, &Histogram{
+ name: mf.GetName(),
+ metricType: typeHistogram,
+ tags: tags,
+ bucketBounds: bounds,
+ bucketValues: values,
+ infValue: *atomic.NewInt64(int64(metric.Histogram.GetSampleCount()) - prevValuesSum),
+ })
+ case dto.MetricType_SUMMARY:
+ bounds := make([]float64, 0, len(metric.Summary.Quantile))
+ values := make([]int64, 0, len(metric.Summary.Quantile))
+
+ var prevValuesSum int64
+
+ for _, bucket := range metric.Summary.GetQuantile() {
+ // NOTE(review): quantile values are treated like cumulative bucket counts here, mirroring the histogram branch — verify this matches Solomon's expected summary conversion
+ bucketValue := int64(bucket.GetValue())
+ bucketValue -= prevValuesSum
+ prevValuesSum += bucketValue
+
+ bounds = append(bounds, bucket.GetQuantile())
+ values = append(values, bucketValue)
+ }
+
+ mName := mf.GetName()
+
+ s.metrics = append(s.metrics, &Histogram{
+ name: mName,
+ metricType: typeHistogram,
+ tags: tags,
+ bucketBounds: bounds,
+ bucketValues: values,
+ infValue: *atomic.NewInt64(int64(*metric.Summary.SampleCount) - prevValuesSum),
+ }, &Counter{
+ name: mName + "_count",
+ metricType: typeCounter,
+ tags: tags,
+ value: *atomic.NewInt64(int64(*metric.Summary.SampleCount)),
+ }, &Gauge{
+ name: mName + "_sum",
+ metricType: typeGauge,
+ tags: tags,
+ value: *atomic.NewFloat64(*metric.Summary.SampleSum),
+ })
+ default:
+ return nil, fmt.Errorf("unsupported type: %s", mf.Type.String())
+ }
+ }
+ }
+
+ return s, nil
+}
diff --git a/library/go/core/metrics/solomon/converter_test.go b/library/go/core/metrics/solomon/converter_test.go
new file mode 100644
index 0000000000..5368029038
--- /dev/null
+++ b/library/go/core/metrics/solomon/converter_test.go
@@ -0,0 +1,200 @@
+package solomon
+
+import (
+ "testing"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+ "github.com/ydb-platform/ydb/library/go/ptr"
+ "go.uber.org/atomic"
+)
+
+func TestPrometheusMetrics(t *testing.T) {
+ testCases := []struct {
+ name string
+ metrics []*dto.MetricFamily
+ expect *Metrics
+ expectErr error
+ }{
+ {
+ name: "success",
+ metrics: []*dto.MetricFamily{
+ {
+ Name: ptr.String("subregister1_mygauge"),
+ Help: ptr.String(""),
+ Type: ptr.T(dto.MetricType_GAUGE),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {Name: ptr.String("ololo"), Value: ptr.String("trololo")},
+ },
+ Gauge: &dto.Gauge{Value: ptr.Float64(42)},
+ },
+ },
+ },
+ {
+ Name: ptr.String("subregisters_count"),
+ Help: ptr.String(""),
+ Type: ptr.T(dto.MetricType_COUNTER),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Counter: &dto.Counter{Value: ptr.Float64(2)},
+ },
+ },
+ },
+ {
+ Name: ptr.String("subregister1_subregister2_myhistogram"),
+ Help: ptr.String(""),
+ Type: ptr.T(dto.MetricType_HISTOGRAM),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {Name: ptr.String("ololo"), Value: ptr.String("trololo")},
+ {Name: ptr.String("shimba"), Value: ptr.String("boomba")},
+ },
+ Histogram: &dto.Histogram{
+ SampleCount: ptr.Uint64(6),
+ SampleSum: ptr.Float64(4.2),
+ Bucket: []*dto.Bucket{
+ {CumulativeCount: ptr.Uint64(1), UpperBound: ptr.Float64(1)}, // 0.5 written
+ {CumulativeCount: ptr.Uint64(3), UpperBound: ptr.Float64(2)}, // 1.5 & 1.7 written
+ {CumulativeCount: ptr.Uint64(4), UpperBound: ptr.Float64(3)}, // 2.2 written
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: ptr.String("metrics_group"),
+ Help: ptr.String(""),
+ Type: ptr.T(dto.MetricType_COUNTER),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{},
+ Counter: &dto.Counter{Value: ptr.Float64(2)},
+ },
+ {
+ Label: []*dto.LabelPair{},
+ Counter: &dto.Counter{Value: ptr.Float64(3)},
+ },
+ },
+ },
+ },
+ expect: &Metrics{
+ metrics: []Metric{
+ &Gauge{
+ name: "subregister1_mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewFloat64(42),
+ },
+ &Counter{
+ name: "subregisters_count",
+ metricType: typeCounter,
+ tags: map[string]string{},
+ value: *atomic.NewInt64(2),
+ },
+ &Histogram{
+ name: "subregister1_subregister2_myhistogram",
+ metricType: typeHistogram,
+ tags: map[string]string{"ololo": "trololo", "shimba": "boomba"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: []int64{1, 2, 1},
+ infValue: *atomic.NewInt64(2),
+ },
+ // group of metrics
+ &Counter{
+ name: "metrics_group",
+ metricType: typeCounter,
+ tags: map[string]string{},
+ value: *atomic.NewInt64(2),
+ },
+ &Counter{
+ name: "metrics_group",
+ metricType: typeCounter,
+ tags: map[string]string{},
+ value: *atomic.NewInt64(3),
+ },
+ },
+ },
+ expectErr: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ s, err := PrometheusMetrics(tc.metrics)
+
+ if tc.expectErr == nil {
+ assert.NoError(t, err)
+ } else {
+ assert.EqualError(t, err, tc.expectErr.Error())
+ }
+
+ assert.Equal(t, tc.expect, s)
+ })
+ }
+}
+
+func TestPrometheusSummaryMetric(t *testing.T) {
+ src := []*dto.MetricFamily{
+ {
+ Name: ptr.String("subregister1_subregister2_mysummary"),
+ Help: ptr.String(""),
+ Type: func(mt dto.MetricType) *dto.MetricType { return &mt }(dto.MetricType_SUMMARY),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {Name: ptr.String("ololo"), Value: ptr.String("trololo")},
+ {Name: ptr.String("shimba"), Value: ptr.String("boomba")},
+ },
+ Summary: &dto.Summary{
+ SampleCount: ptr.Uint64(8),
+ SampleSum: ptr.Float64(4.2),
+ Quantile: []*dto.Quantile{
+ {Value: ptr.Float64(1), Quantile: ptr.Float64(1)}, // 0.5 written
+ {Value: ptr.Float64(3), Quantile: ptr.Float64(2)}, // 1.5 & 1.7 written
+ {Value: ptr.Float64(4), Quantile: ptr.Float64(3)}, // 2.2 written
+ },
+ },
+ },
+ },
+ },
+ }
+
+ mName := "subregister1_subregister2_mysummary"
+ mTags := map[string]string{"ololo": "trololo", "shimba": "boomba"}
+ bBounds := []float64{1, 2, 3}
+ bValues := []int64{1, 2, 1}
+
+ expect := &Metrics{
+ metrics: []Metric{
+ &Histogram{
+ name: mName,
+ metricType: typeHistogram,
+ tags: mTags,
+ bucketBounds: bBounds,
+ bucketValues: bValues,
+ infValue: *atomic.NewInt64(4),
+ },
+ &Counter{
+ name: mName + "_count",
+ metricType: typeCounter,
+ tags: mTags,
+ value: *atomic.NewInt64(8),
+ },
+ &Gauge{
+ name: mName + "_sum",
+ metricType: typeGauge,
+ tags: mTags,
+ value: *atomic.NewFloat64(4.2),
+ },
+ },
+ }
+
+ s, err := PrometheusMetrics(src)
+ assert.NoError(t, err)
+
+ assert.Equal(t, expect, s)
+}
diff --git a/library/go/core/metrics/solomon/counter.go b/library/go/core/metrics/solomon/counter.go
new file mode 100644
index 0000000000..e37933760c
--- /dev/null
+++ b/library/go/core/metrics/solomon/counter.go
@@ -0,0 +1,97 @@
+package solomon
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+var (
+ _ metrics.Counter = (*Counter)(nil)
+ _ Metric = (*Counter)(nil)
+)
+
+// Counter tracks monotonically increasing value.
+type Counter struct {
+ name string
+ metricType metricType
+ tags map[string]string
+ value atomic.Int64
+ timestamp *time.Time
+
+ useNameTag bool
+}
+
+// Inc increments counter by 1.
+func (c *Counter) Inc() {
+ c.Add(1)
+}
+
+// Add adds delta to the counter. Delta must be >=0.
+func (c *Counter) Add(delta int64) {
+ c.value.Add(delta)
+}
+
+func (c *Counter) Name() string {
+ return c.name
+}
+
+func (c *Counter) getType() metricType {
+ return c.metricType
+}
+
+func (c *Counter) getLabels() map[string]string {
+ return c.tags
+}
+
+func (c *Counter) getValue() interface{} {
+ return c.value.Load()
+}
+
+func (c *Counter) getTimestamp() *time.Time {
+ return c.timestamp
+}
+
+func (c *Counter) getNameTag() string {
+ if c.useNameTag {
+ return "name"
+ } else {
+ return "sensor"
+ }
+}
+
+// MarshalJSON implements json.Marshaler.
+func (c *Counter) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type"`
+ Labels map[string]string `json:"labels"`
+ Value int64 `json:"value"`
+ Timestamp *int64 `json:"ts,omitempty"`
+ }{
+ Type: c.metricType.String(),
+ Value: c.value.Load(),
+ Labels: func() map[string]string {
+ labels := make(map[string]string, len(c.tags)+1)
+ labels[c.getNameTag()] = c.Name()
+ for k, v := range c.tags {
+ labels[k] = v
+ }
+ return labels
+ }(),
+ Timestamp: tsAsRef(c.timestamp),
+ })
+}
+
+// Snapshot returns an independent copy of the metric.
+func (c *Counter) Snapshot() Metric {
+ return &Counter{
+ name: c.name,
+ metricType: c.metricType,
+ tags: c.tags,
+ value: *atomic.NewInt64(c.value.Load()),
+
+ useNameTag: c.useNameTag,
+ }
+}
diff --git a/library/go/core/metrics/solomon/counter_test.go b/library/go/core/metrics/solomon/counter_test.go
new file mode 100644
index 0000000000..09284125d2
--- /dev/null
+++ b/library/go/core/metrics/solomon/counter_test.go
@@ -0,0 +1,90 @@
+package solomon
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/atomic"
+)
+
+func TestCounter_Add(t *testing.T) {
+ c := &Counter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ }
+
+ c.Add(1)
+ assert.Equal(t, int64(1), c.value.Load())
+
+ c.Add(42)
+ assert.Equal(t, int64(43), c.value.Load())
+
+ c.Add(1489)
+ assert.Equal(t, int64(1532), c.value.Load())
+}
+
+func TestCounter_Inc(t *testing.T) {
+ c := &Counter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ }
+
+ for i := 0; i < 10; i++ {
+ c.Inc()
+ }
+ assert.Equal(t, int64(10), c.value.Load())
+
+ c.Inc()
+ c.Inc()
+ assert.Equal(t, int64(12), c.value.Load())
+}
+
+func TestCounter_MarshalJSON(t *testing.T) {
+ c := &Counter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"COUNTER","labels":{"ololo":"trololo","sensor":"mycounter"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestRatedCounter_MarshalJSON(t *testing.T) {
+ c := &Counter{
+ name: "mycounter",
+ metricType: typeRated,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"RATE","labels":{"ololo":"trololo","sensor":"mycounter"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestNameTagCounter_MarshalJSON(t *testing.T) {
+ c := &Counter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+
+ useNameTag: true,
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"COUNTER","labels":{"name":"mycounter","ololo":"trololo"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
diff --git a/library/go/core/metrics/solomon/func_counter.go b/library/go/core/metrics/solomon/func_counter.go
new file mode 100644
index 0000000000..db862869e4
--- /dev/null
+++ b/library/go/core/metrics/solomon/func_counter.go
@@ -0,0 +1,86 @@
+package solomon
+
+import (
+ "encoding/json"
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+var _ Metric = (*FuncCounter)(nil)
+
+// FuncCounter tracks int64 value returned by function.
+type FuncCounter struct {
+ name string
+ metricType metricType
+ tags map[string]string
+ function func() int64
+ timestamp *time.Time
+ useNameTag bool
+}
+
+func (c *FuncCounter) Name() string {
+ return c.name
+}
+
+func (c *FuncCounter) Function() func() int64 {
+ return c.function
+}
+
+func (c *FuncCounter) getType() metricType {
+ return c.metricType
+}
+
+func (c *FuncCounter) getLabels() map[string]string {
+ return c.tags
+}
+
+func (c *FuncCounter) getValue() interface{} {
+ return c.function()
+}
+
+func (c *FuncCounter) getTimestamp() *time.Time {
+ return c.timestamp
+}
+
+func (c *FuncCounter) getNameTag() string {
+ if c.useNameTag {
+ return "name"
+ } else {
+ return "sensor"
+ }
+}
+
+// MarshalJSON implements json.Marshaler.
+func (c *FuncCounter) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type"`
+ Labels map[string]string `json:"labels"`
+ Value int64 `json:"value"`
+ Timestamp *int64 `json:"ts,omitempty"`
+ }{
+ Type: c.metricType.String(),
+ Value: c.function(),
+ Labels: func() map[string]string {
+ labels := make(map[string]string, len(c.tags)+1)
+ labels[c.getNameTag()] = c.Name()
+ for k, v := range c.tags {
+ labels[k] = v
+ }
+ return labels
+ }(),
+ Timestamp: tsAsRef(c.timestamp),
+ })
+}
+
+// Snapshot returns an independent copy of the metric.
+func (c *FuncCounter) Snapshot() Metric {
+ return &Counter{
+ name: c.name,
+ metricType: c.metricType,
+ tags: c.tags,
+ value: *atomic.NewInt64(c.function()),
+
+ useNameTag: c.useNameTag,
+ }
+}
diff --git a/library/go/core/metrics/solomon/func_counter_test.go b/library/go/core/metrics/solomon/func_counter_test.go
new file mode 100644
index 0000000000..7849769d12
--- /dev/null
+++ b/library/go/core/metrics/solomon/func_counter_test.go
@@ -0,0 +1,82 @@
+package solomon
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/atomic"
+)
+
+func TestFuncCounter_Inc(t *testing.T) {
+ val := new(atomic.Int64)
+ c := &FuncCounter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ function: func() int64 {
+ return val.Load()
+ },
+ }
+
+ val.Store(1)
+ assert.Equal(t, int64(1), c.Snapshot().(*Counter).value.Load())
+
+ val.Store(42)
+ assert.Equal(t, int64(42), c.Snapshot().(*Counter).value.Load())
+
+}
+
+func TestFuncCounter_MarshalJSON(t *testing.T) {
+ c := &FuncCounter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ function: func() int64 {
+ return 42
+ },
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"COUNTER","labels":{"ololo":"trololo","sensor":"mycounter"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestRatedFuncCounter_MarshalJSON(t *testing.T) {
+ c := &FuncCounter{
+ name: "mycounter",
+ metricType: typeRated,
+ tags: map[string]string{"ololo": "trololo"},
+ function: func() int64 {
+ return 42
+ },
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"RATE","labels":{"ololo":"trololo","sensor":"mycounter"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestNameTagFuncCounter_MarshalJSON(t *testing.T) {
+ c := &FuncCounter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+
+ function: func() int64 {
+ return 42
+ },
+
+ useNameTag: true,
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"COUNTER","labels":{"name":"mycounter","ololo":"trololo"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
diff --git a/library/go/core/metrics/solomon/func_gauge.go b/library/go/core/metrics/solomon/func_gauge.go
new file mode 100644
index 0000000000..ce824c6fa8
--- /dev/null
+++ b/library/go/core/metrics/solomon/func_gauge.go
@@ -0,0 +1,87 @@
+package solomon
+
+import (
+ "encoding/json"
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+var _ Metric = (*FuncGauge)(nil)
+
+// FuncGauge tracks float64 value returned by function.
+type FuncGauge struct {
+ name string
+ metricType metricType
+ tags map[string]string
+ function func() float64
+ timestamp *time.Time
+
+ useNameTag bool
+}
+
+func (g *FuncGauge) Name() string {
+ return g.name
+}
+
+func (g *FuncGauge) Function() func() float64 {
+ return g.function
+}
+
+func (g *FuncGauge) getType() metricType {
+ return g.metricType
+}
+
+func (g *FuncGauge) getLabels() map[string]string {
+ return g.tags
+}
+
+func (g *FuncGauge) getValue() interface{} {
+ return g.function()
+}
+
+func (g *FuncGauge) getTimestamp() *time.Time {
+ return g.timestamp
+}
+
+func (g *FuncGauge) getNameTag() string {
+ if g.useNameTag {
+ return "name"
+ } else {
+ return "sensor"
+ }
+}
+
+// MarshalJSON implements json.Marshaler.
+func (g *FuncGauge) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type"`
+ Labels map[string]string `json:"labels"`
+ Value float64 `json:"value"`
+ Timestamp *int64 `json:"ts,omitempty"`
+ }{
+ Type: g.metricType.String(),
+ Value: g.function(),
+ Labels: func() map[string]string {
+ labels := make(map[string]string, len(g.tags)+1)
+ labels[g.getNameTag()] = g.Name()
+ for k, v := range g.tags {
+ labels[k] = v
+ }
+ return labels
+ }(),
+ Timestamp: tsAsRef(g.timestamp),
+ })
+}
+
+// Snapshot returns an independent copy of the metric.
+func (g *FuncGauge) Snapshot() Metric {
+ return &Gauge{
+ name: g.name,
+ metricType: g.metricType,
+ tags: g.tags,
+ value: *atomic.NewFloat64(g.function()),
+
+ useNameTag: g.useNameTag,
+ }
+}
diff --git a/library/go/core/metrics/solomon/func_gauge_test.go b/library/go/core/metrics/solomon/func_gauge_test.go
new file mode 100644
index 0000000000..f4317a0cab
--- /dev/null
+++ b/library/go/core/metrics/solomon/func_gauge_test.go
@@ -0,0 +1,64 @@
+package solomon
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/atomic"
+)
+
+func TestFuncGauge_Value(t *testing.T) {
+ val := new(atomic.Float64)
+ c := &FuncGauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ function: func() float64 {
+ return val.Load()
+ },
+ }
+
+ val.Store(1)
+ assert.Equal(t, float64(1), c.Snapshot().(*Gauge).value.Load())
+
+ val.Store(42)
+ assert.Equal(t, float64(42), c.Snapshot().(*Gauge).value.Load())
+
+}
+
+func TestFunGauge_MarshalJSON(t *testing.T) {
+ c := &FuncGauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ function: func() float64 {
+ return 42.18
+ },
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"DGAUGE","labels":{"ololo":"trololo","sensor":"mygauge"},"value":42.18}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestNameTagFunGauge_MarshalJSON(t *testing.T) {
+ c := &FuncGauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ function: func() float64 {
+ return 42.18
+ },
+
+ useNameTag: true,
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"DGAUGE","labels":{"name":"mygauge","ololo":"trololo"},"value":42.18}`)
+ assert.Equal(t, expected, b)
+}
diff --git a/library/go/core/metrics/solomon/func_int_gauge.go b/library/go/core/metrics/solomon/func_int_gauge.go
new file mode 100644
index 0000000000..4e7f22949a
--- /dev/null
+++ b/library/go/core/metrics/solomon/func_int_gauge.go
@@ -0,0 +1,87 @@
+package solomon
+
+import (
+ "encoding/json"
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+var _ Metric = (*FuncIntGauge)(nil)
+
+// FuncIntGauge tracks int64 value returned by function.
+type FuncIntGauge struct {
+ name string
+ metricType metricType
+ tags map[string]string
+ function func() int64
+ timestamp *time.Time
+
+ useNameTag bool
+}
+
+func (g *FuncIntGauge) Name() string {
+ return g.name
+}
+
+func (g *FuncIntGauge) Function() func() int64 {
+ return g.function
+}
+
+func (g *FuncIntGauge) getType() metricType {
+ return g.metricType
+}
+
+func (g *FuncIntGauge) getLabels() map[string]string {
+ return g.tags
+}
+
+func (g *FuncIntGauge) getValue() interface{} {
+ return g.function()
+}
+
+func (g *FuncIntGauge) getTimestamp() *time.Time {
+ return g.timestamp
+}
+
+func (g *FuncIntGauge) getNameTag() string {
+ if g.useNameTag {
+ return "name"
+ } else {
+ return "sensor"
+ }
+}
+
+// MarshalJSON implements json.Marshaler.
+func (g *FuncIntGauge) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type"`
+ Labels map[string]string `json:"labels"`
+ Value int64 `json:"value"`
+ Timestamp *int64 `json:"ts,omitempty"`
+ }{
+ Type: g.metricType.String(),
+ Value: g.function(),
+ Labels: func() map[string]string {
+ labels := make(map[string]string, len(g.tags)+1)
+ labels[g.getNameTag()] = g.Name()
+ for k, v := range g.tags {
+ labels[k] = v
+ }
+ return labels
+ }(),
+ Timestamp: tsAsRef(g.timestamp),
+ })
+}
+
+// Snapshot returns an independent copy of the metric.
+// The function is evaluated once and the result is frozen into a plain
+// IntGauge, so the snapshot no longer depends on the callback.
+func (g *FuncIntGauge) Snapshot() Metric {
+	return &IntGauge{
+		name:       g.name,
+		metricType: g.metricType,
+		tags:       g.tags,
+		value:      *atomic.NewInt64(g.function()),
+
+		useNameTag: g.useNameTag,
+		// Preserve the timestamp, consistent with IntGauge.Snapshot;
+		// previously it was dropped here.
+		timestamp: g.timestamp,
+	}
+}
diff --git a/library/go/core/metrics/solomon/func_int_gauge_test.go b/library/go/core/metrics/solomon/func_int_gauge_test.go
new file mode 100644
index 0000000000..4a576461e3
--- /dev/null
+++ b/library/go/core/metrics/solomon/func_int_gauge_test.go
@@ -0,0 +1,64 @@
+package solomon
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/atomic"
+)
+
+func TestFuncIntGauge_Value(t *testing.T) {
+ val := new(atomic.Int64)
+ c := &FuncIntGauge{
+ name: "myintgauge",
+ metricType: typeIGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ function: func() int64 {
+ return val.Load()
+ },
+ }
+
+ val.Store(1)
+ assert.Equal(t, int64(1), c.Snapshot().(*IntGauge).value.Load())
+
+ val.Store(42)
+ assert.Equal(t, int64(42), c.Snapshot().(*IntGauge).value.Load())
+
+}
+
+func TestFunIntGauge_MarshalJSON(t *testing.T) {
+ c := &FuncIntGauge{
+ name: "myintgauge",
+ metricType: typeIGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ function: func() int64 {
+ return 42
+ },
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"IGAUGE","labels":{"ololo":"trololo","sensor":"myintgauge"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestNameTagFunIntGauge_MarshalJSON(t *testing.T) {
+ c := &FuncIntGauge{
+ name: "myintgauge",
+ metricType: typeIGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ function: func() int64 {
+ return 42
+ },
+
+ useNameTag: true,
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"IGAUGE","labels":{"name":"myintgauge","ololo":"trololo"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
diff --git a/library/go/core/metrics/solomon/gauge.go b/library/go/core/metrics/solomon/gauge.go
new file mode 100644
index 0000000000..4660d33c11
--- /dev/null
+++ b/library/go/core/metrics/solomon/gauge.go
@@ -0,0 +1,115 @@
+package solomon
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+var (
+ _ metrics.Gauge = (*Gauge)(nil)
+ _ Metric = (*Gauge)(nil)
+)
+
+// Gauge tracks single float64 value.
+type Gauge struct {
+ name string
+ metricType metricType
+ tags map[string]string
+ value atomic.Float64
+ timestamp *time.Time
+
+ useNameTag bool
+}
+
+func NewGauge(name string, value float64, opts ...metricOpts) Gauge {
+ mOpts := MetricsOpts{}
+ for _, op := range opts {
+ op(&mOpts)
+ }
+ return Gauge{
+ name: name,
+ metricType: typeGauge,
+ tags: mOpts.tags,
+ value: *atomic.NewFloat64(value),
+ useNameTag: mOpts.useNameTag,
+ timestamp: mOpts.timestamp,
+ }
+}
+
+func (g *Gauge) Set(value float64) {
+ g.value.Store(value)
+}
+
+func (g *Gauge) Add(value float64) {
+ g.value.Add(value)
+}
+
+func (g *Gauge) Name() string {
+ return g.name
+}
+
+func (g *Gauge) getType() metricType {
+ return g.metricType
+}
+
+func (g *Gauge) getLabels() map[string]string {
+ return g.tags
+}
+
+func (g *Gauge) getValue() interface{} {
+ return g.value.Load()
+}
+
+func (g *Gauge) getTimestamp() *time.Time {
+ return g.timestamp
+}
+
+func (g *Gauge) getNameTag() string {
+ if g.useNameTag {
+ return "name"
+ } else {
+ return "sensor"
+ }
+}
+
+// MarshalJSON implements json.Marshaler.
+func (g *Gauge) MarshalJSON() ([]byte, error) {
+ metricType := g.metricType.String()
+ value := g.value.Load()
+ labels := func() map[string]string {
+ labels := make(map[string]string, len(g.tags)+1)
+ labels[g.getNameTag()] = g.Name()
+ for k, v := range g.tags {
+ labels[k] = v
+ }
+ return labels
+ }()
+
+ return json.Marshal(struct {
+ Type string `json:"type"`
+ Labels map[string]string `json:"labels"`
+ Value float64 `json:"value"`
+ Timestamp *int64 `json:"ts,omitempty"`
+ }{
+ Type: metricType,
+ Value: value,
+ Labels: labels,
+ Timestamp: tsAsRef(g.timestamp),
+ })
+}
+
+// Snapshot returns independent copy of metric.
+func (g *Gauge) Snapshot() Metric {
+ return &Gauge{
+ name: g.name,
+ metricType: g.metricType,
+ tags: g.tags,
+ value: *atomic.NewFloat64(g.value.Load()),
+
+ useNameTag: g.useNameTag,
+ timestamp: g.timestamp,
+ }
+}
diff --git a/library/go/core/metrics/solomon/gauge_test.go b/library/go/core/metrics/solomon/gauge_test.go
new file mode 100644
index 0000000000..82659a49c4
--- /dev/null
+++ b/library/go/core/metrics/solomon/gauge_test.go
@@ -0,0 +1,75 @@
+package solomon
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/atomic"
+)
+
+func TestGauge_Add(t *testing.T) {
+ c := &Gauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ }
+
+ c.Add(1)
+ assert.Equal(t, float64(1), c.value.Load())
+
+ c.Add(42)
+ assert.Equal(t, float64(43), c.value.Load())
+
+ c.Add(14.89)
+ assert.Equal(t, float64(57.89), c.value.Load())
+}
+
+func TestGauge_Set(t *testing.T) {
+ c := &Gauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ }
+
+ c.Set(1)
+ assert.Equal(t, float64(1), c.value.Load())
+
+ c.Set(42)
+ assert.Equal(t, float64(42), c.value.Load())
+
+ c.Set(14.89)
+ assert.Equal(t, float64(14.89), c.value.Load())
+}
+
+func TestGauge_MarshalJSON(t *testing.T) {
+ c := &Gauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewFloat64(42.18),
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"DGAUGE","labels":{"ololo":"trololo","sensor":"mygauge"},"value":42.18}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestNameTagGauge_MarshalJSON(t *testing.T) {
+ c := &Gauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewFloat64(42.18),
+
+ useNameTag: true,
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"DGAUGE","labels":{"name":"mygauge","ololo":"trololo"},"value":42.18}`)
+ assert.Equal(t, expected, b)
+}
diff --git a/library/go/core/metrics/solomon/gotest/ya.make b/library/go/core/metrics/solomon/gotest/ya.make
new file mode 100644
index 0000000000..0c386167a4
--- /dev/null
+++ b/library/go/core/metrics/solomon/gotest/ya.make
@@ -0,0 +1,3 @@
+GO_TEST_FOR(library/go/core/metrics/solomon)
+
+END()
diff --git a/library/go/core/metrics/solomon/histogram.go b/library/go/core/metrics/solomon/histogram.go
new file mode 100644
index 0000000000..6f4d3629e0
--- /dev/null
+++ b/library/go/core/metrics/solomon/histogram.go
@@ -0,0 +1,182 @@
+package solomon
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "io"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/xerrors"
+ "go.uber.org/atomic"
+)
+
+var (
+ _ metrics.Histogram = (*Histogram)(nil)
+ _ metrics.Timer = (*Histogram)(nil)
+ _ Metric = (*Histogram)(nil)
+)
+
+type Histogram struct {
+ name string
+ metricType metricType
+ tags map[string]string
+ bucketBounds []float64
+ bucketValues []int64
+ infValue atomic.Int64
+ mutex sync.Mutex
+ timestamp *time.Time
+ useNameTag bool
+}
+
+type histogram struct {
+ Bounds []float64 `json:"bounds"`
+ Buckets []int64 `json:"buckets"`
+ Inf int64 `json:"inf,omitempty"`
+}
+
+func (h *histogram) writeHistogram(w io.Writer) error {
+ err := writeULEB128(w, uint32(len(h.Buckets)))
+ if err != nil {
+ return xerrors.Errorf("writeULEB128 size histogram buckets failed: %w", err)
+ }
+
+ for _, upperBound := range h.Bounds {
+ err = binary.Write(w, binary.LittleEndian, float64(upperBound))
+ if err != nil {
+ return xerrors.Errorf("binary.Write upper bound failed: %w", err)
+ }
+ }
+
+ for _, bucketValue := range h.Buckets {
+ err = binary.Write(w, binary.LittleEndian, uint64(bucketValue))
+ if err != nil {
+ return xerrors.Errorf("binary.Write histogram buckets failed: %w", err)
+ }
+ }
+ return nil
+}
+
+// RecordValue adds a single observation to the histogram.
+// sort.SearchFloat64s returns the index of the first bound >= value, so an
+// observation exactly equal to a bound is counted in that bound's bucket;
+// values above the largest bound increment the +Inf counter instead.
+// Bucket increments are guarded by the mutex; the +Inf counter is atomic.
+func (h *Histogram) RecordValue(value float64) {
+	boundIndex := sort.SearchFloat64s(h.bucketBounds, value)
+
+	if boundIndex < len(h.bucketValues) {
+		h.mutex.Lock()
+		h.bucketValues[boundIndex] += 1
+		h.mutex.Unlock()
+	} else {
+		h.infValue.Inc()
+	}
+}
+
+func (h *Histogram) RecordDuration(value time.Duration) {
+ h.RecordValue(value.Seconds())
+}
+
+func (h *Histogram) Reset() {
+ h.mutex.Lock()
+ defer h.mutex.Unlock()
+
+ h.bucketValues = make([]int64, len(h.bucketValues))
+ h.infValue.Store(0)
+}
+
+func (h *Histogram) Name() string {
+ return h.name
+}
+
+func (h *Histogram) getType() metricType {
+ return h.metricType
+}
+
+func (h *Histogram) getLabels() map[string]string {
+ return h.tags
+}
+
+// getValue returns the histogram payload for serialization.
+func (h *Histogram) getValue() interface{} {
+	// Copy bucket values under the lock: bucketValues is mutated concurrently
+	// by RecordValue, Reset and InitBucketValues, so returning the live slice
+	// (as the previous implementation did) was a data race. MarshalJSON and
+	// Snapshot already take the same precaution.
+	h.mutex.Lock()
+	valuesCopy := make([]int64, len(h.bucketValues))
+	copy(valuesCopy, h.bucketValues)
+	h.mutex.Unlock()
+
+	return histogram{
+		Bounds:  h.bucketBounds,
+		Buckets: valuesCopy,
+		// NOTE(review): Inf is intentionally left zero, matching the original
+		// behavior — confirm consumers of getValue do not expect it.
+	}
+}
+
+func (h *Histogram) getTimestamp() *time.Time {
+ return h.timestamp
+}
+
+func (h *Histogram) getNameTag() string {
+ if h.useNameTag {
+ return "name"
+ } else {
+ return "sensor"
+ }
+}
+
+// MarshalJSON implements json.Marshaler.
+func (h *Histogram) MarshalJSON() ([]byte, error) {
+ valuesCopy := make([]int64, len(h.bucketValues))
+ h.mutex.Lock()
+ copy(valuesCopy, h.bucketValues)
+ h.mutex.Unlock()
+ return json.Marshal(struct {
+ Type string `json:"type"`
+ Labels map[string]string `json:"labels"`
+ Histogram histogram `json:"hist"`
+ Timestamp *int64 `json:"ts,omitempty"`
+ }{
+ Type: h.metricType.String(),
+ Histogram: histogram{
+ Bounds: h.bucketBounds,
+ Buckets: valuesCopy,
+ Inf: h.infValue.Load(),
+ },
+ Labels: func() map[string]string {
+ labels := make(map[string]string, len(h.tags)+1)
+ labels[h.getNameTag()] = h.Name()
+ for k, v := range h.tags {
+ labels[k] = v
+ }
+ return labels
+ }(),
+ Timestamp: tsAsRef(h.timestamp),
+ })
+}
+
+// Snapshot returns an independent copy of the metric.
+// Bounds and values are deep-copied (values under the mutex) so the snapshot
+// is immune to concurrent RecordValue/Reset calls on the original.
+func (h *Histogram) Snapshot() Metric {
+	bucketBounds := make([]float64, len(h.bucketBounds))
+	bucketValues := make([]int64, len(h.bucketValues))
+
+	copy(bucketBounds, h.bucketBounds)
+	h.mutex.Lock()
+	copy(bucketValues, h.bucketValues)
+	h.mutex.Unlock()
+
+	return &Histogram{
+		name:         h.name,
+		metricType:   h.metricType,
+		tags:         h.tags,
+		bucketBounds: bucketBounds,
+		bucketValues: bucketValues,
+		infValue:     *atomic.NewInt64(h.infValue.Load()),
+		useNameTag:   h.useNameTag,
+		// Preserve the timestamp, consistent with Gauge/IntGauge snapshots;
+		// previously it was dropped here.
+		timestamp: h.timestamp,
+	}
+}
+
+// InitBucketValues cleans internal bucketValues and saves new values in order.
+// Length of internal bucketValues stays unchanged.
+// If length of slice in argument bucketValues more than length of internal one,
+// the first extra element of bucketValues is stored in infValue.
+func (h *Histogram) InitBucketValues(bucketValues []int64) {
+	h.mutex.Lock()
+	defer h.mutex.Unlock()
+
+	// Re-allocate at the current length so stale counts are zeroed even when
+	// fewer values are supplied than buckets exist; copy then fills a prefix.
+	h.bucketValues = make([]int64, len(h.bucketValues))
+	h.infValue.Store(0)
+	copy(h.bucketValues, bucketValues)
+	if len(bucketValues) > len(h.bucketValues) {
+		// Only the first extra value is kept (as the +Inf counter);
+		// any further trailing values are discarded.
+		h.infValue.Store(bucketValues[len(h.bucketValues)])
+	}
+}
diff --git a/library/go/core/metrics/solomon/histogram_test.go b/library/go/core/metrics/solomon/histogram_test.go
new file mode 100644
index 0000000000..be7042397c
--- /dev/null
+++ b/library/go/core/metrics/solomon/histogram_test.go
@@ -0,0 +1,153 @@
+package solomon
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/atomic"
+)
+
+func TestHistogram_MarshalJSON(t *testing.T) {
+ h := &Histogram{
+ name: "myhistogram",
+ metricType: typeHistogram,
+ tags: map[string]string{"ololo": "trololo"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: []int64{1, 2, 1},
+ infValue: *atomic.NewInt64(2),
+ }
+
+ b, err := json.Marshal(h)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"HIST","labels":{"ololo":"trololo","sensor":"myhistogram"},"hist":{"bounds":[1,2,3],"buckets":[1,2,1],"inf":2}}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestRatedHistogram_MarshalJSON(t *testing.T) {
+ h := &Histogram{
+ name: "myhistogram",
+ metricType: typeRatedHistogram,
+ tags: map[string]string{"ololo": "trololo"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: []int64{1, 2, 1},
+ infValue: *atomic.NewInt64(2),
+ }
+
+ b, err := json.Marshal(h)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"HIST_RATE","labels":{"ololo":"trololo","sensor":"myhistogram"},"hist":{"bounds":[1,2,3],"buckets":[1,2,1],"inf":2}}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestNameTagHistogram_MarshalJSON(t *testing.T) {
+ h := &Histogram{
+ name: "myhistogram",
+ metricType: typeRatedHistogram,
+ tags: map[string]string{"ololo": "trololo"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: []int64{1, 2, 1},
+ infValue: *atomic.NewInt64(2),
+ useNameTag: true,
+ }
+
+ b, err := json.Marshal(h)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"HIST_RATE","labels":{"name":"myhistogram","ololo":"trololo"},"hist":{"bounds":[1,2,3],"buckets":[1,2,1],"inf":2}}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestHistogram_RecordDuration(t *testing.T) {
+ h := &Histogram{
+ name: "myhistogram",
+ metricType: typeHistogram,
+ tags: map[string]string{"ololo": "trololo"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: make([]int64, 3),
+ }
+
+ h.RecordDuration(500 * time.Millisecond)
+ h.RecordDuration(1 * time.Second)
+ h.RecordDuration(1800 * time.Millisecond)
+ h.RecordDuration(3 * time.Second)
+ h.RecordDuration(1 * time.Hour)
+
+ expectedValues := []int64{2, 1, 1}
+ assert.Equal(t, expectedValues, h.bucketValues)
+
+ var expectedInfValue int64 = 1
+ assert.Equal(t, expectedInfValue, h.infValue.Load())
+}
+
+func TestHistogram_RecordValue(t *testing.T) {
+ h := &Histogram{
+ name: "myhistogram",
+ metricType: typeHistogram,
+ tags: map[string]string{"ololo": "trololo"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: make([]int64, 3),
+ }
+
+ h.RecordValue(0.5)
+ h.RecordValue(1)
+ h.RecordValue(1.8)
+ h.RecordValue(3)
+ h.RecordValue(60)
+
+ expectedValues := []int64{2, 1, 1}
+ assert.Equal(t, expectedValues, h.bucketValues)
+
+ var expectedInfValue int64 = 1
+ assert.Equal(t, expectedInfValue, h.infValue.Load())
+}
+
+func TestHistogram_Reset(t *testing.T) {
+ h := &Histogram{
+ name: "myhistogram",
+ metricType: typeHistogram,
+ tags: map[string]string{"ololo": "trololo"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: make([]int64, 3),
+ }
+
+ h.RecordValue(0.5)
+ h.RecordValue(1)
+ h.RecordValue(1.8)
+ h.RecordValue(3)
+ h.RecordValue(60)
+
+ assert.Equal(t, []int64{2, 1, 1}, h.bucketValues)
+ assert.Equal(t, int64(1), h.infValue.Load())
+
+ h.Reset()
+
+ assert.Equal(t, []int64{0, 0, 0}, h.bucketValues)
+ assert.Equal(t, int64(0), h.infValue.Load())
+}
+
+func TestHistogram_InitBucketValues(t *testing.T) {
+ h := &Histogram{
+ name: "myhistogram",
+ metricType: typeHistogram,
+ tags: map[string]string{"ololo": "trololo"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: make([]int64, 3),
+ }
+
+ valsToInit := []int64{1, 2, 3, 4}
+ h.InitBucketValues(valsToInit[:2])
+ assert.Equal(t, append(valsToInit[:2], 0), h.bucketValues)
+ assert.Equal(t, *atomic.NewInt64(0), h.infValue)
+
+ h.InitBucketValues(valsToInit[:3])
+ assert.Equal(t, valsToInit[:3], h.bucketValues)
+ assert.Equal(t, *atomic.NewInt64(0), h.infValue)
+
+ h.InitBucketValues(valsToInit)
+ assert.Equal(t, valsToInit[:3], h.bucketValues)
+ assert.Equal(t, *atomic.NewInt64(valsToInit[3]), h.infValue)
+}
diff --git a/library/go/core/metrics/solomon/int_gauge.go b/library/go/core/metrics/solomon/int_gauge.go
new file mode 100644
index 0000000000..8733bf11fe
--- /dev/null
+++ b/library/go/core/metrics/solomon/int_gauge.go
@@ -0,0 +1,115 @@
+package solomon
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+var (
+ _ metrics.IntGauge = (*IntGauge)(nil)
+ _ Metric = (*IntGauge)(nil)
+)
+
+// IntGauge tracks a single int64 value.
+type IntGauge struct {
+	name       string
+	metricType metricType
+	tags       map[string]string
+	value      atomic.Int64
+	timestamp  *time.Time
+
+	useNameTag bool
+}
+
+func NewIntGauge(name string, value int64, opts ...metricOpts) IntGauge {
+ mOpts := MetricsOpts{}
+ for _, op := range opts {
+ op(&mOpts)
+ }
+ return IntGauge{
+ name: name,
+ metricType: typeIGauge,
+ tags: mOpts.tags,
+ value: *atomic.NewInt64(value),
+ useNameTag: mOpts.useNameTag,
+ timestamp: mOpts.timestamp,
+ }
+}
+
+func (g *IntGauge) Set(value int64) {
+ g.value.Store(value)
+}
+
+func (g *IntGauge) Add(value int64) {
+ g.value.Add(value)
+}
+
+func (g *IntGauge) Name() string {
+ return g.name
+}
+
+func (g *IntGauge) getType() metricType {
+ return g.metricType
+}
+
+func (g *IntGauge) getLabels() map[string]string {
+ return g.tags
+}
+
+func (g *IntGauge) getValue() interface{} {
+ return g.value.Load()
+}
+
+func (g *IntGauge) getTimestamp() *time.Time {
+ return g.timestamp
+}
+
+func (g *IntGauge) getNameTag() string {
+ if g.useNameTag {
+ return "name"
+ } else {
+ return "sensor"
+ }
+}
+
+// MarshalJSON implements json.Marshaler.
+func (g *IntGauge) MarshalJSON() ([]byte, error) {
+ metricType := g.metricType.String()
+ value := g.value.Load()
+ labels := func() map[string]string {
+ labels := make(map[string]string, len(g.tags)+1)
+ labels[g.getNameTag()] = g.Name()
+ for k, v := range g.tags {
+ labels[k] = v
+ }
+ return labels
+ }()
+
+ return json.Marshal(struct {
+ Type string `json:"type"`
+ Labels map[string]string `json:"labels"`
+ Value int64 `json:"value"`
+ Timestamp *int64 `json:"ts,omitempty"`
+ }{
+ Type: metricType,
+ Value: value,
+ Labels: labels,
+ Timestamp: tsAsRef(g.timestamp),
+ })
+}
+
+// Snapshot returns independent copy of metric.
+func (g *IntGauge) Snapshot() Metric {
+ return &IntGauge{
+ name: g.name,
+ metricType: g.metricType,
+ tags: g.tags,
+ value: *atomic.NewInt64(g.value.Load()),
+
+ useNameTag: g.useNameTag,
+ timestamp: g.timestamp,
+ }
+}
diff --git a/library/go/core/metrics/solomon/int_gauge_test.go b/library/go/core/metrics/solomon/int_gauge_test.go
new file mode 100644
index 0000000000..5918ef9ac3
--- /dev/null
+++ b/library/go/core/metrics/solomon/int_gauge_test.go
@@ -0,0 +1,75 @@
+package solomon
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/atomic"
+)
+
+func TestIntGauge_Add(t *testing.T) {
+ c := &IntGauge{
+ name: "myintgauge",
+ metricType: typeIGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ }
+
+ c.Add(1)
+ assert.Equal(t, int64(1), c.value.Load())
+
+ c.Add(42)
+ assert.Equal(t, int64(43), c.value.Load())
+
+ c.Add(-45)
+ assert.Equal(t, int64(-2), c.value.Load())
+}
+
+func TestIntGauge_Set(t *testing.T) {
+ c := &IntGauge{
+ name: "myintgauge",
+ metricType: typeIGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ }
+
+ c.Set(1)
+ assert.Equal(t, int64(1), c.value.Load())
+
+ c.Set(42)
+ assert.Equal(t, int64(42), c.value.Load())
+
+ c.Set(-45)
+ assert.Equal(t, int64(-45), c.value.Load())
+}
+
+func TestIntGauge_MarshalJSON(t *testing.T) {
+ c := &IntGauge{
+ name: "myintgauge",
+ metricType: typeIGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"IGAUGE","labels":{"ololo":"trololo","sensor":"myintgauge"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestNameTagIntGauge_MarshalJSON(t *testing.T) {
+ c := &IntGauge{
+ name: "myintgauge",
+ metricType: typeIGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+
+ useNameTag: true,
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"IGAUGE","labels":{"name":"myintgauge","ololo":"trololo"},"value":42}`)
+ assert.Equal(t, expected, b)
+}
diff --git a/library/go/core/metrics/solomon/metrics.go b/library/go/core/metrics/solomon/metrics.go
new file mode 100644
index 0000000000..6b73fd10a6
--- /dev/null
+++ b/library/go/core/metrics/solomon/metrics.go
@@ -0,0 +1,187 @@
+package solomon
+
+import (
+ "bytes"
+ "context"
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/xerrors"
+ "golang.org/x/exp/slices"
+)
+
+// Gather collects all metrics data via snapshots.
+// Every registered metric is snapshotted so the returned Metrics set is
+// decoupled from the live counters. Iteration stops at the first stored
+// value that does not implement Metric, and that error is returned.
+func (r Registry) Gather() (*Metrics, error) {
+	metrics := make([]Metric, 0)
+
+	var err error
+	r.metrics.Range(func(_, v interface{}) bool {
+		if s, ok := v.(Metric); ok {
+			metrics = append(metrics, s.Snapshot())
+			return true
+		}
+		err = fmt.Errorf("unexpected value type: %T", v)
+		return false
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &Metrics{metrics: metrics}, nil
+}
+
+func NewMetrics(metrics []Metric) Metrics {
+ return Metrics{metrics: metrics}
+}
+
+func NewMetricsWithTimestamp(metrics []Metric, ts time.Time) Metrics {
+ return Metrics{metrics: metrics, timestamp: &ts}
+}
+
+type valueType uint8
+
+const (
+ valueTypeNone valueType = iota
+ valueTypeOneWithoutTS valueType = 0x01
+ valueTypeOneWithTS valueType = 0x02
+ valueTypeManyWithTS valueType = 0x03
+)
+
+type metricType uint8
+
+const (
+ typeUnspecified metricType = iota
+ typeGauge metricType = 0x01
+ typeCounter metricType = 0x02
+ typeRated metricType = 0x03
+ typeIGauge metricType = 0x04
+ typeHistogram metricType = 0x05
+ typeRatedHistogram metricType = 0x06
+)
+
+// String renders the metric type as its Solomon JSON "type" tag.
+// It panics on typeUnspecified or any unknown value: metrics are expected to
+// be constructed only through this package, which always assigns a valid type.
+func (k metricType) String() string {
+	switch k {
+	case typeCounter:
+		return "COUNTER"
+	case typeGauge:
+		return "DGAUGE"
+	case typeIGauge:
+		return "IGAUGE"
+	case typeHistogram:
+		return "HIST"
+	case typeRated:
+		return "RATE"
+	case typeRatedHistogram:
+		return "HIST_RATE"
+	default:
+		panic("unknown metric type")
+	}
+}
+
+// Metric is an any abstract solomon Metric.
+type Metric interface {
+ json.Marshaler
+
+ Name() string
+ getType() metricType
+ getLabels() map[string]string
+ getValue() interface{}
+ getNameTag() string
+ getTimestamp() *time.Time
+
+ Snapshot() Metric
+}
+
+// Rated marks given Solomon metric or vector as rated.
+// Example:
+//
+// cnt := r.Counter("mycounter")
+// Rated(cnt)
+//
+// cntvec := r.CounterVec("mycounter", []string{"mytag"})
+// Rated(cntvec)
+//
+// For additional info: https://docs.yandex-team.ru/solomon/data-collection/dataformat/json
+func Rated(s interface{}) {
+ switch st := s.(type) {
+ case *Counter:
+ st.metricType = typeRated
+ case *FuncCounter:
+ st.metricType = typeRated
+ case *Histogram:
+ st.metricType = typeRatedHistogram
+
+ case *CounterVec:
+ st.vec.rated = true
+ case *HistogramVec:
+ st.vec.rated = true
+ case *DurationHistogramVec:
+ st.vec.rated = true
+ }
+ // any other metrics types are unrateable
+}
+
+var (
+ _ json.Marshaler = (*Metrics)(nil)
+ _ encoding.BinaryMarshaler = (*Metrics)(nil)
+)
+
+type Metrics struct {
+ metrics []Metric
+ timestamp *time.Time
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Metrics) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Metrics []Metric `json:"metrics"`
+ Timestamp *int64 `json:"ts,omitempty"`
+ }{s.metrics, tsAsRef(s.timestamp)})
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (s Metrics) MarshalBinary() ([]byte, error) {
+ var buf bytes.Buffer
+ se := NewSpackEncoder(context.Background(), CompressionNone, &s)
+ n, err := se.Encode(&buf)
+ if err != nil {
+ return nil, xerrors.Errorf("encode only %d bytes: %w", n, err)
+ }
+ return buf.Bytes(), nil
+}
+
+// SplitToChunks splits Metrics into a slice of chunks, each at most maxChunkSize long.
+// The length of returned slice is always at least one.
+// Zero maxChunkSize denotes unlimited chunk length.
+func (s Metrics) SplitToChunks(maxChunkSize int) []Metrics {
+	if maxChunkSize == 0 || len(s.metrics) == 0 {
+		return []Metrics{s}
+	}
+	chunks := make([]Metrics, 0, len(s.metrics)/maxChunkSize+1)
+
+	for leftBound := 0; leftBound < len(s.metrics); leftBound += maxChunkSize {
+		rightBound := leftBound + maxChunkSize
+		if rightBound > len(s.metrics) {
+			rightBound = len(s.metrics)
+		}
+		chunk := s.metrics[leftBound:rightBound]
+		// Propagate the batch timestamp to each chunk; previously it was
+		// silently dropped, unlike the single-chunk fast path above.
+		chunks = append(chunks, Metrics{metrics: chunk, timestamp: s.timestamp})
+	}
+	return chunks
+}
+
+// List return list of metrics
+func (s Metrics) List() []Metric {
+ return slices.Clone(s.metrics)
+}
+
+// tsAsRef converts an optional time.Time into an optional Unix-seconds value.
+// A nil input yields nil, which makes the JSON "ts" field omitted entirely.
+func tsAsRef(t *time.Time) *int64 {
+	if t == nil {
+		return nil
+	}
+	unix := t.Unix()
+	return &unix
+}
diff --git a/library/go/core/metrics/solomon/metrics_opts.go b/library/go/core/metrics/solomon/metrics_opts.go
new file mode 100644
index 0000000000..d9ade67966
--- /dev/null
+++ b/library/go/core/metrics/solomon/metrics_opts.go
@@ -0,0 +1,29 @@
+package solomon
+
+import "time"
+
+type MetricsOpts struct {
+ useNameTag bool
+ tags map[string]string
+ timestamp *time.Time
+}
+
+type metricOpts func(*MetricsOpts)
+
+func WithTags(tags map[string]string) func(*MetricsOpts) {
+ return func(m *MetricsOpts) {
+ m.tags = tags
+ }
+}
+
+func WithUseNameTag() func(*MetricsOpts) {
+ return func(m *MetricsOpts) {
+ m.useNameTag = true
+ }
+}
+
+func WithTimestamp(t time.Time) func(*MetricsOpts) {
+ return func(m *MetricsOpts) {
+ m.timestamp = &t
+ }
+}
diff --git a/library/go/core/metrics/solomon/metrics_test.go b/library/go/core/metrics/solomon/metrics_test.go
new file mode 100644
index 0000000000..610fa061a1
--- /dev/null
+++ b/library/go/core/metrics/solomon/metrics_test.go
@@ -0,0 +1,296 @@
+package solomon
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+func TestMetrics_MarshalJSON(t *testing.T) {
+ s := &Metrics{
+ metrics: []Metric{
+ &Counter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ },
+ &Counter{
+ name: "myratedcounter",
+ metricType: typeRated,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ },
+ &Gauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"shimba": "boomba"},
+ value: *atomic.NewFloat64(14.89),
+ },
+ &Timer{
+ name: "mytimer",
+ metricType: typeGauge,
+ tags: map[string]string{"looken": "tooken"},
+ value: *atomic.NewDuration(1456 * time.Millisecond),
+ },
+ &Histogram{
+ name: "myhistogram",
+ metricType: typeHistogram,
+ tags: map[string]string{"chicken": "cooken"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: []int64{1, 2, 1},
+ infValue: *atomic.NewInt64(1),
+ },
+ &Histogram{
+ name: "myratedhistogram",
+ metricType: typeRatedHistogram,
+ tags: map[string]string{"chicken": "cooken"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: []int64{1, 2, 1},
+ infValue: *atomic.NewInt64(1),
+ },
+ &Gauge{
+ name: "mytimedgauge",
+ metricType: typeGauge,
+ tags: map[string]string{"oki": "toki"},
+ value: *atomic.NewFloat64(42.24),
+ timestamp: timeAsRef(time.Unix(1500000000, 0)),
+ },
+ },
+ }
+
+ b, err := json.Marshal(s)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"metrics":[` +
+ `{"type":"COUNTER","labels":{"ololo":"trololo","sensor":"mycounter"},"value":42},` +
+ `{"type":"RATE","labels":{"ololo":"trololo","sensor":"myratedcounter"},"value":42},` +
+ `{"type":"DGAUGE","labels":{"sensor":"mygauge","shimba":"boomba"},"value":14.89},` +
+ `{"type":"DGAUGE","labels":{"looken":"tooken","sensor":"mytimer"},"value":1.456},` +
+ `{"type":"HIST","labels":{"chicken":"cooken","sensor":"myhistogram"},"hist":{"bounds":[1,2,3],"buckets":[1,2,1],"inf":1}},` +
+ `{"type":"HIST_RATE","labels":{"chicken":"cooken","sensor":"myratedhistogram"},"hist":{"bounds":[1,2,3],"buckets":[1,2,1],"inf":1}},` +
+ `{"type":"DGAUGE","labels":{"oki":"toki","sensor":"mytimedgauge"},"value":42.24,"ts":1500000000}` +
+ `]}`)
+ assert.Equal(t, expected, b)
+}
+
+func timeAsRef(t time.Time) *time.Time {
+ return &t
+}
+
+func TestMetrics_with_timestamp_MarshalJSON(t *testing.T) {
+ s := &Metrics{
+ metrics: []Metric{
+ &Counter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ },
+ &Gauge{
+ name: "mytimedgauge",
+ metricType: typeGauge,
+ tags: map[string]string{"oki": "toki"},
+ value: *atomic.NewFloat64(42.24),
+ timestamp: timeAsRef(time.Unix(1500000000, 0)),
+ },
+ },
+ timestamp: timeAsRef(time.Unix(1657710477, 0)),
+ }
+
+ b, err := json.Marshal(s)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"metrics":[` +
+ `{"type":"COUNTER","labels":{"ololo":"trololo","sensor":"mycounter"},"value":42},` +
+ `{"type":"DGAUGE","labels":{"oki":"toki","sensor":"mytimedgauge"},"value":42.24,"ts":1500000000}` +
+ `],"ts":1657710477}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestRated(t *testing.T) {
+ testCases := []struct {
+ name string
+ s interface{}
+ expected Metric
+ }{
+ {
+ "counter",
+ &Counter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ },
+ &Counter{
+ name: "mycounter",
+ metricType: typeRated,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ },
+ },
+ {
+ "gauge",
+ &Gauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewFloat64(42),
+ },
+ &Gauge{
+ name: "mygauge",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewFloat64(42),
+ },
+ },
+ {
+ "timer",
+ &Timer{
+ name: "mytimer",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewDuration(1 * time.Second),
+ },
+ &Timer{
+ name: "mytimer",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewDuration(1 * time.Second),
+ },
+ },
+ {
+ "histogram",
+ &Histogram{
+ name: "myhistogram",
+ metricType: typeHistogram,
+ tags: map[string]string{"ololo": "trololo"},
+ bucketBounds: []float64{1, 2, 3},
+ infValue: *atomic.NewInt64(0),
+ },
+ &Histogram{
+ name: "myhistogram",
+ metricType: typeRatedHistogram,
+ tags: map[string]string{"ololo": "trololo"},
+ bucketBounds: []float64{1, 2, 3},
+ infValue: *atomic.NewInt64(0),
+ },
+ },
+ {
+ "metric_interface",
+ metrics.Counter(&Counter{
+ name: "mycounter",
+ metricType: typeCounter,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ }),
+ &Counter{
+ name: "mycounter",
+ metricType: typeRated,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ Rated(tc.s)
+ assert.Equal(t, tc.expected, tc.s)
+ })
+ }
+}
+
+func TestSplitToChunks(t *testing.T) {
+ zeroMetrics := Metrics{
+ metrics: []Metric{},
+ }
+ oneMetric := Metrics{
+ metrics: []Metric{
+ &Counter{name: "a"},
+ },
+ }
+ twoMetrics := Metrics{
+ metrics: []Metric{
+ &Counter{name: "a"},
+ &Counter{name: "b"},
+ },
+ }
+ fourMetrics := Metrics{
+ metrics: []Metric{
+ &Counter{name: "a"},
+ &Counter{name: "b"},
+ &Counter{name: "c"},
+ &Counter{name: "d"},
+ },
+ }
+ fiveMetrics := Metrics{
+ metrics: []Metric{
+ &Counter{name: "a"},
+ &Counter{name: "b"},
+ &Counter{name: "c"},
+ &Counter{name: "d"},
+ &Counter{name: "e"},
+ },
+ }
+
+ chunks := zeroMetrics.SplitToChunks(2)
+ assert.Equal(t, 1, len(chunks))
+ assert.Equal(t, 0, len(chunks[0].metrics))
+
+ chunks = oneMetric.SplitToChunks(1)
+ assert.Equal(t, 1, len(chunks))
+ assert.Equal(t, 1, len(chunks[0].metrics))
+ assert.Equal(t, "a", chunks[0].metrics[0].Name())
+
+ chunks = oneMetric.SplitToChunks(2)
+ assert.Equal(t, 1, len(chunks))
+ assert.Equal(t, 1, len(chunks[0].metrics))
+ assert.Equal(t, "a", chunks[0].metrics[0].Name())
+
+ chunks = twoMetrics.SplitToChunks(1)
+ assert.Equal(t, 2, len(chunks))
+ assert.Equal(t, 1, len(chunks[0].metrics))
+ assert.Equal(t, 1, len(chunks[1].metrics))
+ assert.Equal(t, "a", chunks[0].metrics[0].Name())
+ assert.Equal(t, "b", chunks[1].metrics[0].Name())
+
+ chunks = twoMetrics.SplitToChunks(2)
+ assert.Equal(t, 1, len(chunks))
+ assert.Equal(t, 2, len(chunks[0].metrics))
+ assert.Equal(t, "a", chunks[0].metrics[0].Name())
+ assert.Equal(t, "b", chunks[0].metrics[1].Name())
+
+ chunks = fourMetrics.SplitToChunks(2)
+ assert.Equal(t, 2, len(chunks))
+ assert.Equal(t, 2, len(chunks[0].metrics))
+ assert.Equal(t, 2, len(chunks[1].metrics))
+ assert.Equal(t, "a", chunks[0].metrics[0].Name())
+ assert.Equal(t, "b", chunks[0].metrics[1].Name())
+ assert.Equal(t, "c", chunks[1].metrics[0].Name())
+ assert.Equal(t, "d", chunks[1].metrics[1].Name())
+
+ chunks = fiveMetrics.SplitToChunks(2)
+ assert.Equal(t, 3, len(chunks))
+ assert.Equal(t, 2, len(chunks[0].metrics))
+ assert.Equal(t, 2, len(chunks[1].metrics))
+ assert.Equal(t, 1, len(chunks[2].metrics))
+ assert.Equal(t, "a", chunks[0].metrics[0].Name())
+ assert.Equal(t, "b", chunks[0].metrics[1].Name())
+ assert.Equal(t, "c", chunks[1].metrics[0].Name())
+ assert.Equal(t, "d", chunks[1].metrics[1].Name())
+ assert.Equal(t, "e", chunks[2].metrics[0].Name())
+
+ chunks = fiveMetrics.SplitToChunks(0)
+ assert.Equal(t, 1, len(chunks))
+ assert.Equal(t, 5, len(chunks[0].metrics))
+ assert.Equal(t, "a", chunks[0].metrics[0].Name())
+ assert.Equal(t, "b", chunks[0].metrics[1].Name())
+ assert.Equal(t, "c", chunks[0].metrics[2].Name())
+ assert.Equal(t, "d", chunks[0].metrics[3].Name())
+ assert.Equal(t, "e", chunks[0].metrics[4].Name())
+}
diff --git a/library/go/core/metrics/solomon/race_test.go b/library/go/core/metrics/solomon/race_test.go
new file mode 100644
index 0000000000..32be6f34fb
--- /dev/null
+++ b/library/go/core/metrics/solomon/race_test.go
@@ -0,0 +1,150 @@
+package solomon
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+type spinBarrier struct {
+ count int64
+ waiting atomic.Int64
+ step atomic.Int64
+}
+
+func newSpinBarrier(size int) *spinBarrier {
+ return &spinBarrier{count: int64(size)}
+}
+
+func (b *spinBarrier) wait() {
+ s := b.step.Load()
+ w := b.waiting.Add(1)
+ if w == b.count {
+ b.waiting.Store(0)
+ b.step.Add(1)
+ } else {
+ for s == b.step.Load() {
+ // noop
+ }
+ }
+}
+
+func TestRaceDurationHistogramVecVersusStreamJson(t *testing.T) {
+ // Regression test: https://github.com/ydb-platform/ydb/review/2690822/details
+ registry := NewRegistry(NewRegistryOpts())
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ const stepCount = 200
+
+ barrier := newSpinBarrier(2)
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ // Consumer
+ defer wg.Done()
+ out := bytes.NewBuffer(nil)
+ for i := 0; i < stepCount; i++ {
+ out.Reset()
+ barrier.wait()
+ _, err := registry.StreamJSON(ctx, out)
+ if err != nil {
+ require.ErrorIs(t, err, context.Canceled)
+ break
+ }
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ // Producer
+ defer wg.Done()
+
+ const success = "success"
+ const version = "version"
+ vecs := make([]metrics.TimerVec, 0)
+ buckets := metrics.NewDurationBuckets(1, 2, 3)
+ ProducerLoop:
+ for i := 0; i < stepCount; i++ {
+ barrier.wait()
+ vec := registry.DurationHistogramVec(
+ fmt.Sprintf("latency-%v", i),
+ buckets,
+ []string{success, version},
+ )
+ Rated(vec)
+ vecs = append(vecs, vec)
+ for _, v := range vecs {
+ v.With(map[string]string{success: "ok", version: "123"}).RecordDuration(time.Second)
+ v.With(map[string]string{success: "false", version: "123"}).RecordDuration(time.Millisecond)
+ }
+ select {
+ case <-ctx.Done():
+ break ProducerLoop
+ default:
+ // noop
+ }
+ }
+ }()
+ wg.Wait()
+}
+
+func TestRaceDurationHistogramRecordDurationVersusStreamJson(t *testing.T) {
+ // Regression test: https://github.com/ydb-platform/ydb/review/2690822/details
+
+ registry := NewRegistry(NewRegistryOpts())
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ const stepCount = 200
+ barrier := newSpinBarrier(2)
+ wg := sync.WaitGroup{}
+
+ wg.Add(1)
+ go func() {
+ // Consumer
+ defer wg.Done()
+ out := bytes.NewBuffer(nil)
+ for i := 0; i < stepCount; i++ {
+ out.Reset()
+ barrier.wait()
+ _, err := registry.StreamJSON(ctx, out)
+ if err != nil {
+ require.ErrorIs(t, err, context.Canceled)
+ break
+ }
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ // Producer
+ defer wg.Done()
+
+ buckets := metrics.NewDurationBuckets(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+ hist := registry.DurationHistogram("latency", buckets)
+ // Rated(hist)
+
+ ProducerLoop:
+ for i := 0; i < stepCount; i++ {
+ barrier.wait()
+ hist.RecordDuration(time.Duration(i % 10))
+ select {
+ case <-ctx.Done():
+ break ProducerLoop
+ default:
+ // noop
+ }
+ }
+ }()
+ wg.Wait()
+}
diff --git a/library/go/core/metrics/solomon/registry.go b/library/go/core/metrics/solomon/registry.go
new file mode 100644
index 0000000000..0ad4d9378a
--- /dev/null
+++ b/library/go/core/metrics/solomon/registry.go
@@ -0,0 +1,256 @@
+package solomon
+
+import (
+ "reflect"
+ "strconv"
+ "sync"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/metricsutil"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/registryutil"
+)
+
+var _ metrics.Registry = (*Registry)(nil)
+
+type Registry struct {
+ separator string
+ prefix string
+ tags map[string]string
+ rated bool
+ useNameTag bool
+
+ subregistries map[string]*Registry
+ m *sync.Mutex
+
+ metrics *sync.Map
+}
+
+func NewRegistry(opts *RegistryOpts) *Registry {
+ r := &Registry{
+ separator: ".",
+ useNameTag: false,
+
+ subregistries: make(map[string]*Registry),
+ m: new(sync.Mutex),
+
+ metrics: new(sync.Map),
+ }
+
+ if opts != nil {
+ r.separator = string(opts.Separator)
+ r.prefix = opts.Prefix
+ r.tags = opts.Tags
+ r.rated = opts.Rated
+ r.useNameTag = opts.UseNameTag
+ for _, collector := range opts.Collectors {
+ collector(r)
+ }
+ }
+
+ return r
+}
+
+// Rated returns copy of registry with rated set to desired value.
+func (r Registry) Rated(rated bool) metrics.Registry {
+ return &Registry{
+ separator: r.separator,
+ prefix: r.prefix,
+ tags: r.tags,
+ rated: rated,
+ useNameTag: r.useNameTag,
+
+ subregistries: r.subregistries,
+ m: r.m,
+
+ metrics: r.metrics,
+ }
+}
+
+// WithTags creates new sub-scope, where each metric has tags attached to it.
+func (r Registry) WithTags(tags map[string]string) metrics.Registry {
+ return r.newSubregistry(r.prefix, registryutil.MergeTags(r.tags, tags))
+}
+
+// WithPrefix creates new sub-scope, where each metric has prefix added to it name.
+func (r Registry) WithPrefix(prefix string) metrics.Registry {
+ return r.newSubregistry(registryutil.BuildFQName(r.separator, r.prefix, prefix), r.tags)
+}
+
+// ComposeName builds FQ name with appropriate separator.
+func (r Registry) ComposeName(parts ...string) string {
+ return registryutil.BuildFQName(r.separator, parts...)
+}
+
+func (r Registry) Counter(name string) metrics.Counter {
+ s := &Counter{
+ name: r.newMetricName(name),
+ metricType: typeCounter,
+ tags: r.tags,
+
+ useNameTag: r.useNameTag,
+ }
+
+ return r.registerMetric(s).(metrics.Counter)
+}
+
+func (r Registry) FuncCounter(name string, function func() int64) metrics.FuncCounter {
+ s := &FuncCounter{
+ name: r.newMetricName(name),
+ metricType: typeCounter,
+ tags: r.tags,
+ function: function,
+ useNameTag: r.useNameTag,
+ }
+
+ return r.registerMetric(s).(metrics.FuncCounter)
+}
+
+func (r Registry) Gauge(name string) metrics.Gauge {
+ s := &Gauge{
+ name: r.newMetricName(name),
+ metricType: typeGauge,
+ tags: r.tags,
+ useNameTag: r.useNameTag,
+ }
+
+ return r.registerMetric(s).(metrics.Gauge)
+}
+
+func (r Registry) FuncGauge(name string, function func() float64) metrics.FuncGauge {
+ s := &FuncGauge{
+ name: r.newMetricName(name),
+ metricType: typeGauge,
+ tags: r.tags,
+ function: function,
+ useNameTag: r.useNameTag,
+ }
+
+ return r.registerMetric(s).(metrics.FuncGauge)
+}
+
+func (r Registry) IntGauge(name string) metrics.IntGauge {
+ s := &IntGauge{
+ name: r.newMetricName(name),
+ metricType: typeIGauge,
+ tags: r.tags,
+ useNameTag: r.useNameTag,
+ }
+
+ return r.registerMetric(s).(metrics.IntGauge)
+}
+
+func (r Registry) FuncIntGauge(name string, function func() int64) metrics.FuncIntGauge {
+ s := &FuncIntGauge{
+ name: r.newMetricName(name),
+ metricType: typeIGauge,
+ tags: r.tags,
+ function: function,
+ useNameTag: r.useNameTag,
+ }
+
+ return r.registerMetric(s).(metrics.FuncIntGauge)
+}
+
+func (r Registry) Timer(name string) metrics.Timer {
+ s := &Timer{
+ name: r.newMetricName(name),
+ metricType: typeGauge,
+ tags: r.tags,
+ useNameTag: r.useNameTag,
+ }
+
+ return r.registerMetric(s).(metrics.Timer)
+}
+
+func (r Registry) Histogram(name string, buckets metrics.Buckets) metrics.Histogram {
+ s := &Histogram{
+ name: r.newMetricName(name),
+ metricType: typeHistogram,
+ tags: r.tags,
+ bucketBounds: metricsutil.BucketsBounds(buckets),
+ bucketValues: make([]int64, buckets.Size()),
+ useNameTag: r.useNameTag,
+ }
+
+ return r.registerMetric(s).(metrics.Histogram)
+}
+
+func (r Registry) DurationHistogram(name string, buckets metrics.DurationBuckets) metrics.Timer {
+ s := &Histogram{
+ name: r.newMetricName(name),
+ metricType: typeHistogram,
+ tags: r.tags,
+ bucketBounds: metricsutil.DurationBucketsBounds(buckets),
+ bucketValues: make([]int64, buckets.Size()),
+ useNameTag: r.useNameTag,
+ }
+
+ return r.registerMetric(s).(metrics.Timer)
+}
+
+func (r *Registry) newSubregistry(prefix string, tags map[string]string) *Registry {
+ // differ simple and rated registries
+ keyTags := registryutil.MergeTags(tags, map[string]string{"rated": strconv.FormatBool(r.rated)})
+ registryKey := registryutil.BuildRegistryKey(prefix, keyTags)
+
+ r.m.Lock()
+ defer r.m.Unlock()
+
+ if existing, ok := r.subregistries[registryKey]; ok {
+ return existing
+ }
+
+ subregistry := &Registry{
+ separator: r.separator,
+ prefix: prefix,
+ tags: tags,
+ rated: r.rated,
+ useNameTag: r.useNameTag,
+
+ subregistries: r.subregistries,
+ m: r.m,
+
+ metrics: r.metrics,
+ }
+
+ r.subregistries[registryKey] = subregistry
+ return subregistry
+}
+
+func (r *Registry) newMetricName(name string) string {
+ return registryutil.BuildFQName(r.separator, r.prefix, name)
+}
+
+func (r *Registry) registerMetric(s Metric) Metric {
+ if r.rated {
+ Rated(s)
+ }
+
+ key := r.metricKey(s)
+
+ oldMetric, loaded := r.metrics.LoadOrStore(key, s)
+ if !loaded {
+ return s
+ }
+
+ if reflect.TypeOf(oldMetric) == reflect.TypeOf(s) {
+ return oldMetric.(Metric)
+ } else {
+ r.metrics.Store(key, s)
+ return s
+ }
+}
+
+func (r *Registry) unregisterMetric(s Metric) {
+ if r.rated {
+ Rated(s)
+ }
+
+ r.metrics.Delete(r.metricKey(s))
+}
+
+func (r *Registry) metricKey(s Metric) string {
+ // differ simple and rated registries
+ keyTags := registryutil.MergeTags(r.tags, map[string]string{"rated": strconv.FormatBool(r.rated)})
+ return registryutil.BuildRegistryKey(s.Name(), keyTags)
+}
diff --git a/library/go/core/metrics/solomon/registry_opts.go b/library/go/core/metrics/solomon/registry_opts.go
new file mode 100644
index 0000000000..c3df17940a
--- /dev/null
+++ b/library/go/core/metrics/solomon/registry_opts.go
@@ -0,0 +1,87 @@
+package solomon
+
+import (
+ "context"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/collect"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/registryutil"
+)
+
+type RegistryOpts struct {
+ Separator rune
+ Prefix string
+ Tags map[string]string
+ Rated bool
+ UseNameTag bool
+ Collectors []func(metrics.Registry)
+}
+
+// NewRegistryOpts returns new initialized instance of RegistryOpts
+func NewRegistryOpts() *RegistryOpts {
+ return &RegistryOpts{
+ Separator: '.',
+ Tags: make(map[string]string),
+ UseNameTag: false,
+ }
+}
+
+// SetUseNameTag overrides current UseNameTag opt
+func (o *RegistryOpts) SetUseNameTag(useNameTag bool) *RegistryOpts {
+ o.UseNameTag = useNameTag
+ return o
+}
+
+// SetTags overrides existing tags
+func (o *RegistryOpts) SetTags(tags map[string]string) *RegistryOpts {
+ o.Tags = tags
+ return o
+}
+
+// AddTags merges given tags with existing
+func (o *RegistryOpts) AddTags(tags map[string]string) *RegistryOpts {
+ for k, v := range tags {
+ o.Tags[k] = v
+ }
+ return o
+}
+
+// SetPrefix overrides existing prefix
+func (o *RegistryOpts) SetPrefix(prefix string) *RegistryOpts {
+ o.Prefix = prefix
+ return o
+}
+
+// AppendPrefix adds given prefix as postfix to existing using separator
+func (o *RegistryOpts) AppendPrefix(prefix string) *RegistryOpts {
+ o.Prefix = registryutil.BuildFQName(string(o.Separator), o.Prefix, prefix)
+ return o
+}
+
+// SetSeparator overrides existing separator
+func (o *RegistryOpts) SetSeparator(separator rune) *RegistryOpts {
+ o.Separator = separator
+ return o
+}
+
+// SetRated overrides existing rated flag
+func (o *RegistryOpts) SetRated(rated bool) *RegistryOpts {
+ o.Rated = rated
+ return o
+}
+
+// AddCollectors adds collectors that handle their metrics automatically (e.g. system metrics).
+func (o *RegistryOpts) AddCollectors(
+ ctx context.Context, c metrics.CollectPolicy, collectors ...collect.Func,
+) *RegistryOpts {
+ if len(collectors) == 0 {
+ return o
+ }
+
+ o.Collectors = append(o.Collectors, func(r metrics.Registry) {
+ for _, collector := range collectors {
+ collector(ctx, r, c)
+ }
+ })
+ return o
+}
diff --git a/library/go/core/metrics/solomon/registry_test.go b/library/go/core/metrics/solomon/registry_test.go
new file mode 100644
index 0000000000..a870203b31
--- /dev/null
+++ b/library/go/core/metrics/solomon/registry_test.go
@@ -0,0 +1,168 @@
+package solomon
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/atomic"
+)
+
+func TestRegistry_Gather(t *testing.T) {
+ r := &Registry{
+ separator: ".",
+ prefix: "myprefix",
+ tags: make(map[string]string),
+ subregistries: make(map[string]*Registry),
+ metrics: func() *sync.Map {
+ metrics := map[string]Metric{
+ "myprefix.mycounter": &Counter{
+ name: "myprefix.mycounter",
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewInt64(42),
+ },
+ "myprefix.mygauge": &Gauge{
+ name: "myprefix.mygauge",
+ tags: map[string]string{"shimba": "boomba"},
+ value: *atomic.NewFloat64(14.89),
+ },
+ "myprefix.mytimer": &Timer{
+ name: "myprefix.mytimer",
+ tags: map[string]string{"looken": "tooken"},
+ value: *atomic.NewDuration(1456 * time.Millisecond),
+ },
+ "myprefix.myhistogram": &Histogram{
+ name: "myprefix.myhistogram",
+ tags: map[string]string{"chicken": "cooken"},
+ bucketBounds: []float64{1, 2, 3},
+ bucketValues: []int64{1, 2, 1},
+ infValue: *atomic.NewInt64(1),
+ },
+ }
+
+ sm := new(sync.Map)
+ for k, v := range metrics {
+ sm.Store(k, v)
+ }
+
+ return sm
+ }(),
+ }
+
+ s, err := r.Gather()
+ assert.NoError(t, err)
+
+ expected := &Metrics{}
+ r.metrics.Range(func(_, s interface{}) bool {
+ expected.metrics = append(expected.metrics, s.(Metric))
+ return true
+ })
+
+ opts := cmp.Options{
+ cmp.AllowUnexported(Metrics{}, Counter{}, Gauge{}, Timer{}, Histogram{}),
+ cmpopts.IgnoreUnexported(sync.Mutex{}, atomic.Duration{}, atomic.Int64{}, atomic.Float64{}),
+ // this will sort both slices for latest tests as well
+ cmpopts.SortSlices(func(x, y Metric) bool {
+ return x.Name() < y.Name()
+ }),
+ }
+
+ assert.True(t, cmp.Equal(expected, s, opts...), cmp.Diff(expected, s, opts...))
+
+ for _, sen := range s.metrics {
+ var expectedMetric Metric
+ for _, expSen := range expected.metrics {
+ if expSen.Name() == sen.Name() {
+ expectedMetric = expSen
+ break
+ }
+ }
+ require.NotNil(t, expectedMetric)
+
+ assert.NotEqual(t, fmt.Sprintf("%p", expectedMetric), fmt.Sprintf("%p", sen))
+ assert.IsType(t, expectedMetric, sen)
+
+ switch st := sen.(type) {
+ case *Counter:
+ assert.NotEqual(t, fmt.Sprintf("%p", expectedMetric.(*Counter)), fmt.Sprintf("%p", st))
+ case *Gauge:
+ assert.NotEqual(t, fmt.Sprintf("%p", expectedMetric.(*Gauge)), fmt.Sprintf("%p", st))
+ case *Timer:
+ assert.NotEqual(t, fmt.Sprintf("%p", expectedMetric.(*Timer)), fmt.Sprintf("%p", st))
+ case *Histogram:
+ assert.NotEqual(t, fmt.Sprintf("%p", expectedMetric.(*Histogram)), fmt.Sprintf("%p", st))
+ default:
+ t.Fatalf("unexpected metric type: %T", sen)
+ }
+ }
+}
+
+func TestDoubleRegistration(t *testing.T) {
+ r := NewRegistry(NewRegistryOpts())
+
+ c0 := r.Counter("counter")
+ c1 := r.Counter("counter")
+ require.Equal(t, c0, c1)
+
+ g0 := r.Gauge("counter")
+ g1 := r.Gauge("counter")
+ require.Equal(t, g0, g1)
+
+ c2 := r.Counter("counter")
+ require.NotEqual(t, reflect.ValueOf(c0).Elem().UnsafeAddr(), reflect.ValueOf(c2).Elem().UnsafeAddr())
+}
+
+func TestSubregistry(t *testing.T) {
+ r := NewRegistry(NewRegistryOpts())
+
+ r0 := r.WithPrefix("one")
+ r1 := r0.WithPrefix("two")
+ r2 := r0.WithTags(map[string]string{"foo": "bar"})
+
+ _ = r0.Counter("counter")
+ _ = r1.Counter("counter")
+ _ = r2.Counter("counter")
+}
+
+func TestSubregistry_TagAndPrefixReorder(t *testing.T) {
+ r := NewRegistry(NewRegistryOpts())
+
+ r0 := r.WithPrefix("one")
+ r1 := r.WithTags(map[string]string{"foo": "bar"})
+
+ r3 := r0.WithTags(map[string]string{"foo": "bar"})
+ r4 := r1.WithPrefix("one")
+
+ require.True(t, r3 == r4)
+}
+
+func TestRatedRegistry(t *testing.T) {
+ r := NewRegistry(NewRegistryOpts().SetRated(true))
+ s := r.Counter("counter")
+ b, _ := json.Marshal(s)
+ expected := []byte(`{"type":"RATE","labels":{"sensor":"counter"},"value":0}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestNameTagRegistry(t *testing.T) {
+ r := NewRegistry(NewRegistryOpts().SetUseNameTag(true))
+ s := r.Counter("counter")
+
+ b, _ := json.Marshal(s)
+ expected := []byte(`{"type":"COUNTER","labels":{"name":"counter"},"value":0}`)
+ assert.Equal(t, expected, b)
+
+ sr := r.WithTags(map[string]string{"foo": "bar"})
+ ssr := sr.Counter("sub_counter")
+
+ b1, _ := json.Marshal(ssr)
+ expected1 := []byte(`{"type":"COUNTER","labels":{"foo":"bar","name":"sub_counter"},"value":0}`)
+ assert.Equal(t, expected1, b1)
+}
diff --git a/library/go/core/metrics/solomon/spack.go b/library/go/core/metrics/solomon/spack.go
new file mode 100644
index 0000000000..48938d19b6
--- /dev/null
+++ b/library/go/core/metrics/solomon/spack.go
@@ -0,0 +1,387 @@
+package solomon
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "io"
+
+ "github.com/ydb-platform/ydb/library/go/core/xerrors"
+)
+
+type spackVersion uint16
+
+const (
+ version11 spackVersion = 0x0101
+ version12 spackVersion = 0x0102
+)
+
+type errWriter struct {
+ w io.Writer
+ err error
+}
+
+func (ew *errWriter) binaryWrite(data interface{}) {
+ if ew.err != nil {
+ return
+ }
+ switch t := data.(type) {
+ case uint8:
+ ew.err = binary.Write(ew.w, binary.LittleEndian, data.(uint8))
+ case uint16:
+ ew.err = binary.Write(ew.w, binary.LittleEndian, data.(uint16))
+ case uint32:
+ ew.err = binary.Write(ew.w, binary.LittleEndian, data.(uint32))
+ default:
+ ew.err = xerrors.Errorf("binaryWrite not supported type %v", t)
+ }
+}
+
+func writeULEB128(w io.Writer, value uint32) error {
+ remaining := value >> 7
+ for remaining != 0 {
+ err := binary.Write(w, binary.LittleEndian, uint8(value&0x7f|0x80))
+ if err != nil {
+ return xerrors.Errorf("binary.Write failed: %w", err)
+ }
+ value = remaining
+ remaining >>= 7
+ }
+ err := binary.Write(w, binary.LittleEndian, uint8(value&0x7f))
+ if err != nil {
+ return xerrors.Errorf("binary.Write failed: %w", err)
+ }
+ return err
+}
+
+type spackMetric struct {
+ flags uint8
+
+ nameValueIndex uint32
+ labelsCount uint32
+ labels bytes.Buffer
+
+ metric Metric
+}
+
+func (s *spackMetric) writeLabelPool(se *spackEncoder, namesIdx map[string]uint32, valuesIdx map[string]uint32, name string, value string) error {
+ _, ok := namesIdx[name]
+ if !ok {
+ namesIdx[name] = se.nameCounter
+ se.nameCounter++
+ _, err := se.labelNamePool.WriteString(name)
+ if err != nil {
+ return err
+ }
+ err = se.labelNamePool.WriteByte(0)
+ if err != nil {
+ return err
+ }
+ }
+
+ _, ok = valuesIdx[value]
+ if !ok {
+ valuesIdx[value] = se.valueCounter
+ se.valueCounter++
+ _, err := se.labelValuePool.WriteString(value)
+ if err != nil {
+ return err
+ }
+ err = se.labelValuePool.WriteByte(0)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *spackMetric) writeLabel(se *spackEncoder, namesIdx map[string]uint32, valuesIdx map[string]uint32, name string, value string) error {
+ s.labelsCount++
+
+ err := s.writeLabelPool(se, namesIdx, valuesIdx, name, value)
+ if err != nil {
+ return err
+ }
+
+ err = writeULEB128(&s.labels, uint32(namesIdx[name]))
+ if err != nil {
+ return err
+ }
+ err = writeULEB128(&s.labels, uint32(valuesIdx[value]))
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *spackMetric) writeMetric(w io.Writer, version spackVersion) error {
+ metricValueType := valueTypeOneWithoutTS
+ if s.metric.getTimestamp() != nil {
+ metricValueType = valueTypeOneWithTS
+ }
+ // library/cpp/monlib/encode/spack/spack_v1_encoder.cpp?rev=r9098142#L190
+ types := uint8(s.metric.getType()<<2) | uint8(metricValueType)
+ err := binary.Write(w, binary.LittleEndian, types)
+ if err != nil {
+ return xerrors.Errorf("binary.Write types failed: %w", err)
+ }
+
+ err = binary.Write(w, binary.LittleEndian, uint8(s.flags))
+ if err != nil {
+ return xerrors.Errorf("binary.Write flags failed: %w", err)
+ }
+ if version >= version12 {
+ err = writeULEB128(w, uint32(s.nameValueIndex))
+ if err != nil {
+ return xerrors.Errorf("writeULEB128 name value index: %w", err)
+ }
+ }
+ err = writeULEB128(w, uint32(s.labelsCount))
+ if err != nil {
+ return xerrors.Errorf("writeULEB128 labels count failed: %w", err)
+ }
+
+ _, err = w.Write(s.labels.Bytes()) // s.writeLabels(w)
+ if err != nil {
+ return xerrors.Errorf("write labels failed: %w", err)
+ }
+ if s.metric.getTimestamp() != nil {
+ err = binary.Write(w, binary.LittleEndian, uint32(s.metric.getTimestamp().Unix()))
+ if err != nil {
+ return xerrors.Errorf("write timestamp failed: %w", err)
+ }
+ }
+
+ switch s.metric.getType() {
+ case typeGauge:
+ err = binary.Write(w, binary.LittleEndian, s.metric.getValue().(float64))
+ if err != nil {
+ return xerrors.Errorf("binary.Write gauge value failed: %w", err)
+ }
+ case typeIGauge:
+ err = binary.Write(w, binary.LittleEndian, s.metric.getValue().(int64))
+ if err != nil {
+ return xerrors.Errorf("binary.Write igauge value failed: %w", err)
+ }
+ case typeCounter, typeRated:
+ err = binary.Write(w, binary.LittleEndian, uint64(s.metric.getValue().(int64)))
+ if err != nil {
+ return xerrors.Errorf("binary.Write counter value failed: %w", err)
+ }
+ case typeHistogram, typeRatedHistogram:
+ h := s.metric.getValue().(histogram)
+ err = h.writeHistogram(w)
+ if err != nil {
+ return xerrors.Errorf("writeHistogram failed: %w", err)
+ }
+ default:
+ return xerrors.Errorf("unknown metric type: %v", s.metric.getType())
+ }
+ return nil
+}
+
+type SpackOpts func(*spackEncoder)
+
+func WithVersion12() func(*spackEncoder) {
+ return func(se *spackEncoder) {
+ se.version = version12
+ }
+}
+
+type spackEncoder struct {
+ context context.Context
+ compression uint8
+ version spackVersion
+
+ nameCounter uint32
+ valueCounter uint32
+
+ labelNamePool bytes.Buffer
+ labelValuePool bytes.Buffer
+
+ metrics Metrics
+}
+
+func NewSpackEncoder(ctx context.Context, compression CompressionType, metrics *Metrics, opts ...SpackOpts) *spackEncoder {
+ if metrics == nil {
+ metrics = &Metrics{}
+ }
+ se := &spackEncoder{
+ context: ctx,
+ compression: uint8(compression),
+ version: version11,
+ metrics: *metrics,
+ }
+ for _, op := range opts {
+ op(se)
+ }
+ return se
+}
+
+func (se *spackEncoder) writeLabels() ([]spackMetric, error) {
+ namesIdx := make(map[string]uint32)
+ valuesIdx := make(map[string]uint32)
+ spackMetrics := make([]spackMetric, len(se.metrics.metrics))
+
+ for idx, metric := range se.metrics.metrics {
+ m := spackMetric{metric: metric}
+
+ var err error
+ if se.version >= version12 {
+ err = m.writeLabelPool(se, namesIdx, valuesIdx, metric.getNameTag(), metric.Name())
+ m.nameValueIndex = valuesIdx[metric.getNameTag()]
+ } else {
+ err = m.writeLabel(se, namesIdx, valuesIdx, metric.getNameTag(), metric.Name())
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ for name, value := range metric.getLabels() {
+ if err := m.writeLabel(se, namesIdx, valuesIdx, name, value); err != nil {
+ return nil, err
+ }
+
+ }
+ spackMetrics[idx] = m
+ }
+
+ return spackMetrics, nil
+}
+
+func (se *spackEncoder) Encode(w io.Writer) (written int, err error) {
+ spackMetrics, err := se.writeLabels()
+ if err != nil {
+ return written, xerrors.Errorf("writeLabels failed: %w", err)
+ }
+
+ err = se.writeHeader(w)
+ if err != nil {
+ return written, xerrors.Errorf("writeHeader failed: %w", err)
+ }
+ written += HeaderSize
+ compression := CompressionType(se.compression)
+
+ cw := newCompressedWriter(w, compression)
+
+ err = se.writeLabelNamesPool(cw)
+ if err != nil {
+ return written, xerrors.Errorf("writeLabelNamesPool failed: %w", err)
+ }
+
+ err = se.writeLabelValuesPool(cw)
+ if err != nil {
+ return written, xerrors.Errorf("writeLabelValuesPool failed: %w", err)
+ }
+
+ err = se.writeCommonTime(cw)
+ if err != nil {
+ return written, xerrors.Errorf("writeCommonTime failed: %w", err)
+ }
+
+ err = se.writeCommonLabels(cw)
+ if err != nil {
+ return written, xerrors.Errorf("writeCommonLabels failed: %w", err)
+ }
+
+ err = se.writeMetricsData(cw, spackMetrics)
+ if err != nil {
+ return written, xerrors.Errorf("writeMetricsData failed: %w", err)
+ }
+
+ err = cw.Close()
+ if err != nil {
+ return written, xerrors.Errorf("close failed: %w", err)
+ }
+
+ switch compression {
+ case CompressionNone:
+ written += cw.(*noCompressionWriteCloser).written
+ case CompressionLz4:
+ written += cw.(*lz4CompressionWriteCloser).written
+ }
+
+ return written, nil
+}
+
+func (se *spackEncoder) writeHeader(w io.Writer) error {
+ if se.context.Err() != nil {
+ return xerrors.Errorf("streamSpack context error: %w", se.context.Err())
+ }
+ ew := &errWriter{w: w}
+ ew.binaryWrite(uint16(0x5053)) // Magic
+ ew.binaryWrite(uint16(se.version)) // Version
+ ew.binaryWrite(uint16(24)) // HeaderSize
+ ew.binaryWrite(uint8(0)) // TimePrecision(SECONDS)
+ ew.binaryWrite(uint8(se.compression)) // CompressionAlg
+ ew.binaryWrite(uint32(se.labelNamePool.Len())) // LabelNamesSize
+ ew.binaryWrite(uint32(se.labelValuePool.Len())) // LabelValuesSize
+ ew.binaryWrite(uint32(len(se.metrics.metrics))) // MetricsCount
+ ew.binaryWrite(uint32(len(se.metrics.metrics))) // PointsCount
+ if ew.err != nil {
+ return xerrors.Errorf("binaryWrite failed: %w", ew.err)
+ }
+ return nil
+}
+
+func (se *spackEncoder) writeLabelNamesPool(w io.Writer) error {
+ if se.context.Err() != nil {
+ return xerrors.Errorf("streamSpack context error: %w", se.context.Err())
+ }
+ _, err := w.Write(se.labelNamePool.Bytes())
+ if err != nil {
+ return xerrors.Errorf("write labelNamePool failed: %w", err)
+ }
+ return nil
+}
+
+func (se *spackEncoder) writeLabelValuesPool(w io.Writer) error {
+ if se.context.Err() != nil {
+ return xerrors.Errorf("streamSpack context error: %w", se.context.Err())
+ }
+
+ _, err := w.Write(se.labelValuePool.Bytes())
+ if err != nil {
+ return xerrors.Errorf("write labelValuePool failed: %w", err)
+ }
+ return nil
+}
+
+func (se *spackEncoder) writeCommonTime(w io.Writer) error {
+ if se.context.Err() != nil {
+ return xerrors.Errorf("streamSpack context error: %w", se.context.Err())
+ }
+
+ if se.metrics.timestamp == nil {
+ return binary.Write(w, binary.LittleEndian, uint32(0))
+ }
+ return binary.Write(w, binary.LittleEndian, uint32(se.metrics.timestamp.Unix()))
+}
+
+func (se *spackEncoder) writeCommonLabels(w io.Writer) error {
+ if se.context.Err() != nil {
+ return xerrors.Errorf("streamSpack context error: %w", se.context.Err())
+ }
+
+ _, err := w.Write([]byte{0})
+ if err != nil {
+ return xerrors.Errorf("write commonLabels failed: %w", err)
+ }
+ return nil
+}
+
+func (se *spackEncoder) writeMetricsData(w io.Writer, metrics []spackMetric) error {
+ for _, s := range metrics {
+ if se.context.Err() != nil {
+ return xerrors.Errorf("streamSpack context error: %w", se.context.Err())
+ }
+
+ err := s.writeMetric(w, se.version)
+ if err != nil {
+ return xerrors.Errorf("write metric failed: %w", err)
+ }
+ }
+ return nil
+}
diff --git a/library/go/core/metrics/solomon/spack_compression.go b/library/go/core/metrics/solomon/spack_compression.go
new file mode 100644
index 0000000000..004fe0150d
--- /dev/null
+++ b/library/go/core/metrics/solomon/spack_compression.go
@@ -0,0 +1,162 @@
+package solomon
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/OneOfOne/xxhash"
+ "github.com/pierrec/lz4"
+)
+
+type CompressionType uint8
+
+const (
+ CompressionNone CompressionType = 0x0
+ CompressionZlib CompressionType = 0x1
+ CompressionZstd CompressionType = 0x2
+ CompressionLz4 CompressionType = 0x3
+)
+
+const (
+ compressionFrameLength = 512 * 1024
+ hashTableSize = 64 * 1024
+)
+
+type noCompressionWriteCloser struct {
+ underlying io.Writer
+ written int
+}
+
+func (w *noCompressionWriteCloser) Write(p []byte) (int, error) {
+ n, err := w.underlying.Write(p)
+ w.written += n
+ return n, err
+}
+
+func (w *noCompressionWriteCloser) Close() error {
+ return nil
+}
+
+type lz4CompressionWriteCloser struct {
+ underlying io.Writer
+ buffer []byte
+ table []int
+ written int
+}
+
+func (w *lz4CompressionWriteCloser) flushFrame() (written int, err error) {
+ src := w.buffer
+ dst := make([]byte, lz4.CompressBlockBound(len(src)))
+
+ sz, err := lz4.CompressBlock(src, dst, w.table)
+ if err != nil {
+ return written, err
+ }
+
+ if sz == 0 {
+ dst = src
+ } else {
+ dst = dst[:sz]
+ }
+
+ err = binary.Write(w.underlying, binary.LittleEndian, uint32(len(dst)))
+ if err != nil {
+ return written, err
+ }
+ w.written += 4
+
+ err = binary.Write(w.underlying, binary.LittleEndian, uint32(len(src)))
+ if err != nil {
+ return written, err
+ }
+ w.written += 4
+
+ n, err := w.underlying.Write(dst)
+ if err != nil {
+ return written, err
+ }
+ w.written += n
+
+ checksum := xxhash.Checksum32S(dst, 0x1337c0de)
+ err = binary.Write(w.underlying, binary.LittleEndian, checksum)
+ if err != nil {
+ return written, err
+ }
+ w.written += 4
+
+ w.buffer = w.buffer[:0]
+
+ return written, nil
+}
+
+func (w *lz4CompressionWriteCloser) Write(p []byte) (written int, err error) {
+ q := p[:]
+ for len(q) > 0 {
+ space := compressionFrameLength - len(w.buffer)
+ if space == 0 {
+ n, err := w.flushFrame()
+ if err != nil {
+ return written, err
+ }
+ w.written += n
+ space = compressionFrameLength
+ }
+ length := len(q)
+ if length > space {
+ length = space
+ }
+ w.buffer = append(w.buffer, q[:length]...)
+ q = q[length:]
+ }
+ return written, nil
+}
+
+func (w *lz4CompressionWriteCloser) Close() error {
+ var err error
+ if len(w.buffer) > 0 {
+ n, err := w.flushFrame()
+ if err != nil {
+ return err
+ }
+ w.written += n
+ }
+ err = binary.Write(w.underlying, binary.LittleEndian, uint32(0))
+ if err != nil {
+ return nil
+ }
+ w.written += 4
+
+ err = binary.Write(w.underlying, binary.LittleEndian, uint32(0))
+ if err != nil {
+ return nil
+ }
+ w.written += 4
+
+ err = binary.Write(w.underlying, binary.LittleEndian, uint32(0))
+ if err != nil {
+ return nil
+ }
+ w.written += 4
+
+ return nil
+}
+
+func newCompressedWriter(w io.Writer, compression CompressionType) io.WriteCloser {
+ switch compression {
+ case CompressionNone:
+ return &noCompressionWriteCloser{w, 0}
+ case CompressionZlib:
+ panic("zlib compression not supported")
+ case CompressionZstd:
+ panic("zstd compression not supported")
+ case CompressionLz4:
+ return &lz4CompressionWriteCloser{
+ w,
+ make([]byte, 0, compressionFrameLength),
+ make([]int, hashTableSize),
+ 0,
+ }
+ default:
+ panic("unsupported compression algorithm")
+ }
+}
diff --git a/library/go/core/metrics/solomon/spack_compression_test.go b/library/go/core/metrics/solomon/spack_compression_test.go
new file mode 100644
index 0000000000..baa8a8d1e9
--- /dev/null
+++ b/library/go/core/metrics/solomon/spack_compression_test.go
@@ -0,0 +1,26 @@
+package solomon
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func compress(t *testing.T, c uint8, s string) []byte {
+ buf := bytes.Buffer{}
+ w := newCompressedWriter(&buf, CompressionType(c))
+ _, err := w.Write([]byte(s))
+ assert.Equal(t, nil, err)
+ assert.Equal(t, nil, w.Close())
+ return buf.Bytes()
+}
+
+func TestCompression_None(t *testing.T) {
+ assert.Equal(t, []byte(nil), compress(t, uint8(CompressionNone), ""))
+ assert.Equal(t, []byte{'a'}, compress(t, uint8(CompressionNone), "a"))
+}
+
+func TestCompression_Lz4(t *testing.T) {
+ assert.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, compress(t, uint8(CompressionLz4), ""))
+}
diff --git a/library/go/core/metrics/solomon/spack_test.go b/library/go/core/metrics/solomon/spack_test.go
new file mode 100644
index 0000000000..64b504bf42
--- /dev/null
+++ b/library/go/core/metrics/solomon/spack_test.go
@@ -0,0 +1,184 @@
+package solomon
+
+import (
+ "bytes"
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_metrics_encode(t *testing.T) {
+ expectHeader := []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x0, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0x8, 0x0, 0x0, 0x0, // label values size
+ 0x1, 0x0, 0x0, 0x0, // metric count
+ 0x1, 0x0, 0x0, 0x0, // point count
+ // label names pool
+ 0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x0, // "sensor"
+ // label values pool
+ 0x6d, 0x79, 0x67, 0x61, 0x75, 0x67, 0x65, 0x0, // "mygauge"
+ }
+
+ testCases := []struct {
+ name string
+ metrics *Metrics
+ expectCommonTime []byte
+ expectCommonLabels []byte
+ expectMetrics [][]byte
+ expectWritten int
+ }{
+ {
+ "common-ts+gauge",
+ &Metrics{
+ metrics: []Metric{
+ func() Metric {
+ g := NewGauge("mygauge", 43)
+ return &g
+ }(),
+ },
+ timestamp: timeAsRef(time.Unix(1500000000, 0)),
+ },
+ []byte{0x0, 0x2f, 0x68, 0x59}, // common time /1500000000
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ 0x5, // types
+ 0x0, // flags
+ 0x1, // labels index size
+ 0x0, // indexes of name labels
+ 0x0, // indexes of value labels
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x45, 0x40, // 43 // metrics value
+
+ },
+ },
+ 57,
+ },
+ {
+ "gauge+ts",
+ &Metrics{
+ metrics: []Metric{
+ func() Metric {
+ g := NewGauge("mygauge", 43, WithTimestamp(time.Unix(1657710476, 0)))
+ return &g
+ }(),
+ },
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ 0x6, // uint8(typeGauge << 2) | uint8(valueTypeOneWithTS)
+ 0x0, // flags
+ 0x1, // labels index size
+ 0x0, // indexes of name labels
+ 0x0, // indexes of value labels
+
+ 0x8c, 0xa7, 0xce, 0x62, //metric ts
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x45, 0x40, // 43 // metrics value
+
+ },
+ },
+ 61,
+ },
+ {
+ "common-ts+gauge+ts",
+ &Metrics{
+ metrics: []Metric{
+ func() Metric {
+ g := NewGauge("mygauge", 43, WithTimestamp(time.Unix(1657710476, 0)))
+ return &g
+ }(),
+ func() Metric {
+ g := NewGauge("mygauge", 42, WithTimestamp(time.Unix(1500000000, 0)))
+ return &g
+ }(),
+ },
+ timestamp: timeAsRef(time.Unix(1500000000, 0)),
+ },
+ []byte{0x0, 0x2f, 0x68, 0x59}, // common time /1500000000
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ 0x6, // types
+ 0x0, // flags
+ 0x1, // labels index size
+ 0x0, // indexes of name labels
+ 0x0, // indexes of value labels
+
+ 0x8c, 0xa7, 0xce, 0x62, //metric ts
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x45, 0x40, // 43 // metrics value
+
+ },
+ {
+ 0x6, // types
+ 0x0, // flags
+ 0x1, // labels index size
+ 0x0, // indexes of name labels
+ 0x0, // indexes of value labels
+
+ 0x0, 0x2f, 0x68, 0x59, // metric ts
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x45, 0x40, //42 // metrics value
+
+ },
+ },
+ 78,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ var buf bytes.Buffer
+ ctx := context.Background()
+
+ written, err := NewSpackEncoder(ctx, CompressionNone, tc.metrics).Encode(&buf)
+
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expectWritten, written)
+
+ body := buf.Bytes()
+ setMetricsCount(expectHeader, len(tc.metrics.metrics))
+
+ require.True(t, bytes.HasPrefix(body, expectHeader))
+ body = body[len(expectHeader):]
+
+ require.True(t, bytes.HasPrefix(body, tc.expectCommonTime))
+ body = body[len(tc.expectCommonTime):]
+
+ require.True(t, bytes.HasPrefix(body, tc.expectCommonLabels))
+ body = body[len(tc.expectCommonLabels):]
+
+ expectButMissing := [][]byte{}
+ for range tc.expectMetrics {
+ var seen bool
+ var val []byte
+ for _, v := range tc.expectMetrics {
+ val = v
+ if bytes.HasPrefix(body, v) {
+ body = bytes.Replace(body, v, []byte{}, 1)
+ seen = true
+ break
+ }
+ }
+ if !seen {
+ expectButMissing = append(expectButMissing, val)
+ }
+ }
+ assert.Empty(t, body, "unexpected bytes seen")
+ assert.Empty(t, expectButMissing, "missing metrics bytes")
+ })
+ }
+}
+
+func setMetricsCount(header []byte, count int) {
+ header[16] = uint8(count)
+ header[20] = uint8(count)
+}
diff --git a/library/go/core/metrics/solomon/stream.go b/library/go/core/metrics/solomon/stream.go
new file mode 100644
index 0000000000..7cf6d70064
--- /dev/null
+++ b/library/go/core/metrics/solomon/stream.go
@@ -0,0 +1,89 @@
+package solomon
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+
+ "github.com/ydb-platform/ydb/library/go/core/xerrors"
+)
+
+const HeaderSize = 24
+
+type StreamFormat string
+
+func (r *Registry) StreamJSON(ctx context.Context, w io.Writer) (written int, err error) {
+ cw := newCompressedWriter(w, CompressionNone)
+
+ if ctx.Err() != nil {
+ return written, xerrors.Errorf("streamJSON context error: %w", ctx.Err())
+ }
+ _, err = cw.Write([]byte("{\"metrics\":["))
+ if err != nil {
+ return written, xerrors.Errorf("write metrics failed: %w", err)
+ }
+
+ first := true
+ r.metrics.Range(func(_, s interface{}) bool {
+ if ctx.Err() != nil {
+ err = xerrors.Errorf("streamJSON context error: %w", ctx.Err())
+ return false
+ }
+
+ // write trailing comma
+ if !first {
+ _, err = cw.Write([]byte(","))
+ if err != nil {
+ err = xerrors.Errorf("write metrics failed: %w", err)
+ return false
+ }
+ }
+
+ var b []byte
+
+ b, err = json.Marshal(s)
+ if err != nil {
+ err = xerrors.Errorf("marshal metric failed: %w", err)
+ return false
+ }
+
+ // write metric json
+ _, err = cw.Write(b)
+ if err != nil {
+ err = xerrors.Errorf("write metric failed: %w", err)
+ return false
+ }
+
+ first = false
+ return true
+ })
+ if err != nil {
+ return written, err
+ }
+
+ if ctx.Err() != nil {
+ return written, xerrors.Errorf("streamJSON context error: %w", ctx.Err())
+ }
+ _, err = cw.Write([]byte("]}"))
+ if err != nil {
+ return written, xerrors.Errorf("write metrics failed: %w", err)
+ }
+
+ if ctx.Err() != nil {
+ return written, xerrors.Errorf("streamJSON context error: %w", ctx.Err())
+ }
+ err = cw.Close()
+ if err != nil {
+ return written, xerrors.Errorf("close failed: %w", err)
+ }
+
+ return cw.(*noCompressionWriteCloser).written, nil
+}
+
+func (r *Registry) StreamSpack(ctx context.Context, w io.Writer, compression CompressionType) (int, error) {
+ metrics, err := r.Gather()
+ if err != nil {
+ return 0, err
+ }
+ return NewSpackEncoder(ctx, compression, metrics).Encode(w)
+}
diff --git a/library/go/core/metrics/solomon/stream_test.go b/library/go/core/metrics/solomon/stream_test.go
new file mode 100644
index 0000000000..7548f77dbb
--- /dev/null
+++ b/library/go/core/metrics/solomon/stream_test.go
@@ -0,0 +1,595 @@
+package solomon
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+func Test_streamJson(t *testing.T) {
+ testCases := []struct {
+ name string
+ registry *Registry
+ expect string
+ expectWritten int
+ expectErr error
+ }{
+ {
+ "success",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ cnt := r.Counter("mycounter")
+ cnt.Add(42)
+
+ gg := r.Gauge("mygauge")
+ gg.Set(2)
+
+ return r
+ }(),
+ `{"metrics":[{"type":"COUNTER","labels":{"sensor":"mycounter"},"value":42},{"type":"DGAUGE","labels":{"sensor":"mygauge"},"value":2}]}`,
+ 133,
+ nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ w := httptest.NewRecorder()
+ ctx := context.Background()
+
+ written, err := tc.registry.StreamJSON(ctx, w)
+
+ if tc.expectErr == nil {
+ assert.NoError(t, err)
+ } else {
+ assert.EqualError(t, err, tc.expectErr.Error())
+ }
+
+ assert.Equal(t, tc.expectWritten, written)
+ assert.Equal(t, len(tc.expect), w.Body.Len())
+
+ if tc.expect != "" {
+ var expectedObj, givenObj map[string]interface{}
+ err = json.Unmarshal([]byte(tc.expect), &expectedObj)
+ assert.NoError(t, err)
+ err = json.Unmarshal(w.Body.Bytes(), &givenObj)
+ assert.NoError(t, err)
+
+ sameMap(t, expectedObj, givenObj)
+ }
+ })
+ }
+}
+
+func Test_streamSpack(t *testing.T) {
+ testCases := []struct {
+ name string
+ registry *Registry
+ compression CompressionType
+ expectHeader []byte
+ expectLabelNamesPool [][]byte
+ expectValueNamesPool [][]byte
+ expectCommonTime []byte
+ expectCommonLabels []byte
+ expectMetrics [][]byte
+ expectWritten int
+ }{
+ {
+ "counter",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ cnt := r.Counter("counter")
+ cnt.Add(42)
+
+ return r
+ }(),
+ CompressionNone,
+ []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x0, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0x8, 0x0, 0x0, 0x0, // label values size
+ 0x1, 0x0, 0x0, 0x0, // metric count
+ 0x1, 0x0, 0x0, 0x0, // point count
+ },
+ [][]byte{ // label names pool
+ {0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72}, // "sensor"
+ },
+ [][]byte{ // label values pool
+ {0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72}, // "counter"
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ 0x9, // types
+ 0x0, // flags
+ 0x1, // labels index size
+ 0x0, // indexes of name labels
+ 0x0, // indexes of value labels
+
+ 0x2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // 42 // metrics value
+ },
+ },
+ 57,
+ },
+ {
+ "counter_lz4",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ cnt := r.Counter("counter")
+ cnt.Add(0)
+
+ return r
+ }(),
+ CompressionLz4,
+ []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x3, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0x8, 0x0, 0x0, 0x0, // label values size
+ 0x1, 0x0, 0x0, 0x0, // metric count
+ 0x1, 0x0, 0x0, 0x0, // point count
+ 0x23, 0x00, 0x00, 0x00, // compressed length
+ 0x21, 0x00, 0x00, 0x00, // uncompressed length
+ 0xf0, 0x12,
+ },
+ [][]byte{ // label names pool
+ {0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72}, // "sensor"
+ },
+ [][]byte{ // label values pool
+ {0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72}, // "counter"
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ 0x9, // types
+ 0x0, // flags
+ 0x1, // labels index size
+ 0x0, // indexes of name labels
+ 0x0, // indexes of value labels
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0 //metrics value
+ 0x10, 0x11, 0xa4, 0x22, // checksum
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // end stream
+ },
+ },
+ 83,
+ },
+ {
+ "rate",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ cnt := r.Counter("counter")
+ Rated(cnt)
+ cnt.Add(0)
+
+ return r
+ }(),
+ CompressionNone,
+ []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x0, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0x8, 0x0, 0x0, 0x0, // label values size
+ 0x1, 0x0, 0x0, 0x0, // metric count
+ 0x1, 0x0, 0x0, 0x0, // point count
+ },
+ [][]byte{ // label names pool
+ {0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72}, // "sensor"
+ },
+ [][]byte{ // label values pool
+ {0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72}, // "counter"
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ 0xd, // types
+ 0x0, // flags
+ 0x1, // labels index size
+ 0x0, // indexes of name labels
+ 0x0, // indexes of value labels
+
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, //0 // metrics value
+ },
+ },
+ 57,
+ },
+ {
+ "timer",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ t := r.Timer("timer")
+ t.RecordDuration(2 * time.Second)
+
+ return r
+ }(),
+ CompressionNone,
+ []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x0, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0x6, 0x0, 0x0, 0x0, // label values size
+ 0x1, 0x0, 0x0, 0x0, // metric count
+ 0x1, 0x0, 0x0, 0x0, // point count
+ },
+ [][]byte{ // label names pool
+ {0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72}, // "sensor"
+ },
+ [][]byte{ // label values pool
+ {0x74, 0x69, 0x6d, 0x65, 0x72}, // "timer"
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ 0x5, // types
+ 0x0, // flags
+ 0x1, // labels index size
+ 0x0, // indexes of name labels
+ 0x0, // indexes of value labels
+
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x40, //2.0 // metrics value
+ },
+ },
+ 55,
+ },
+ {
+ "gauge",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ g := r.Gauge("gauge")
+ g.Set(42)
+
+ return r
+ }(),
+ CompressionNone,
+ []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x0, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0x6, 0x0, 0x0, 0x0, // label values size
+ 0x1, 0x0, 0x0, 0x0, // metric count
+ 0x1, 0x0, 0x0, 0x0, // point count
+ },
+ [][]byte{ // label names pool
+ {0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72}, // "sensor"
+ },
+ [][]byte{ // label values pool
+ {0x67, 0x61, 0x75, 0x67, 0x65}, // "gauge"
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ 0x5, // types
+ 0x0, // flags
+ 0x1, // labels index size
+ 0x0, // indexes of name labels
+ 0x0, // indexes of value labels
+
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x45, 0x40, //42 // metrics value
+
+ },
+ },
+ 55,
+ },
+ {
+ "histogram",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ _ = r.Histogram("histogram", metrics.NewBuckets(0, 0.1, 0.11))
+
+ return r
+ }(),
+ CompressionNone,
+ []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x0, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0xa, 0x0, 0x0, 0x0, // label values size
+ 0x1, 0x0, 0x0, 0x0, // metric count
+ 0x1, 0x0, 0x0, 0x0, // point count
+ },
+ [][]byte{ // label names pool
+ {0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72}, // "sensor"
+ },
+ [][]byte{ // label values pool
+ {0x68, 0x69, 0x73, 0x74, 0x6F, 0x67, 0x72, 0x61, 0x6D}, // "histogram"
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ /*types*/ 0x15,
+ /*flags*/ 0x0,
+ /*labels*/ 0x1, // ?
+ /*name*/ 0x0,
+ /*value*/ 0x0,
+ /*buckets count*/ 0x3,
+ /*upper bound 0*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /*upper bound 1*/ 0x9a, 0x99, 0x99, 0x99, 0x99, 0x99, 0xb9, 0x3f,
+ /*upper bound 2*/ 0x29, 0x5c, 0x8f, 0xc2, 0xf5, 0x28, 0xbc, 0x3f,
+ /*counter 0*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /*counter 1*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /*counter 2*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ },
+ },
+ 100,
+ },
+ {
+ "rate_histogram",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ h := r.Histogram("histogram", metrics.NewBuckets(0, 0.1, 0.11))
+ Rated(h)
+
+ return r
+ }(),
+ CompressionNone,
+ []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x0, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0xa, 0x0, 0x0, 0x0, // label values size
+ 0x1, 0x0, 0x0, 0x0, // metric count
+ 0x1, 0x0, 0x0, 0x0, // point count
+ },
+ [][]byte{ // label names pool
+ {0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72}, // "sensor"
+ },
+ [][]byte{ // label values pool
+ {0x68, 0x69, 0x73, 0x74, 0x6F, 0x67, 0x72, 0x61, 0x6D}, // "histogram"
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ /*types*/ 0x19,
+ /*flags*/ 0x0,
+ /*labels*/ 0x1, // ?
+ /*name*/ 0x0,
+ /*value*/ 0x0,
+ /*buckets count*/ 0x3,
+ /*upper bound 0*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /*upper bound 1*/ 0x9a, 0x99, 0x99, 0x99, 0x99, 0x99, 0xb9, 0x3f,
+ /*upper bound 2*/ 0x29, 0x5c, 0x8f, 0xc2, 0xf5, 0x28, 0xbc, 0x3f,
+ /*counter 0*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /*counter 1*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /*counter 2*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ },
+ },
+ 100,
+ },
+ {
+ "counter+timer",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ cnt := r.Counter("counter")
+ cnt.Add(42)
+
+ t := r.Timer("timer")
+ t.RecordDuration(2 * time.Second)
+
+ return r
+ }(),
+ CompressionNone,
+ []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x0, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0xe, 0x0, 0x0, 0x0, // label values size
+ 0x2, 0x0, 0x0, 0x0, // metric count
+ 0x2, 0x0, 0x0, 0x0, // point count
+ },
+ [][]byte{ // label names pool
+ {0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72}, // "sensor"
+ },
+ [][]byte{ // label values pool
+ {0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72}, // "counter"
+ {0x74, 0x69, 0x6d, 0x65, 0x72}, // "timer"
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+ /*types*/ 0x9,
+ /*flags*/ 0x0,
+ /*labels*/ 0x1, // ?
+ /*name*/ 0x0,
+ /*value*/ 0x0,
+ /*metrics value*/ 0x2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, //42
+ },
+ {
+ /*types*/ 0x5,
+ /*flags*/ 0x0,
+ /*labels*/ 0x1, // ?
+ /*name*/ 0x0,
+ /*value*/ 0x1,
+ /*metrics value*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x40, //2.0
+
+ },
+ },
+ 76,
+ },
+ {
+ "gauge+histogram",
+ func() *Registry {
+ r := NewRegistry(NewRegistryOpts())
+
+ g := r.Gauge("gauge")
+ g.Set(42)
+
+ _ = r.Histogram("histogram", metrics.NewBuckets(0, 0.1, 0.11))
+
+ return r
+ }(),
+ CompressionNone,
+ []byte{
+ 0x53, 0x50, // magic
+ 0x01, 0x01, // version
+ 0x18, 0x00, // header size
+ 0x0, // time precision
+ 0x0, // compression algorithm
+ 0x7, 0x0, 0x0, 0x0, // label names size
+ 0x10, 0x0, 0x0, 0x0, // label values size
+ 0x2, 0x0, 0x0, 0x0, // metric count
+ 0x2, 0x0, 0x0, 0x0, // point count
+ },
+ [][]byte{ // label names pool
+ {0x73, 0x65, 0x6e, 0x73, 0x6f, 0x72}, // "sensor"
+ },
+ [][]byte{ // label values pool
+ {0x67, 0x61, 0x75, 0x67, 0x65}, // "gauge"
+ {0x68, 0x69, 0x73, 0x74, 0x6F, 0x67, 0x72, 0x61, 0x6D}, // "histogram"
+ },
+ []byte{0x0, 0x0, 0x0, 0x0}, // common time
+ []byte{0x0}, // common labels count and indexes
+ [][]byte{
+ {
+
+ /*types*/ 0x5,
+ /*flags*/ 0x0,
+ /*labels*/ 0x1, // ?
+ /*name*/ 0x0,
+ /*value*/ 0x0,
+ /*metrics value*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x45, 0x40, //42
+ },
+ {
+ /*types*/ 0x15,
+ /*flags*/ 0x0,
+ /*labels*/ 0x1, // ?
+ /*name*/ 0x0,
+ /*value*/ 0x1,
+ /*buckets count*/ 0x3,
+ /*upper bound 0*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /*upper bound 1*/ 0x9a, 0x99, 0x99, 0x99, 0x99, 0x99, 0xb9, 0x3f,
+ /*upper bound 2*/ 0x29, 0x5c, 0x8f, 0xc2, 0xf5, 0x28, 0xbc, 0x3f,
+ /*counter 0*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /*counter 1*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /*counter 2*/ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ },
+ },
+ 119,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ w := httptest.NewRecorder()
+ ctx := context.Background()
+
+ written, err := tc.registry.StreamSpack(ctx, w, tc.compression)
+
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expectWritten, written)
+ body := w.Body.Bytes()
+ require.True(t, bytes.HasPrefix(body, tc.expectHeader))
+ body = body[len(tc.expectHeader):]
+
+ t.Logf("expectLabelNamesPool: %v", tc.expectLabelNamesPool)
+ labelNamesPoolBytes := body[:len(bytes.Join(tc.expectLabelNamesPool, []byte{0x0}))+1]
+ labelNamesPool := bytes.Split(bytes.Trim(labelNamesPoolBytes, "\x00"), []byte{0x0})
+ require.ElementsMatch(t, tc.expectLabelNamesPool, labelNamesPool)
+ body = body[len(labelNamesPoolBytes):]
+
+ t.Logf("expectValueNamesPool: %v", tc.expectValueNamesPool)
+ valueNamesPoolBytes := body[:len(bytes.Join(tc.expectValueNamesPool, []byte{0x0}))+1]
+ valueNamesPool := bytes.Split(bytes.Trim(valueNamesPoolBytes, "\x00"), []byte{0x0})
+ require.ElementsMatch(t, tc.expectValueNamesPool, valueNamesPool)
+ body = body[len(valueNamesPoolBytes):]
+
+ require.True(t, bytes.HasPrefix(body, tc.expectCommonTime))
+ body = body[len(tc.expectCommonTime):]
+
+ require.True(t, bytes.HasPrefix(body, tc.expectCommonLabels))
+ body = body[len(tc.expectCommonLabels):]
+
+ expectButMissing := [][]byte{}
+ for idx := range tc.expectMetrics {
+ var seen bool
+ var val []byte
+ for _, v := range tc.expectMetrics {
+ val = v[:]
+ fixValueNameIndex(idx, val)
+ if bytes.HasPrefix(body, val) {
+ body = bytes.Replace(body, val, []byte{}, 1)
+ seen = true
+ break
+ }
+ }
+ if !seen {
+ expectButMissing = append(expectButMissing, val)
+ }
+ }
+ assert.Empty(t, body, "unexpected bytes seen")
+ assert.Empty(t, expectButMissing, "missing metrics bytes")
+ })
+ }
+}
+
+func fixValueNameIndex(idx int, metric []byte) {
+ // ASSUMPTION_FOR_TESTS: the size of the index is always equal to one
+ // That is, the number of points in the metric is always one
+ metric[4] = uint8(idx) // fix value name index
+}
+
+func sameMap(t *testing.T, expected, actual map[string]interface{}) bool {
+ if !assert.Len(t, actual, len(expected)) {
+ return false
+ }
+
+ for k := range expected {
+ actualMetric := actual[k]
+ if !assert.NotNil(t, actualMetric, "expected key %q not found", k) {
+ return false
+ }
+
+ if !assert.ElementsMatch(t, expected[k], actualMetric, "%q must have same elements", k) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/library/go/core/metrics/solomon/timer.go b/library/go/core/metrics/solomon/timer.go
new file mode 100644
index 0000000000..d36940a9f7
--- /dev/null
+++ b/library/go/core/metrics/solomon/timer.go
@@ -0,0 +1,91 @@
+package solomon
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "go.uber.org/atomic"
+)
+
+var (
+ _ metrics.Timer = (*Timer)(nil)
+ _ Metric = (*Timer)(nil)
+)
+
+// Timer measures gauge duration.
+type Timer struct {
+ name string
+ metricType metricType
+ tags map[string]string
+ value atomic.Duration
+ timestamp *time.Time
+
+ useNameTag bool
+}
+
+func (t *Timer) RecordDuration(value time.Duration) {
+ t.value.Store(value)
+}
+
+func (t *Timer) Name() string {
+ return t.name
+}
+
+func (t *Timer) getType() metricType {
+ return t.metricType
+}
+
+func (t *Timer) getLabels() map[string]string {
+ return t.tags
+}
+
+func (t *Timer) getValue() interface{} {
+ return t.value.Load().Seconds()
+}
+
+func (t *Timer) getTimestamp() *time.Time {
+ return t.timestamp
+}
+
+func (t *Timer) getNameTag() string {
+ if t.useNameTag {
+ return "name"
+ } else {
+ return "sensor"
+ }
+}
+
+// MarshalJSON implements json.Marshaler.
+func (t *Timer) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string `json:"type"`
+ Labels map[string]string `json:"labels"`
+ Value float64 `json:"value"`
+ Timestamp *int64 `json:"ts,omitempty"`
+ }{
+ Type: t.metricType.String(),
+ Value: t.value.Load().Seconds(),
+ Labels: func() map[string]string {
+ labels := make(map[string]string, len(t.tags)+1)
+ labels[t.getNameTag()] = t.Name()
+ for k, v := range t.tags {
+ labels[k] = v
+ }
+ return labels
+ }(),
+ Timestamp: tsAsRef(t.timestamp),
+ })
+}
+
+// Snapshot returns independent copy on metric.
+func (t *Timer) Snapshot() Metric {
+ return &Timer{
+ name: t.name,
+ metricType: t.metricType,
+ tags: t.tags,
+ value: *atomic.NewDuration(t.value.Load()),
+
+ useNameTag: t.useNameTag,
+ }
+}
diff --git a/library/go/core/metrics/solomon/timer_test.go b/library/go/core/metrics/solomon/timer_test.go
new file mode 100644
index 0000000000..4904815701
--- /dev/null
+++ b/library/go/core/metrics/solomon/timer_test.go
@@ -0,0 +1,56 @@
+package solomon
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/atomic"
+)
+
+func TestTimer_RecordDuration(t *testing.T) {
+ c := &Timer{
+ name: "mytimer",
+ metricType: typeGauge,
+ tags: map[string]string{"ololo": "trololo"},
+ }
+
+ c.RecordDuration(1 * time.Second)
+ assert.Equal(t, 1*time.Second, c.value.Load())
+
+ c.RecordDuration(42 * time.Millisecond)
+ assert.Equal(t, 42*time.Millisecond, c.value.Load())
+}
+
+func TestTimerRated_MarshalJSON(t *testing.T) {
+ c := &Timer{
+ name: "mytimer",
+ metricType: typeRated,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewDuration(42 * time.Millisecond),
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"RATE","labels":{"ololo":"trololo","sensor":"mytimer"},"value":0.042}`)
+ assert.Equal(t, expected, b)
+}
+
+func TestNameTagTimer_MarshalJSON(t *testing.T) {
+ c := &Timer{
+ name: "mytimer",
+ metricType: typeRated,
+ tags: map[string]string{"ololo": "trololo"},
+ value: *atomic.NewDuration(42 * time.Millisecond),
+
+ useNameTag: true,
+ }
+
+ b, err := json.Marshal(c)
+ assert.NoError(t, err)
+
+ expected := []byte(`{"type":"RATE","labels":{"name":"mytimer","ololo":"trololo"},"value":0.042}`)
+ assert.Equal(t, expected, b)
+}
diff --git a/library/go/core/metrics/solomon/vec.go b/library/go/core/metrics/solomon/vec.go
new file mode 100644
index 0000000000..323919e9f8
--- /dev/null
+++ b/library/go/core/metrics/solomon/vec.go
@@ -0,0 +1,279 @@
+package solomon
+
+import (
+ "sync"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/internal/pkg/registryutil"
+)
+
+// metricsVector is a base implementation of vector of metrics of any supported type.
+type metricsVector struct {
+ labels []string
+ mtx sync.RWMutex // Protects metrics.
+ metrics map[uint64]Metric
+ rated bool
+ newMetric func(map[string]string) Metric
+ removeMetric func(m Metric)
+}
+
+func (v *metricsVector) with(tags map[string]string) Metric {
+ hv, err := registryutil.VectorHash(tags, v.labels)
+ if err != nil {
+ panic(err)
+ }
+
+ v.mtx.RLock()
+ metric, ok := v.metrics[hv]
+ v.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ v.mtx.Lock()
+ defer v.mtx.Unlock()
+
+ metric, ok = v.metrics[hv]
+ if !ok {
+ metric = v.newMetric(tags)
+ v.metrics[hv] = metric
+ }
+
+ return metric
+}
+
+// reset deletes all metrics in this vector.
+func (v *metricsVector) reset() {
+ v.mtx.Lock()
+ defer v.mtx.Unlock()
+
+ for h, m := range v.metrics {
+ delete(v.metrics, h)
+ v.removeMetric(m)
+ }
+}
+
+var _ metrics.CounterVec = (*CounterVec)(nil)
+
+// CounterVec stores counters and
+// implements metrics.CounterVec interface.
+type CounterVec struct {
+ vec *metricsVector
+}
+
+// CounterVec creates a new counters vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) CounterVec(name string, labels []string) metrics.CounterVec {
+ var vec *metricsVector
+ vec = &metricsVector{
+ labels: append([]string(nil), labels...),
+ metrics: make(map[uint64]Metric),
+ rated: r.rated,
+ newMetric: func(tags map[string]string) Metric {
+ return r.Rated(vec.rated).
+ WithTags(tags).
+ Counter(name).(*Counter)
+ },
+ removeMetric: func(m Metric) {
+ r.WithTags(m.getLabels()).(*Registry).unregisterMetric(m)
+ },
+ }
+ return &CounterVec{vec: vec}
+}
+
+// With creates new or returns existing counter with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *CounterVec) With(tags map[string]string) metrics.Counter {
+ return v.vec.with(tags).(*Counter)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *CounterVec) Reset() {
+ v.vec.reset()
+}
+
+var _ metrics.GaugeVec = (*GaugeVec)(nil)
+
+// GaugeVec stores gauges and
+// implements metrics.GaugeVec interface.
+type GaugeVec struct {
+ vec *metricsVector
+}
+
+// GaugeVec creates a new gauges vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) GaugeVec(name string, labels []string) metrics.GaugeVec {
+ return &GaugeVec{
+ vec: &metricsVector{
+ labels: append([]string(nil), labels...),
+ metrics: make(map[uint64]Metric),
+ newMetric: func(tags map[string]string) Metric {
+ return r.WithTags(tags).Gauge(name).(*Gauge)
+ },
+ removeMetric: func(m Metric) {
+ r.WithTags(m.getLabels()).(*Registry).unregisterMetric(m)
+ },
+ },
+ }
+}
+
+// With creates new or returns existing gauge with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *GaugeVec) With(tags map[string]string) metrics.Gauge {
+ return v.vec.with(tags).(*Gauge)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *GaugeVec) Reset() {
+ v.vec.reset()
+}
+
+var _ metrics.IntGaugeVec = (*IntGaugeVec)(nil)
+
+// IntGaugeVec stores gauges and
+// implements metrics.IntGaugeVec interface.
+type IntGaugeVec struct {
+ vec *metricsVector
+}
+
+// IntGaugeVec creates a new gauges vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) IntGaugeVec(name string, labels []string) metrics.IntGaugeVec {
+ return &IntGaugeVec{
+ vec: &metricsVector{
+ labels: append([]string(nil), labels...),
+ metrics: make(map[uint64]Metric),
+ newMetric: func(tags map[string]string) Metric {
+ return r.WithTags(tags).IntGauge(name).(*IntGauge)
+ },
+ removeMetric: func(m Metric) {
+ r.WithTags(m.getLabels()).(*Registry).unregisterMetric(m)
+ },
+ },
+ }
+}
+
+// With creates new or returns existing gauge with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *IntGaugeVec) With(tags map[string]string) metrics.IntGauge {
+ return v.vec.with(tags).(*IntGauge)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *IntGaugeVec) Reset() {
+ v.vec.reset()
+}
+
+var _ metrics.TimerVec = (*TimerVec)(nil)
+
+// TimerVec stores timers and
+// implements metrics.TimerVec interface.
+type TimerVec struct {
+ vec *metricsVector
+}
+
+// TimerVec creates a new timers vector with given metric name and
+// partitioned by the given label names.
+func (r *Registry) TimerVec(name string, labels []string) metrics.TimerVec {
+ return &TimerVec{
+ vec: &metricsVector{
+ labels: append([]string(nil), labels...),
+ metrics: make(map[uint64]Metric),
+ newMetric: func(tags map[string]string) Metric {
+ return r.WithTags(tags).Timer(name).(*Timer)
+ },
+ removeMetric: func(m Metric) {
+ r.WithTags(m.getLabels()).(*Registry).unregisterMetric(m)
+ },
+ },
+ }
+}
+
+// With creates new or returns existing timer with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *TimerVec) With(tags map[string]string) metrics.Timer {
+ return v.vec.with(tags).(*Timer)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *TimerVec) Reset() {
+ v.vec.reset()
+}
+
+var _ metrics.HistogramVec = (*HistogramVec)(nil)
+
+// HistogramVec stores histograms and
+// implements metrics.HistogramVec interface.
+type HistogramVec struct {
+ vec *metricsVector
+}
+
+// HistogramVec creates a new histograms vector with given metric name and buckets and
+// partitioned by the given label names.
+func (r *Registry) HistogramVec(name string, buckets metrics.Buckets, labels []string) metrics.HistogramVec {
+ var vec *metricsVector
+ vec = &metricsVector{
+ labels: append([]string(nil), labels...),
+ metrics: make(map[uint64]Metric),
+ rated: r.rated,
+ newMetric: func(tags map[string]string) Metric {
+ return r.Rated(vec.rated).
+ WithTags(tags).
+ Histogram(name, buckets).(*Histogram)
+ },
+ removeMetric: func(m Metric) {
+ r.WithTags(m.getLabels()).(*Registry).unregisterMetric(m)
+ },
+ }
+ return &HistogramVec{vec: vec}
+}
+
+// With creates new or returns existing histogram with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *HistogramVec) With(tags map[string]string) metrics.Histogram {
+ return v.vec.with(tags).(*Histogram)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *HistogramVec) Reset() {
+ v.vec.reset()
+}
+
+var _ metrics.TimerVec = (*DurationHistogramVec)(nil)
+
+// DurationHistogramVec stores duration histograms and
+// implements metrics.TimerVec interface.
+type DurationHistogramVec struct {
+ vec *metricsVector
+}
+
+// DurationHistogramVec creates a new duration histograms vector with given metric name and buckets and
+// partitioned by the given label names.
+func (r *Registry) DurationHistogramVec(name string, buckets metrics.DurationBuckets, labels []string) metrics.TimerVec {
+ var vec *metricsVector
+ vec = &metricsVector{
+ labels: append([]string(nil), labels...),
+ metrics: make(map[uint64]Metric),
+ rated: r.rated,
+ newMetric: func(tags map[string]string) Metric {
+ return r.Rated(vec.rated).
+ WithTags(tags).
+ DurationHistogram(name, buckets).(*Histogram)
+ },
+ removeMetric: func(m Metric) {
+ r.WithTags(m.getLabels()).(*Registry).unregisterMetric(m)
+ },
+ }
+ return &DurationHistogramVec{vec: vec}
+}
+
+// With creates new or returns existing duration histogram with given tags from vector.
+// It will panic if tags keys set is not equal to vector labels.
+func (v *DurationHistogramVec) With(tags map[string]string) metrics.Timer {
+ return v.vec.with(tags).(*Histogram)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *DurationHistogramVec) Reset() {
+ v.vec.reset()
+}
diff --git a/library/go/core/metrics/solomon/vec_test.go b/library/go/core/metrics/solomon/vec_test.go
new file mode 100644
index 0000000000..cac437f434
--- /dev/null
+++ b/library/go/core/metrics/solomon/vec_test.go
@@ -0,0 +1,339 @@
+package solomon
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+func TestVec(t *testing.T) {
+ type args struct {
+ name string
+ labels []string
+ buckets metrics.Buckets
+ dbuckets metrics.DurationBuckets
+ }
+
+ testCases := []struct {
+ name string
+ args args
+ expectedType interface{}
+ expectLabels []string
+ }{
+ {
+ name: "CounterVec",
+ args: args{
+ name: "cntvec",
+ labels: []string{"shimba", "looken"},
+ },
+ expectedType: &CounterVec{},
+ expectLabels: []string{"shimba", "looken"},
+ },
+ {
+ name: "GaugeVec",
+ args: args{
+ name: "ggvec",
+ labels: []string{"shimba", "looken"},
+ },
+ expectedType: &GaugeVec{},
+ expectLabels: []string{"shimba", "looken"},
+ },
+ {
+ name: "TimerVec",
+ args: args{
+ name: "tvec",
+ labels: []string{"shimba", "looken"},
+ },
+ expectedType: &TimerVec{},
+ expectLabels: []string{"shimba", "looken"},
+ },
+ {
+ name: "HistogramVec",
+ args: args{
+ name: "hvec",
+ labels: []string{"shimba", "looken"},
+ buckets: metrics.NewBuckets(1, 2, 3, 4),
+ },
+ expectedType: &HistogramVec{},
+ expectLabels: []string{"shimba", "looken"},
+ },
+ {
+ name: "DurationHistogramVec",
+ args: args{
+ name: "dhvec",
+ labels: []string{"shimba", "looken"},
+ dbuckets: metrics.NewDurationBuckets(1, 2, 3, 4),
+ },
+ expectedType: &DurationHistogramVec{},
+ expectLabels: []string{"shimba", "looken"},
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+
+ switch vect := tc.expectedType.(type) {
+ case *CounterVec:
+ vec := rg.CounterVec(tc.args.name, tc.args.labels)
+ assert.IsType(t, vect, vec)
+ assert.Equal(t, tc.expectLabels, vec.(*CounterVec).vec.labels)
+ case *GaugeVec:
+ vec := rg.GaugeVec(tc.args.name, tc.args.labels)
+ assert.IsType(t, vect, vec)
+ assert.Equal(t, tc.expectLabels, vec.(*GaugeVec).vec.labels)
+ case *TimerVec:
+ vec := rg.TimerVec(tc.args.name, tc.args.labels)
+ assert.IsType(t, vect, vec)
+ assert.Equal(t, tc.expectLabels, vec.(*TimerVec).vec.labels)
+ case *HistogramVec:
+ vec := rg.HistogramVec(tc.args.name, tc.args.buckets, tc.args.labels)
+ assert.IsType(t, vect, vec)
+ assert.Equal(t, tc.expectLabels, vec.(*HistogramVec).vec.labels)
+ case *DurationHistogramVec:
+ vec := rg.DurationHistogramVec(tc.args.name, tc.args.dbuckets, tc.args.labels)
+ assert.IsType(t, vect, vec)
+ assert.Equal(t, tc.expectLabels, vec.(*DurationHistogramVec).vec.labels)
+ default:
+ t.Errorf("unknown type: %T", vect)
+ }
+ })
+ }
+}
+
+func TestCounterVecWith(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+
+ t.Run("plain", func(t *testing.T) {
+ vec := rg.CounterVec("ololo", []string{"shimba", "looken"})
+ tags := map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ }
+ metric := vec.With(tags)
+
+ assert.IsType(t, &CounterVec{}, vec)
+ assert.IsType(t, &Counter{}, metric)
+ assert.Equal(t, typeCounter, metric.(*Counter).metricType)
+
+ assert.NotEmpty(t, vec.(*CounterVec).vec.metrics)
+ vec.Reset()
+ assert.Empty(t, vec.(*CounterVec).vec.metrics)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), metric.(*Counter))
+ })
+
+ t.Run("rated", func(t *testing.T) {
+ vec := rg.CounterVec("ololo", []string{"shimba", "looken"})
+ Rated(vec)
+ tags := map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ }
+ metric := vec.With(tags)
+
+ assert.IsType(t, &CounterVec{}, vec)
+ assert.IsType(t, &Counter{}, metric)
+ assert.Equal(t, typeRated, metric.(*Counter).metricType)
+
+ assert.NotEmpty(t, vec.(*CounterVec).vec.metrics)
+ vec.Reset()
+ assert.Empty(t, vec.(*CounterVec).vec.metrics)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), metric.(*Counter))
+ })
+}
+
+func TestGaugeVecWith(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+
+ vec := rg.GaugeVec("ololo", []string{"shimba", "looken"})
+ tags := map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ }
+ metric := vec.With(tags)
+
+ assert.IsType(t, &GaugeVec{}, vec)
+ assert.IsType(t, &Gauge{}, metric)
+ assert.Equal(t, typeGauge, metric.(*Gauge).metricType)
+
+ assert.NotEmpty(t, vec.(*GaugeVec).vec.metrics)
+ vec.Reset()
+ assert.Empty(t, vec.(*GaugeVec).vec.metrics)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), metric.(*Gauge))
+}
+
+func TestTimerVecWith(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+ vec := rg.TimerVec("ololo", []string{"shimba", "looken"})
+ tags := map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ }
+ metric := vec.With(tags)
+
+ assert.IsType(t, &TimerVec{}, vec)
+ assert.IsType(t, &Timer{}, metric)
+ assert.Equal(t, typeGauge, metric.(*Timer).metricType)
+
+ assert.NotEmpty(t, vec.(*TimerVec).vec.metrics)
+ vec.Reset()
+ assert.Empty(t, vec.(*TimerVec).vec.metrics)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), metric.(*Timer))
+}
+
+func TestHistogramVecWith(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+
+ t.Run("plain", func(t *testing.T) {
+ buckets := metrics.NewBuckets(1, 2, 3)
+ vec := rg.HistogramVec("ololo", buckets, []string{"shimba", "looken"})
+ tags := map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ }
+ metric := vec.With(tags)
+
+ assert.IsType(t, &HistogramVec{}, vec)
+ assert.IsType(t, &Histogram{}, metric)
+ assert.Equal(t, typeHistogram, metric.(*Histogram).metricType)
+
+ assert.NotEmpty(t, vec.(*HistogramVec).vec.metrics)
+ vec.Reset()
+ assert.Empty(t, vec.(*HistogramVec).vec.metrics)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), metric.(*Histogram))
+ })
+
+ t.Run("rated", func(t *testing.T) {
+ buckets := metrics.NewBuckets(1, 2, 3)
+ vec := rg.HistogramVec("ololo", buckets, []string{"shimba", "looken"})
+ Rated(vec)
+ tags := map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ }
+ metric := vec.With(tags)
+
+ assert.IsType(t, &HistogramVec{}, vec)
+ assert.IsType(t, &Histogram{}, metric)
+ assert.Equal(t, typeRatedHistogram, metric.(*Histogram).metricType)
+
+ assert.NotEmpty(t, vec.(*HistogramVec).vec.metrics)
+ vec.Reset()
+ assert.Empty(t, vec.(*HistogramVec).vec.metrics)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), metric.(*Histogram))
+ })
+}
+
+func TestDurationHistogramVecWith(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+
+ t.Run("plain", func(t *testing.T) {
+ buckets := metrics.NewDurationBuckets(1, 2, 3)
+ vec := rg.DurationHistogramVec("ololo", buckets, []string{"shimba", "looken"})
+ tags := map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ }
+ metric := vec.With(tags)
+
+ assert.IsType(t, &DurationHistogramVec{}, vec)
+ assert.IsType(t, &Histogram{}, metric)
+ assert.Equal(t, typeHistogram, metric.(*Histogram).metricType)
+
+ assert.NotEmpty(t, vec.(*DurationHistogramVec).vec.metrics)
+ vec.Reset()
+ assert.Empty(t, vec.(*DurationHistogramVec).vec.metrics)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), metric.(*Histogram))
+ })
+
+ t.Run("rated", func(t *testing.T) {
+ buckets := metrics.NewDurationBuckets(1, 2, 3)
+ vec := rg.DurationHistogramVec("ololo", buckets, []string{"shimba", "looken"})
+ Rated(vec)
+ tags := map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ }
+ metric := vec.With(tags)
+
+ assert.IsType(t, &DurationHistogramVec{}, vec)
+ assert.IsType(t, &Histogram{}, metric)
+ assert.Equal(t, typeRatedHistogram, metric.(*Histogram).metricType)
+
+ assert.NotEmpty(t, vec.(*DurationHistogramVec).vec.metrics)
+ vec.Reset()
+ assert.Empty(t, vec.(*DurationHistogramVec).vec.metrics)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), metric.(*Histogram))
+ })
+}
+
+func TestMetricsVectorWith(t *testing.T) {
+ rg := NewRegistry(NewRegistryOpts())
+
+ name := "ololo"
+ tags := map[string]string{
+ "shimba": "boomba",
+ "looken": "tooken",
+ }
+
+ vec := &metricsVector{
+ labels: []string{"shimba", "looken"},
+ metrics: make(map[uint64]Metric),
+ newMetric: func(tags map[string]string) Metric {
+ return rg.WithTags(tags).Counter(name).(*Counter)
+ },
+ removeMetric: func(m Metric) {
+ rg.WithTags(m.getLabels()).(*Registry).unregisterMetric(m)
+ },
+ }
+
+ // check first counter
+ metric := vec.with(tags)
+ require.IsType(t, &Counter{}, metric)
+ cnt := metric.(*Counter)
+ assert.Equal(t, name, cnt.name)
+ assert.Equal(t, tags, cnt.tags)
+
+ // check vector length
+ assert.Equal(t, 1, len(vec.metrics))
+
+ // check same counter returned for same tags set
+ cnt2 := vec.with(tags)
+ assert.Same(t, cnt, cnt2)
+
+ // check vector length
+ assert.Equal(t, 1, len(vec.metrics))
+
+ // return new counter
+ cnt3 := vec.with(map[string]string{
+ "shimba": "boomba",
+ "looken": "cooken",
+ })
+ assert.NotSame(t, cnt, cnt3)
+
+ // check vector length
+ assert.Equal(t, 2, len(vec.metrics))
+
+ // check for panic
+ assert.Panics(t, func() {
+ vec.with(map[string]string{"chicken": "cooken"})
+ })
+ assert.Panics(t, func() {
+ vec.with(map[string]string{"shimba": "boomba", "chicken": "cooken"})
+ })
+
+ // check reset
+ vec.reset()
+ assert.Empty(t, vec.metrics)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), cnt2)
+ assertMetricRemoved(t, rg.WithTags(tags).(*Registry), cnt3)
+}
+
+func assertMetricRemoved(t *testing.T, rg *Registry, m Metric) {
+ t.Helper()
+
+ v, ok := rg.metrics.Load(rg.metricKey(m))
+ assert.False(t, ok, v)
+}
diff --git a/library/go/core/metrics/solomon/ya.make b/library/go/core/metrics/solomon/ya.make
new file mode 100644
index 0000000000..a4de14cadf
--- /dev/null
+++ b/library/go/core/metrics/solomon/ya.make
@@ -0,0 +1,44 @@
+GO_LIBRARY()
+
+SRCS(
+ converter.go
+ counter.go
+ func_counter.go
+ func_gauge.go
+ func_int_gauge.go
+ gauge.go
+ int_gauge.go
+ histogram.go
+ metrics.go
+ metrics_opts.go
+ registry.go
+ registry_opts.go
+ spack.go
+ spack_compression.go
+ stream.go
+ timer.go
+ vec.go
+)
+
+GO_TEST_SRCS(
+ converter_test.go
+ counter_test.go
+ func_counter_test.go
+ func_gauge_test.go
+ func_int_gauge_test.go
+ gauge_test.go
+ int_gauge_test.go
+ histogram_test.go
+ metrics_test.go
+ registry_test.go
+ spack_compression_test.go
+ spack_test.go
+ stream_test.go
+ timer_test.go
+ vec_test.go
+ race_test.go
+)
+
+END()
+
+RECURSE(gotest)
diff --git a/library/go/core/metrics/ya.make b/library/go/core/metrics/ya.make
new file mode 100644
index 0000000000..0a42f422af
--- /dev/null
+++ b/library/go/core/metrics/ya.make
@@ -0,0 +1,20 @@
+GO_LIBRARY()
+
+SRCS(
+ buckets.go
+ metrics.go
+)
+
+GO_TEST_SRCS(buckets_test.go)
+
+END()
+
+RECURSE(
+ collect
+ gotest
+ internal
+ mock
+ nop
+ prometheus
+ solomon
+)
diff --git a/library/go/httputil/headers/accept.go b/library/go/httputil/headers/accept.go
new file mode 100644
index 0000000000..394bed7360
--- /dev/null
+++ b/library/go/httputil/headers/accept.go
@@ -0,0 +1,259 @@
+package headers
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+const (
+ AcceptKey = "Accept"
+ AcceptEncodingKey = "Accept-Encoding"
+)
+
+type AcceptableEncodings []AcceptableEncoding
+
+type AcceptableEncoding struct {
+ Encoding ContentEncoding
+ Weight float32
+
+ pos int
+}
+
+func (as AcceptableEncodings) IsAcceptable(encoding ContentEncoding) bool {
+ for _, ae := range as {
+ if ae.Encoding == encoding {
+ return ae.Weight != 0
+ }
+ }
+ return false
+}
+
+func (as AcceptableEncodings) String() string {
+ if len(as) == 0 {
+ return ""
+ }
+
+ var b strings.Builder
+ for i, ae := range as {
+ b.WriteString(ae.Encoding.String())
+
+ if ae.Weight > 0.0 && ae.Weight < 1.0 {
+ b.WriteString(";q=" + strconv.FormatFloat(float64(ae.Weight), 'f', 1, 32))
+ }
+
+ if i < len(as)-1 {
+ b.WriteString(", ")
+ }
+ }
+ return b.String()
+}
+
+type AcceptableTypes []AcceptableType
+
+func (as AcceptableTypes) IsAcceptable(contentType ContentType) bool {
+ for _, ae := range as {
+ if ae.Type == contentType {
+ return ae.Weight != 0
+ }
+ }
+ return false
+}
+
+type AcceptableType struct {
+ Type ContentType
+ Weight float32
+ Extension map[string]string
+
+ pos int
+}
+
+func (as AcceptableTypes) String() string {
+ if len(as) == 0 {
+ return ""
+ }
+
+ var b strings.Builder
+ for i, at := range as {
+ b.WriteString(at.Type.String())
+
+ if at.Weight > 0.0 && at.Weight < 1.0 {
+ b.WriteString(";q=" + strconv.FormatFloat(float64(at.Weight), 'f', 1, 32))
+ }
+
+ for k, v := range at.Extension {
+ b.WriteString(";" + k + "=" + v)
+ }
+
+ if i < len(as)-1 {
+ b.WriteString(", ")
+ }
+ }
+ return b.String()
+}
+
+// ParseAccept parses Accept HTTP header.
+// It will sort acceptable types by weight, specificity and position.
+// See: https://tools.ietf.org/html/rfc2616#section-14.1
+func ParseAccept(headerValue string) (AcceptableTypes, error) {
+ if headerValue == "" {
+ return nil, nil
+ }
+
+ parsedValues, err := parseAcceptFamilyHeader(headerValue)
+ if err != nil {
+ return nil, err
+ }
+ ah := make(AcceptableTypes, 0, len(parsedValues))
+ for _, parsedValue := range parsedValues {
+ ah = append(ah, AcceptableType{
+ Type: ContentType(parsedValue.Value),
+ Weight: parsedValue.Weight,
+ Extension: parsedValue.Extension,
+ pos: parsedValue.pos,
+ })
+ }
+
+ sort.Slice(ah, func(i, j int) bool {
+ // sort by weight only
+ if ah[i].Weight != ah[j].Weight {
+ return ah[i].Weight > ah[j].Weight
+ }
+
+ // sort by most specific if types are equal
+ if ah[i].Type == ah[j].Type {
+ return len(ah[i].Extension) > len(ah[j].Extension)
+ }
+
+ // move counterpart up if one of types is ANY
+ if ah[i].Type == ContentTypeAny {
+ return false
+ }
+ if ah[j].Type == ContentTypeAny {
+ return true
+ }
+
+ // i type has j type as prefix
+ if strings.HasSuffix(string(ah[j].Type), "/*") &&
+ strings.HasPrefix(string(ah[i].Type), string(ah[j].Type)[:len(ah[j].Type)-1]) {
+ return true
+ }
+
+ // j type has i type as prefix
+ if strings.HasSuffix(string(ah[i].Type), "/*") &&
+ strings.HasPrefix(string(ah[j].Type), string(ah[i].Type)[:len(ah[i].Type)-1]) {
+ return false
+ }
+
+ // sort by position if nothing else left
+ return ah[i].pos < ah[j].pos
+ })
+
+ return ah, nil
+}
+
+// ParseAcceptEncoding parses Accept-Encoding HTTP header.
+// It will sort acceptable encodings by weight and position.
+// See: https://tools.ietf.org/html/rfc2616#section-14.3
+func ParseAcceptEncoding(headerValue string) (AcceptableEncodings, error) {
+ if headerValue == "" {
+ return nil, nil
+ }
+
+ // e.g. gzip;q=1.0, compress, identity
+ parsedValues, err := parseAcceptFamilyHeader(headerValue)
+ if err != nil {
+ return nil, err
+ }
+ acceptableEncodings := make(AcceptableEncodings, 0, len(parsedValues))
+ for _, parsedValue := range parsedValues {
+ acceptableEncodings = append(acceptableEncodings, AcceptableEncoding{
+ Encoding: ContentEncoding(parsedValue.Value),
+ Weight: parsedValue.Weight,
+ pos: parsedValue.pos,
+ })
+ }
+ sort.Slice(acceptableEncodings, func(i, j int) bool {
+ // sort by weight only
+ if acceptableEncodings[i].Weight != acceptableEncodings[j].Weight {
+ return acceptableEncodings[i].Weight > acceptableEncodings[j].Weight
+ }
+
+ // move counterpart up if one of encodings is ANY
+ if acceptableEncodings[i].Encoding == EncodingAny {
+ return false
+ }
+ if acceptableEncodings[j].Encoding == EncodingAny {
+ return true
+ }
+
+ // sort by position if nothing else left
+ return acceptableEncodings[i].pos < acceptableEncodings[j].pos
+ })
+
+ return acceptableEncodings, nil
+}
+
+type acceptHeaderValue struct {
+ Value string
+ Weight float32
+ Extension map[string]string
+
+ pos int
+}
+
+// parseAcceptFamilyHeader parses the family of Accept* HTTP headers.
+// See: https://tools.ietf.org/html/rfc2616#section-14.1
+func parseAcceptFamilyHeader(header string) ([]acceptHeaderValue, error) {
+ headerValues := strings.Split(header, ",")
+
+ parsedValues := make([]acceptHeaderValue, 0, len(headerValues))
+ for i, headerValue := range headerValues {
+ valueParams := strings.Split(headerValue, ";")
+
+ parsedValue := acceptHeaderValue{
+ Value: strings.TrimSpace(valueParams[0]),
+ Weight: 1.0,
+ pos: i,
+ }
+
+ // parse quality factor and/or accept extension
+ if len(valueParams) > 1 {
+ for _, rawParam := range valueParams[1:] {
+ rawParam = strings.TrimSpace(rawParam)
+ params := strings.SplitN(rawParam, "=", 2)
+ key := strings.TrimSpace(params[0])
+
+ // quality factor
+ if key == "q" {
+ if len(params) != 2 {
+ return nil, fmt.Errorf("invalid quality factor format: %q", rawParam)
+ }
+
+ w, err := strconv.ParseFloat(params[1], 32)
+ if err != nil {
+ return nil, err
+ }
+ parsedValue.Weight = float32(w)
+
+ continue
+ }
+
+ // extension
+ if parsedValue.Extension == nil {
+ parsedValue.Extension = make(map[string]string)
+ }
+
+ var value string
+ if len(params) == 2 {
+ value = strings.TrimSpace(params[1])
+ }
+ parsedValue.Extension[key] = value
+ }
+ }
+
+ parsedValues = append(parsedValues, parsedValue)
+ }
+ return parsedValues, nil
+}
diff --git a/library/go/httputil/headers/accept_test.go b/library/go/httputil/headers/accept_test.go
new file mode 100644
index 0000000000..09d3da086f
--- /dev/null
+++ b/library/go/httputil/headers/accept_test.go
@@ -0,0 +1,309 @@
+package headers_test
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/ydb-platform/ydb/library/go/httputil/headers"
+)
+
+// examples for tests taken from https://tools.ietf.org/html/rfc2616#section-14.3
+func TestParseAcceptEncoding(t *testing.T) {
+ testCases := []struct {
+ name string
+ input string
+ expected headers.AcceptableEncodings
+ expectedErr error
+ }{
+ {
+ "ietf_example_1",
+ "compress, gzip",
+ headers.AcceptableEncodings{
+ {Encoding: headers.ContentEncoding("compress"), Weight: 1.0},
+ {Encoding: headers.ContentEncoding("gzip"), Weight: 1.0},
+ },
+ nil,
+ },
+ {
+ "ietf_example_2",
+ "",
+ nil,
+ nil,
+ },
+ {
+ "ietf_example_3",
+ "*",
+ headers.AcceptableEncodings{
+ {Encoding: headers.ContentEncoding("*"), Weight: 1.0},
+ },
+ nil,
+ },
+ {
+ "ietf_example_4",
+ "compress;q=0.5, gzip;q=1.0",
+ headers.AcceptableEncodings{
+ {Encoding: headers.ContentEncoding("gzip"), Weight: 1.0},
+ {Encoding: headers.ContentEncoding("compress"), Weight: 0.5},
+ },
+ nil,
+ },
+ {
+ "ietf_example_5",
+ "gzip;q=1.0, identity; q=0.5, *;q=0",
+ headers.AcceptableEncodings{
+ {Encoding: headers.ContentEncoding("gzip"), Weight: 1.0},
+ {Encoding: headers.ContentEncoding("identity"), Weight: 0.5},
+ {Encoding: headers.ContentEncoding("*"), Weight: 0},
+ },
+ nil,
+ },
+ {
+ "solomon_headers",
+ "zstd,lz4,gzip,deflate",
+ headers.AcceptableEncodings{
+ {Encoding: headers.ContentEncoding("zstd"), Weight: 1.0},
+ {Encoding: headers.ContentEncoding("lz4"), Weight: 1.0},
+ {Encoding: headers.ContentEncoding("gzip"), Weight: 1.0},
+ {Encoding: headers.ContentEncoding("deflate"), Weight: 1.0},
+ },
+ nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ acceptableEncodings, err := headers.ParseAcceptEncoding(tc.input)
+
+ if tc.expectedErr != nil {
+ assert.EqualError(t, err, tc.expectedErr.Error())
+ } else {
+ assert.NoError(t, err)
+ }
+
+ require.Len(t, acceptableEncodings, len(tc.expected))
+
+ opt := cmpopts.IgnoreUnexported(headers.AcceptableEncoding{})
+ assert.True(t, cmp.Equal(tc.expected, acceptableEncodings, opt), cmp.Diff(tc.expected, acceptableEncodings, opt))
+ })
+ }
+}
+
+func TestParseAccept(t *testing.T) {
+ testCases := []struct {
+ name string
+ input string
+ expected headers.AcceptableTypes
+ expectedErr error
+ }{
+ {
+ "empty_header",
+ "",
+ nil,
+ nil,
+ },
+ {
+ "accept_any",
+ "*/*",
+ headers.AcceptableTypes{
+ {Type: headers.ContentTypeAny, Weight: 1.0},
+ },
+ nil,
+ },
+ {
+ "accept_single",
+ "application/json",
+ headers.AcceptableTypes{
+ {Type: headers.TypeApplicationJSON, Weight: 1.0},
+ },
+ nil,
+ },
+ {
+ "accept_multiple",
+ "application/json, application/protobuf",
+ headers.AcceptableTypes{
+ {Type: headers.TypeApplicationJSON, Weight: 1.0},
+ {Type: headers.TypeApplicationProtobuf, Weight: 1.0},
+ },
+ nil,
+ },
+ {
+ "accept_multiple_weighted",
+ "application/json;q=0.8, application/protobuf",
+ headers.AcceptableTypes{
+ {Type: headers.TypeApplicationProtobuf, Weight: 1.0},
+ {Type: headers.TypeApplicationJSON, Weight: 0.8},
+ },
+ nil,
+ },
+ {
+ "accept_multiple_weighted_unsorted",
+ "text/plain;q=0.5, application/protobuf, application/json;q=0.5",
+ headers.AcceptableTypes{
+ {Type: headers.TypeApplicationProtobuf, Weight: 1.0},
+ {Type: headers.TypeTextPlain, Weight: 0.5},
+ {Type: headers.TypeApplicationJSON, Weight: 0.5},
+ },
+ nil,
+ },
+ {
+ "unknown_type",
+ "custom/type, unknown/my_type;q=0.2",
+ headers.AcceptableTypes{
+ {Type: headers.ContentType("custom/type"), Weight: 1.0},
+ {Type: headers.ContentType("unknown/my_type"), Weight: 0.2},
+ },
+ nil,
+ },
+ {
+ "yabro_19.6.0",
+ "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
+ headers.AcceptableTypes{
+ {Type: headers.ContentType("text/html"), Weight: 1.0},
+ {Type: headers.ContentType("application/xhtml+xml"), Weight: 1.0},
+ {Type: headers.ContentType("image/webp"), Weight: 1.0},
+ {Type: headers.ContentType("image/apng"), Weight: 1.0},
+ {Type: headers.ContentType("application/signed-exchange"), Weight: 1.0, Extension: map[string]string{"v": "b3"}},
+ {Type: headers.ContentType("application/xml"), Weight: 0.9},
+ {Type: headers.ContentType("*/*"), Weight: 0.8},
+ },
+ nil,
+ },
+ {
+ "chrome_81.0.4044",
+ "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
+ headers.AcceptableTypes{
+ {Type: headers.ContentType("text/html"), Weight: 1.0},
+ {Type: headers.ContentType("application/xhtml+xml"), Weight: 1.0},
+ {Type: headers.ContentType("image/webp"), Weight: 1.0},
+ {Type: headers.ContentType("image/apng"), Weight: 1.0},
+ {Type: headers.ContentType("application/xml"), Weight: 0.9},
+ {Type: headers.ContentType("application/signed-exchange"), Weight: 0.9, Extension: map[string]string{"v": "b3"}},
+ {Type: headers.ContentType("*/*"), Weight: 0.8},
+ },
+ nil,
+ },
+ {
+ "firefox_77.0b3",
+ "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
+ headers.AcceptableTypes{
+ {Type: headers.ContentType("text/html"), Weight: 1.0},
+ {Type: headers.ContentType("application/xhtml+xml"), Weight: 1.0},
+ {Type: headers.ContentType("image/webp"), Weight: 1.0},
+ {Type: headers.ContentType("application/xml"), Weight: 0.9},
+ {Type: headers.ContentType("*/*"), Weight: 0.8},
+ },
+ nil,
+ },
+ {
+ "sort_by_most_specific",
+ "text/*, text/html, */*, text/html;level=1",
+ headers.AcceptableTypes{
+ {Type: headers.ContentType("text/html"), Weight: 1.0, Extension: map[string]string{"level": "1"}},
+ {Type: headers.ContentType("text/html"), Weight: 1.0},
+ {Type: headers.ContentType("text/*"), Weight: 1.0},
+ {Type: headers.ContentType("*/*"), Weight: 1.0},
+ },
+ nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ at, err := headers.ParseAccept(tc.input)
+
+ if tc.expectedErr != nil {
+ assert.EqualError(t, err, tc.expectedErr.Error())
+ } else {
+ assert.NoError(t, err)
+ }
+
+ require.Len(t, at, len(tc.expected))
+
+ opt := cmpopts.IgnoreUnexported(headers.AcceptableType{})
+ assert.True(t, cmp.Equal(tc.expected, at, opt), cmp.Diff(tc.expected, at, opt))
+ })
+ }
+}
+
+func TestAcceptableTypesString(t *testing.T) {
+ testCases := []struct {
+ name string
+ types headers.AcceptableTypes
+ expected string
+ }{
+ {
+ "empty",
+ headers.AcceptableTypes{},
+ "",
+ },
+ {
+ "single",
+ headers.AcceptableTypes{
+ {Type: headers.TypeApplicationJSON},
+ },
+ "application/json",
+ },
+ {
+ "single_weighted",
+ headers.AcceptableTypes{
+ {Type: headers.TypeApplicationJSON, Weight: 0.8},
+ },
+ "application/json;q=0.8",
+ },
+ {
+ "multiple",
+ headers.AcceptableTypes{
+ {Type: headers.TypeApplicationJSON},
+ {Type: headers.TypeApplicationProtobuf},
+ },
+ "application/json, application/protobuf",
+ },
+ {
+ "multiple_weighted",
+ headers.AcceptableTypes{
+ {Type: headers.TypeApplicationProtobuf},
+ {Type: headers.TypeApplicationJSON, Weight: 0.8},
+ },
+ "application/protobuf, application/json;q=0.8",
+ },
+ {
+ "multiple_weighted_with_extension",
+ headers.AcceptableTypes{
+ {Type: headers.TypeApplicationProtobuf},
+ {Type: headers.TypeApplicationJSON, Weight: 0.8},
+ {Type: headers.TypeApplicationXML, Weight: 0.5, Extension: map[string]string{"label": "1"}},
+ },
+ "application/protobuf, application/json;q=0.8, application/xml;q=0.5;label=1",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.expected, tc.types.String())
+ })
+ }
+}
+
+func BenchmarkParseAccept(b *testing.B) {
+ benchCases := []string{
+ "",
+ "*/*",
+ "application/json",
+ "application/json, application/protobuf",
+ "application/json;q=0.8, application/protobuf",
+ "text/plain;q=0.5, application/protobuf, application/json;q=0.5",
+ "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
+ "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
+ "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
+ "text/*, text/html, */*, text/html;level=1",
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, _ = headers.ParseAccept(benchCases[i%len(benchCases)])
+ }
+}
diff --git a/library/go/httputil/headers/authorization.go b/library/go/httputil/headers/authorization.go
new file mode 100644
index 0000000000..145e04f931
--- /dev/null
+++ b/library/go/httputil/headers/authorization.go
@@ -0,0 +1,31 @@
+package headers
+
+import "strings"
+
+const (
+ AuthorizationKey = "Authorization"
+
+ TokenTypeBearer TokenType = "bearer"
+ TokenTypeMAC TokenType = "mac"
+)
+
+type TokenType string
+
+// String implements the fmt.Stringer interface.
+func (tt TokenType) String() string {
+ return string(tt)
+}
+
+func AuthorizationTokenType(token string) TokenType {
+ if len(token) > len(TokenTypeBearer) &&
+ strings.ToLower(token[:len(TokenTypeBearer)]) == TokenTypeBearer.String() {
+ return TokenTypeBearer
+ }
+
+ if len(token) > len(TokenTypeMAC) &&
+ strings.ToLower(token[:len(TokenTypeMAC)]) == TokenTypeMAC.String() {
+ return TokenTypeMAC
+ }
+
+ return TokenType("unknown")
+}
diff --git a/library/go/httputil/headers/authorization_test.go b/library/go/httputil/headers/authorization_test.go
new file mode 100644
index 0000000000..4e93aac1cd
--- /dev/null
+++ b/library/go/httputil/headers/authorization_test.go
@@ -0,0 +1,30 @@
+package headers_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/ydb-platform/ydb/library/go/httputil/headers"
+)
+
+func TestAuthorizationTokenType(t *testing.T) {
+ testCases := []struct {
+ name string
+ token string
+ expected headers.TokenType
+ }{
+ {"bearer", "bearer ololo.trololo", headers.TokenTypeBearer},
+ {"Bearer", "Bearer ololo.trololo", headers.TokenTypeBearer},
+ {"BEARER", "BEARER ololo.trololo", headers.TokenTypeBearer},
+ {"mac", "mac ololo.trololo", headers.TokenTypeMAC},
+ {"Mac", "Mac ololo.trololo", headers.TokenTypeMAC},
+ {"MAC", "MAC ololo.trololo", headers.TokenTypeMAC},
+ {"unknown", "shimba ololo.trololo", headers.TokenType("unknown")},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.expected, headers.AuthorizationTokenType(tc.token))
+ })
+ }
+}
diff --git a/library/go/httputil/headers/content.go b/library/go/httputil/headers/content.go
new file mode 100644
index 0000000000..b92e013cc3
--- /dev/null
+++ b/library/go/httputil/headers/content.go
@@ -0,0 +1,57 @@
+package headers
+
+type ContentType string
+
+// String implements the fmt.Stringer interface.
+func (ct ContentType) String() string {
+ return string(ct)
+}
+
+type ContentEncoding string
+
+// String implements the fmt.Stringer interface.
+func (ce ContentEncoding) String() string {
+ return string(ce)
+}
+
+const (
+ ContentTypeKey = "Content-Type"
+ ContentLength = "Content-Length"
+ ContentEncodingKey = "Content-Encoding"
+
+ ContentTypeAny ContentType = "*/*"
+
+ TypeApplicationJSON ContentType = "application/json"
+ TypeApplicationXML ContentType = "application/xml"
+ TypeApplicationOctetStream ContentType = "application/octet-stream"
+ TypeApplicationProtobuf ContentType = "application/protobuf"
+ TypeApplicationMsgpack ContentType = "application/msgpack"
+ TypeApplicationXSolomonSpack ContentType = "application/x-solomon-spack"
+
+ EncodingAny ContentEncoding = "*"
+ EncodingZSTD ContentEncoding = "zstd"
+ EncodingLZ4 ContentEncoding = "lz4"
+ EncodingGZIP ContentEncoding = "gzip"
+ EncodingDeflate ContentEncoding = "deflate"
+
+ TypeTextPlain ContentType = "text/plain"
+ TypeTextHTML ContentType = "text/html"
+ TypeTextCSV ContentType = "text/csv"
+ TypeTextCmd ContentType = "text/cmd"
+ TypeTextCSS ContentType = "text/css"
+ TypeTextXML ContentType = "text/xml"
+ TypeTextMarkdown ContentType = "text/markdown"
+
+ TypeImageAny ContentType = "image/*"
+ TypeImageJPEG ContentType = "image/jpeg"
+ TypeImageGIF ContentType = "image/gif"
+ TypeImagePNG ContentType = "image/png"
+ TypeImageSVG ContentType = "image/svg+xml"
+ TypeImageTIFF ContentType = "image/tiff"
+ TypeImageWebP ContentType = "image/webp"
+
+ TypeVideoMPEG ContentType = "video/mpeg"
+ TypeVideoMP4 ContentType = "video/mp4"
+ TypeVideoOgg ContentType = "video/ogg"
+ TypeVideoWebM ContentType = "video/webm"
+)
diff --git a/library/go/httputil/headers/content_test.go b/library/go/httputil/headers/content_test.go
new file mode 100644
index 0000000000..36c7b8ea8f
--- /dev/null
+++ b/library/go/httputil/headers/content_test.go
@@ -0,0 +1,41 @@
+package headers_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/ydb-platform/ydb/library/go/httputil/headers"
+)
+
+func TestContentTypeConsts(t *testing.T) {
+ assert.Equal(t, headers.ContentTypeKey, "Content-Type")
+
+ assert.Equal(t, headers.ContentTypeAny, headers.ContentType("*/*"))
+
+ assert.Equal(t, headers.TypeApplicationJSON, headers.ContentType("application/json"))
+ assert.Equal(t, headers.TypeApplicationXML, headers.ContentType("application/xml"))
+ assert.Equal(t, headers.TypeApplicationOctetStream, headers.ContentType("application/octet-stream"))
+ assert.Equal(t, headers.TypeApplicationProtobuf, headers.ContentType("application/protobuf"))
+ assert.Equal(t, headers.TypeApplicationMsgpack, headers.ContentType("application/msgpack"))
+
+ assert.Equal(t, headers.TypeTextPlain, headers.ContentType("text/plain"))
+ assert.Equal(t, headers.TypeTextHTML, headers.ContentType("text/html"))
+ assert.Equal(t, headers.TypeTextCSV, headers.ContentType("text/csv"))
+ assert.Equal(t, headers.TypeTextCmd, headers.ContentType("text/cmd"))
+ assert.Equal(t, headers.TypeTextCSS, headers.ContentType("text/css"))
+ assert.Equal(t, headers.TypeTextXML, headers.ContentType("text/xml"))
+ assert.Equal(t, headers.TypeTextMarkdown, headers.ContentType("text/markdown"))
+
+ assert.Equal(t, headers.TypeImageAny, headers.ContentType("image/*"))
+ assert.Equal(t, headers.TypeImageJPEG, headers.ContentType("image/jpeg"))
+ assert.Equal(t, headers.TypeImageGIF, headers.ContentType("image/gif"))
+ assert.Equal(t, headers.TypeImagePNG, headers.ContentType("image/png"))
+ assert.Equal(t, headers.TypeImageSVG, headers.ContentType("image/svg+xml"))
+ assert.Equal(t, headers.TypeImageTIFF, headers.ContentType("image/tiff"))
+ assert.Equal(t, headers.TypeImageWebP, headers.ContentType("image/webp"))
+
+ assert.Equal(t, headers.TypeVideoMPEG, headers.ContentType("video/mpeg"))
+ assert.Equal(t, headers.TypeVideoMP4, headers.ContentType("video/mp4"))
+ assert.Equal(t, headers.TypeVideoOgg, headers.ContentType("video/ogg"))
+ assert.Equal(t, headers.TypeVideoWebM, headers.ContentType("video/webm"))
+}
diff --git a/library/go/httputil/headers/cookie.go b/library/go/httputil/headers/cookie.go
new file mode 100644
index 0000000000..bcc685c474
--- /dev/null
+++ b/library/go/httputil/headers/cookie.go
@@ -0,0 +1,5 @@
+package headers
+
+const (
+ CookieKey = "Cookie"
+)
diff --git a/library/go/httputil/headers/gotest/ya.make b/library/go/httputil/headers/gotest/ya.make
new file mode 100644
index 0000000000..467fc88ca4
--- /dev/null
+++ b/library/go/httputil/headers/gotest/ya.make
@@ -0,0 +1,3 @@
+GO_TEST_FOR(library/go/httputil/headers)
+
+END()
diff --git a/library/go/httputil/headers/tvm.go b/library/go/httputil/headers/tvm.go
new file mode 100644
index 0000000000..1737cc69d7
--- /dev/null
+++ b/library/go/httputil/headers/tvm.go
@@ -0,0 +1,8 @@
+package headers
+
+const (
+	// XYaServiceTicketKey is the HTTP header that should be used for service ticket transfer.
+ XYaServiceTicketKey = "X-Ya-Service-Ticket"
+	// XYaUserTicketKey is the HTTP header that should be used for user ticket transfer.
+ XYaUserTicketKey = "X-Ya-User-Ticket"
+)
diff --git a/library/go/httputil/headers/user_agent.go b/library/go/httputil/headers/user_agent.go
new file mode 100644
index 0000000000..366606a01d
--- /dev/null
+++ b/library/go/httputil/headers/user_agent.go
@@ -0,0 +1,5 @@
+package headers
+
+const (
+ UserAgentKey = "User-Agent"
+)
diff --git a/library/go/httputil/headers/warning.go b/library/go/httputil/headers/warning.go
new file mode 100644
index 0000000000..20df80e664
--- /dev/null
+++ b/library/go/httputil/headers/warning.go
@@ -0,0 +1,167 @@
+package headers
+
+import (
+ "errors"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/xerrors"
+)
+
+const (
+ WarningKey = "Warning"
+
+ WarningResponseIsStale = 110 // RFC 7234, 5.5.1
+ WarningRevalidationFailed = 111 // RFC 7234, 5.5.2
+ WarningDisconnectedOperation = 112 // RFC 7234, 5.5.3
+ WarningHeuristicExpiration = 113 // RFC 7234, 5.5.4
+ WarningMiscellaneousWarning = 199 // RFC 7234, 5.5.5
+ WarningTransformationApplied = 214 // RFC 7234, 5.5.6
+ WarningMiscellaneousPersistentWarning = 299 // RFC 7234, 5.5.7
+)
+
+var warningStatusText = map[int]string{
+ WarningResponseIsStale: "Response is Stale",
+ WarningRevalidationFailed: "Revalidation Failed",
+ WarningDisconnectedOperation: "Disconnected Operation",
+ WarningHeuristicExpiration: "Heuristic Expiration",
+ WarningMiscellaneousWarning: "Miscellaneous Warning",
+ WarningTransformationApplied: "Transformation Applied",
+ WarningMiscellaneousPersistentWarning: "Miscellaneous Persistent Warning",
+}
+
+// WarningText returns a text for the warning header code. It returns the empty
+// string if the code is unknown.
+func WarningText(warn int) string {
+ return warningStatusText[warn]
+}
+
+// AddWarning adds Warning to http.Header with proper formatting
+// see: https://tools.ietf.org/html/rfc7234#section-5.5
+func AddWarning(h http.Header, warn int, agent, reason string, date time.Time) {
+ values := make([]string, 0, 4)
+ values = append(values, strconv.Itoa(warn))
+
+ if agent != "" {
+ values = append(values, agent)
+ } else {
+ values = append(values, "-")
+ }
+
+ if reason != "" {
+ values = append(values, strconv.Quote(reason))
+ }
+
+ if !date.IsZero() {
+ values = append(values, strconv.Quote(date.Format(time.RFC1123)))
+ }
+
+ h.Add(WarningKey, strings.Join(values, " "))
+}
+
+type WarningHeader struct {
+ Code int
+ Agent string
+ Reason string
+ Date time.Time
+}
+
+// ParseWarnings reads and parses Warning headers from http.Header
+func ParseWarnings(h http.Header) ([]WarningHeader, error) {
+ warnings, ok := h[WarningKey]
+ if !ok {
+ return nil, nil
+ }
+
+ res := make([]WarningHeader, 0, len(warnings))
+ for _, warn := range warnings {
+ wh, err := parseWarning(warn)
+ if err != nil {
+ return nil, xerrors.Errorf("cannot parse '%s' header: %w", warn, err)
+ }
+ res = append(res, wh)
+ }
+
+ return res, nil
+}
+
+func parseWarning(warn string) (WarningHeader, error) {
+ var res WarningHeader
+
+ // parse code
+ {
+ codeSP := strings.Index(warn, " ")
+
+ // fast path - code only warning
+ if codeSP == -1 {
+ code, err := strconv.Atoi(warn)
+ res.Code = code
+ return res, err
+ }
+
+ code, err := strconv.Atoi(warn[:codeSP])
+ if err != nil {
+ return WarningHeader{}, err
+ }
+ res.Code = code
+
+ warn = strings.TrimSpace(warn[codeSP+1:])
+ }
+
+ // parse agent
+ {
+ agentSP := strings.Index(warn, " ")
+
+ // fast path - no data after agent
+ if agentSP == -1 {
+ res.Agent = warn
+ return res, nil
+ }
+
+ res.Agent = warn[:agentSP]
+ warn = strings.TrimSpace(warn[agentSP+1:])
+ }
+
+ // parse reason
+ {
+ if len(warn) == 0 {
+ return res, nil
+ }
+
+	// reason must be quoted, so we search for the second quote
+ reasonSP := strings.Index(warn[1:], `"`)
+
+ // fast path - bad reason
+ if reasonSP == -1 {
+ return WarningHeader{}, errors.New("bad reason formatting")
+ }
+
+ res.Reason = warn[1 : reasonSP+1]
+ warn = strings.TrimSpace(warn[reasonSP+2:])
+ }
+
+ // parse date
+ {
+ if len(warn) == 0 {
+ return res, nil
+ }
+
+	// optional date must be quoted, so we search for the second quote
+ dateSP := strings.Index(warn[1:], `"`)
+
+ // fast path - bad date
+ if dateSP == -1 {
+ return WarningHeader{}, errors.New("bad date formatting")
+ }
+
+ dt, err := time.Parse(time.RFC1123, warn[1:dateSP+1])
+ if err != nil {
+ return WarningHeader{}, err
+ }
+ res.Date = dt
+ }
+
+ return res, nil
+}
diff --git a/library/go/httputil/headers/warning_test.go b/library/go/httputil/headers/warning_test.go
new file mode 100644
index 0000000000..9decb2f52f
--- /dev/null
+++ b/library/go/httputil/headers/warning_test.go
@@ -0,0 +1,245 @@
+package headers
+
+import (
+ "net/http"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWarningText(t *testing.T) {
+ testCases := []struct {
+ code int
+ expect string
+ }{
+ {WarningResponseIsStale, "Response is Stale"},
+ {WarningRevalidationFailed, "Revalidation Failed"},
+ {WarningDisconnectedOperation, "Disconnected Operation"},
+ {WarningHeuristicExpiration, "Heuristic Expiration"},
+ {WarningMiscellaneousWarning, "Miscellaneous Warning"},
+ {WarningTransformationApplied, "Transformation Applied"},
+ {WarningMiscellaneousPersistentWarning, "Miscellaneous Persistent Warning"},
+ {42, ""},
+ {1489, ""},
+ }
+
+ for _, tc := range testCases {
+ t.Run(strconv.Itoa(tc.code), func(t *testing.T) {
+ assert.Equal(t, tc.expect, WarningText(tc.code))
+ })
+ }
+}
+
+func TestAddWarning(t *testing.T) {
+ type args struct {
+ warn int
+ agent string
+ reason string
+ date time.Time
+ }
+
+ testCases := []struct {
+ name string
+ args args
+ expect http.Header
+ }{
+ {
+ name: "code_only",
+ args: args{warn: WarningResponseIsStale, agent: "", reason: "", date: time.Time{}},
+ expect: http.Header{
+ WarningKey: []string{
+ "110 -",
+ },
+ },
+ },
+ {
+ name: "code_agent",
+ args: args{warn: WarningResponseIsStale, agent: "ololo/trololo", reason: "", date: time.Time{}},
+ expect: http.Header{
+ WarningKey: []string{
+ "110 ololo/trololo",
+ },
+ },
+ },
+ {
+ name: "code_agent_reason",
+ args: args{warn: WarningResponseIsStale, agent: "ololo/trololo", reason: "shimba-boomba", date: time.Time{}},
+ expect: http.Header{
+ WarningKey: []string{
+ `110 ololo/trololo "shimba-boomba"`,
+ },
+ },
+ },
+ {
+ name: "code_agent_reason_date",
+ args: args{
+ warn: WarningResponseIsStale,
+ agent: "ololo/trololo",
+ reason: "shimba-boomba",
+ date: time.Date(2019, time.January, 14, 10, 50, 43, 0, time.UTC),
+ },
+ expect: http.Header{
+ WarningKey: []string{
+ `110 ololo/trololo "shimba-boomba" "Mon, 14 Jan 2019 10:50:43 UTC"`,
+ },
+ },
+ },
+ {
+ name: "code_reason_date",
+ args: args{
+ warn: WarningResponseIsStale,
+ agent: "",
+ reason: "shimba-boomba",
+ date: time.Date(2019, time.January, 14, 10, 50, 43, 0, time.UTC),
+ },
+ expect: http.Header{
+ WarningKey: []string{
+ `110 - "shimba-boomba" "Mon, 14 Jan 2019 10:50:43 UTC"`,
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ h := http.Header{}
+ AddWarning(h, tc.args.warn, tc.args.agent, tc.args.reason, tc.args.date)
+ assert.Equal(t, tc.expect, h)
+ })
+ }
+}
+
+func TestParseWarnings(t *testing.T) {
+ testCases := []struct {
+ name string
+ h http.Header
+ expect []WarningHeader
+ expectErr bool
+ }{
+ {
+ name: "no_warnings",
+ h: http.Header{},
+ expect: nil,
+ expectErr: false,
+ },
+ {
+ name: "single_code_only",
+ h: http.Header{
+ WarningKey: []string{
+ "110",
+ },
+ },
+ expect: []WarningHeader{
+ {
+ Code: 110,
+ Agent: "",
+ Reason: "",
+ Date: time.Time{},
+ },
+ },
+ },
+ {
+ name: "single_code_and_empty_agent",
+ h: http.Header{
+ WarningKey: []string{
+ "110 -",
+ },
+ },
+ expect: []WarningHeader{
+ {
+ Code: 110,
+ Agent: "-",
+ Reason: "",
+ Date: time.Time{},
+ },
+ },
+ },
+ {
+ name: "single_code_and_agent",
+ h: http.Header{
+ WarningKey: []string{
+ "110 shimba/boomba",
+ },
+ },
+ expect: []WarningHeader{
+ {
+ Code: 110,
+ Agent: "shimba/boomba",
+ Reason: "",
+ Date: time.Time{},
+ },
+ },
+ },
+ {
+ name: "single_code_agent_and_reason",
+ h: http.Header{
+ WarningKey: []string{
+ `110 shimba/boomba "looken tooken"`,
+ },
+ },
+ expect: []WarningHeader{
+ {
+ Code: 110,
+ Agent: "shimba/boomba",
+ Reason: "looken tooken",
+ Date: time.Time{},
+ },
+ },
+ },
+ {
+ name: "single_full",
+ h: http.Header{
+ WarningKey: []string{
+ `110 shimba/boomba "looken tooken" "Mon, 14 Jan 2019 10:50:43 UTC"`,
+ },
+ },
+ expect: []WarningHeader{
+ {
+ Code: 110,
+ Agent: "shimba/boomba",
+ Reason: "looken tooken",
+ Date: time.Date(2019, time.January, 14, 10, 50, 43, 0, time.UTC),
+ },
+ },
+ },
+ {
+ name: "multiple_full",
+ h: http.Header{
+ WarningKey: []string{
+ `110 shimba/boomba "looken tooken" "Mon, 14 Jan 2019 10:50:43 UTC"`,
+ `112 chiken "cooken" "Mon, 15 Jan 2019 10:51:43 UTC"`,
+ },
+ },
+ expect: []WarningHeader{
+ {
+ Code: 110,
+ Agent: "shimba/boomba",
+ Reason: "looken tooken",
+ Date: time.Date(2019, time.January, 14, 10, 50, 43, 0, time.UTC),
+ },
+ {
+ Code: 112,
+ Agent: "chiken",
+ Reason: "cooken",
+ Date: time.Date(2019, time.January, 15, 10, 51, 43, 0, time.UTC),
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ got, err := ParseWarnings(tc.h)
+
+ if tc.expectErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ }
+
+ assert.Equal(t, tc.expect, got)
+ })
+ }
+}
diff --git a/library/go/httputil/headers/ya.make b/library/go/httputil/headers/ya.make
new file mode 100644
index 0000000000..d249197dc3
--- /dev/null
+++ b/library/go/httputil/headers/ya.make
@@ -0,0 +1,23 @@
+GO_LIBRARY()
+
+SRCS(
+ accept.go
+ authorization.go
+ content.go
+ cookie.go
+ tvm.go
+ user_agent.go
+ warning.go
+)
+
+GO_TEST_SRCS(warning_test.go)
+
+GO_XTEST_SRCS(
+ accept_test.go
+ authorization_test.go
+ content_test.go
+)
+
+END()
+
+RECURSE(gotest)
diff --git a/library/go/ptr/ptr.go b/library/go/ptr/ptr.go
new file mode 100644
index 0000000000..7ebf3dbd72
--- /dev/null
+++ b/library/go/ptr/ptr.go
@@ -0,0 +1,75 @@
+package ptr
+
+import "time"
+
+// Int returns pointer to provided value
+func Int(v int) *int { return &v }
+
+// Int8 returns pointer to provided value
+func Int8(v int8) *int8 { return &v }
+
+// Int16 returns pointer to provided value
+func Int16(v int16) *int16 { return &v }
+
+// Int32 returns pointer to provided value
+func Int32(v int32) *int32 { return &v }
+
+// Int64 returns pointer to provided value
+func Int64(v int64) *int64 { return &v }
+
+// Uint returns pointer to provided value
+func Uint(v uint) *uint { return &v }
+
+// Uint8 returns pointer to provided value
+func Uint8(v uint8) *uint8 { return &v }
+
+// Uint16 returns pointer to provided value
+func Uint16(v uint16) *uint16 { return &v }
+
+// Uint32 returns pointer to provided value
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 returns pointer to provided value
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 returns pointer to provided value
+func Float32(v float32) *float32 { return &v }
+
+// Float64 returns pointer to provided value
+func Float64(v float64) *float64 { return &v }
+
+// Bool returns pointer to provided value
+func Bool(v bool) *bool { return &v }
+
+// String returns pointer to provided value
+func String(v string) *string { return &v }
+
+// Byte returns pointer to provided value
+func Byte(v byte) *byte { return &v }
+
+// Rune returns pointer to provided value
+func Rune(v rune) *rune { return &v }
+
+// Complex64 returns pointer to provided value
+func Complex64(v complex64) *complex64 { return &v }
+
+// Complex128 returns pointer to provided value
+func Complex128(v complex128) *complex128 { return &v }
+
+// Time returns pointer to provided value
+func Time(v time.Time) *time.Time { return &v }
+
+// Duration returns pointer to provided value
+func Duration(v time.Duration) *time.Duration { return &v }
+
+// T returns pointer to provided value
+func T[T any](v T) *T { return &v }
+
+// From returns value from pointer
+func From[T any](v *T) T {
+ if v == nil {
+ return *new(T)
+ }
+
+ return *v
+}
diff --git a/library/go/ptr/ya.make b/library/go/ptr/ya.make
new file mode 100644
index 0000000000..17cf07a3c6
--- /dev/null
+++ b/library/go/ptr/ya.make
@@ -0,0 +1,5 @@
+GO_LIBRARY()
+
+SRCS(ptr.go)
+
+END()
diff --git a/library/go/test/assertpb/assert.go b/library/go/test/assertpb/assert.go
new file mode 100644
index 0000000000..f5420748d2
--- /dev/null
+++ b/library/go/test/assertpb/assert.go
@@ -0,0 +1,35 @@
+package assertpb
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/google/go-cmp/cmp"
+ "github.com/stretchr/testify/assert"
+)
+
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+ FailNow()
+ Helper()
+}
+
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ t.Helper()
+
+ if cmp.Equal(expected, actual, cmp.Comparer(proto.Equal)) {
+ return true
+ }
+
+ diff := cmp.Diff(expected, actual, cmp.Comparer(proto.Equal))
+ return assert.Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s\n"+
+ "diff : %s", expected, actual, diff), msgAndArgs)
+}
+
+func Equalf(t TestingT, expected, actual interface{}, msg string, args ...interface{}) bool {
+ t.Helper()
+
+ return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
diff --git a/library/go/test/assertpb/ya.make b/library/go/test/assertpb/ya.make
new file mode 100644
index 0000000000..109571f55a
--- /dev/null
+++ b/library/go/test/assertpb/ya.make
@@ -0,0 +1,9 @@
+GO_LIBRARY()
+
+SRCS(assert.go)
+
+GO_TEST_SRCS(assert_test.go)
+
+END()
+
+RECURSE(gotest)
diff --git a/library/go/x/xsync/singleinflight.go b/library/go/x/xsync/singleinflight.go
new file mode 100644
index 0000000000..3beee1ea67
--- /dev/null
+++ b/library/go/x/xsync/singleinflight.go
@@ -0,0 +1,36 @@
+package xsync
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// SingleInflight allows only one execution of a function at a time.
+// For more exhaustive functionality see https://pkg.go.dev/golang.org/x/sync/singleflight.
+type SingleInflight struct {
+ updatingOnce atomic.Value
+}
+
+// NewSingleInflight creates new SingleInflight.
+func NewSingleInflight() SingleInflight {
+ var v atomic.Value
+ v.Store(new(sync.Once))
+ return SingleInflight{updatingOnce: v}
+}
+
+// Do executes the given function, making sure that only one execution is in-flight.
+// If another caller comes in, it waits for the original to complete.
+func (i *SingleInflight) Do(f func()) {
+ i.getOnce().Do(func() {
+ f()
+ i.setOnce()
+ })
+}
+
+func (i *SingleInflight) getOnce() *sync.Once {
+ return i.updatingOnce.Load().(*sync.Once)
+}
+
+func (i *SingleInflight) setOnce() {
+ i.updatingOnce.Store(new(sync.Once))
+}
diff --git a/library/go/x/xsync/ya.make b/library/go/x/xsync/ya.make
new file mode 100644
index 0000000000..cffc9f89b8
--- /dev/null
+++ b/library/go/x/xsync/ya.make
@@ -0,0 +1,9 @@
+GO_LIBRARY()
+
+SRCS(singleinflight.go)
+
+GO_TEST_SRCS(singleinflight_test.go)
+
+END()
+
+RECURSE(gotest)
diff --git a/ydb/library/yql/providers/generic/connector/app/config/server.pb.go b/ydb/library/yql/providers/generic/connector/app/config/server.pb.go
index b43cc8dc08..2e59cf6607 100644
--- a/ydb/library/yql/providers/generic/connector/app/config/server.pb.go
+++ b/ydb/library/yql/providers/generic/connector/app/config/server.pb.go
@@ -102,6 +102,8 @@ type TServerConfig struct {
// Go runtime profiler.
// Disabled if this part of config is empty.
PprofServer *TPprofServerConfig `protobuf:"bytes,6,opt,name=pprof_server,json=pprofServer,proto3" json:"pprof_server,omitempty"`
+ // Metrics server config
+ MetricsServer *TMetricsServerConfig `protobuf:"bytes,7,opt,name=metrics_server,json=metricsServer,proto3" json:"metrics_server,omitempty"`
}
func (x *TServerConfig) Reset() {
@@ -180,6 +182,13 @@ func (x *TServerConfig) GetPprofServer() *TPprofServerConfig {
return nil
}
+func (x *TServerConfig) GetMetricsServer() *TMetricsServerConfig {
+ if x != nil {
+ return x.MetricsServer
+ }
+ return nil
+}
+
// TConnectorServerConfig - configuration of the main GRPC server
type TConnectorServerConfig struct {
state protoimpl.MessageState
@@ -462,6 +471,65 @@ func (x *TPprofServerConfig) GetTls() *TServerTLSConfig {
return nil
}
+// TMetricsServerConfig - configuration of the metrics server
+type TMetricsServerConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Network address server will be listening on
+ Endpoint *common.TEndpoint `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
+ // TLS settings.
+ // Leave it empty for insecure connections.
+ Tls *TServerTLSConfig `protobuf:"bytes,2,opt,name=tls,proto3" json:"tls,omitempty"`
+}
+
+func (x *TMetricsServerConfig) Reset() {
+ *x = TMetricsServerConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TMetricsServerConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TMetricsServerConfig) ProtoMessage() {}
+
+func (x *TMetricsServerConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TMetricsServerConfig.ProtoReflect.Descriptor instead.
+func (*TMetricsServerConfig) Descriptor() ([]byte, []int) {
+ return file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *TMetricsServerConfig) GetEndpoint() *common.TEndpoint {
+ if x != nil {
+ return x.Endpoint
+ }
+ return nil
+}
+
+func (x *TMetricsServerConfig) GetTls() *TServerTLSConfig {
+ if x != nil {
+ return x.Tls
+ }
+ return nil
+}
+
var File_ydb_library_yql_providers_generic_connector_app_config_server_proto protoreflect.FileDescriptor
var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDesc = []byte{
@@ -475,7 +543,7 @@ var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_raw
0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65,
0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e,
- 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd1, 0x03, 0x0a, 0x0d, 0x54, 0x53, 0x65, 0x72,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa9, 0x04, 0x0a, 0x0d, 0x54, 0x53, 0x65, 0x72,
0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x08, 0x65, 0x6e, 0x64,
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x4e, 0x59,
0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41,
@@ -504,51 +572,66 @@ var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_raw
0x2d, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x50, 0x70, 0x72,
0x6f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b,
- 0x70, 0x70, 0x72, 0x6f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, 0x94, 0x01, 0x0a, 0x16,
- 0x54, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e,
- 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e,
- 0x54, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f,
- 0x69, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x2b, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x53, 0x65,
- 0x72, 0x76, 0x65, 0x72, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x74,
- 0x6c, 0x73, 0x22, 0x3e, 0x0a, 0x10, 0x54, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x4c, 0x53,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x72, 0x74, 0x4a, 0x04, 0x08, 0x01,
- 0x10, 0x02, 0x22, 0x26, 0x0a, 0x10, 0x54, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x61,
- 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x0d, 0x54,
- 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09,
- 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x24, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x4c, 0x6f, 0x67,
- 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
- 0x37, 0x0a, 0x18, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x71, 0x6c, 0x5f, 0x71, 0x75,
- 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x71, 0x6c, 0x51, 0x75, 0x65, 0x72,
- 0x79, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x22, 0x90, 0x01, 0x0a, 0x12, 0x54, 0x50, 0x70,
- 0x72, 0x6f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x3b, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1f, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
- 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69,
- 0x6e, 0x74, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x03,
- 0x74, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x4e, 0x59, 0x71, 0x6c,
- 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x4c, 0x53,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x74, 0x6c, 0x73, 0x2a, 0x4b, 0x0a, 0x09, 0x45,
- 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43,
- 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x01, 0x12, 0x08,
- 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 0x4e,
- 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x12, 0x09, 0x0a,
- 0x05, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x05, 0x42, 0x49, 0x5a, 0x47, 0x61, 0x2e, 0x79, 0x61,
- 0x6e, 0x64, 0x65, 0x78, 0x2d, 0x74, 0x65, 0x61, 0x6d, 0x2e, 0x72, 0x75, 0x2f, 0x79, 0x64, 0x62,
- 0x2f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, 0x6c, 0x2f, 0x70, 0x72, 0x6f,
- 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x63,
- 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x70, 0x70, 0x72, 0x6f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x0e, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x54, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x54, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b,
+ 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1f, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e,
+ 0x74, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x03, 0x74,
+ 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x4c, 0x53, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x74, 0x6c, 0x73, 0x22, 0x3e, 0x0a, 0x10, 0x54, 0x53,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x63, 0x65, 0x72, 0x74, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x26, 0x0a, 0x10, 0x54, 0x53,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12,
+ 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x6f,
+ 0x77, 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x0d, 0x54, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x45, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x08, 0x6c,
+ 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x18, 0x65, 0x6e, 0x61, 0x62, 0x6c,
+ 0x65, 0x5f, 0x73, 0x71, 0x6c, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x67,
+ 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c,
+ 0x65, 0x53, 0x71, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67,
+ 0x22, 0x90, 0x01, 0x0a, 0x12, 0x54, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x4e, 0x59, 0x71, 0x6c,
+ 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4e, 0x41, 0x70, 0x69,
+ 0x2e, 0x54, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2b, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x53,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03,
+ 0x74, 0x6c, 0x73, 0x22, 0x92, 0x01, 0x0a, 0x14, 0x54, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x0a, 0x08,
+ 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
+ 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x4e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x2e, 0x4e, 0x41, 0x70, 0x69, 0x2e, 0x54, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52,
+ 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x03, 0x74, 0x6c, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x4e, 0x59, 0x71, 0x6c, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x54, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x03, 0x74, 0x6c, 0x73, 0x2a, 0x4b, 0x0a, 0x09, 0x45, 0x4c, 0x6f, 0x67,
+ 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x00,
+ 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49,
+ 0x4e, 0x46, 0x4f, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x03, 0x12,
+ 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x41,
+ 0x54, 0x41, 0x4c, 0x10, 0x05, 0x42, 0x49, 0x5a, 0x47, 0x61, 0x2e, 0x79, 0x61, 0x6e, 0x64, 0x65,
+ 0x78, 0x2d, 0x74, 0x65, 0x61, 0x6d, 0x2e, 0x72, 0x75, 0x2f, 0x79, 0x64, 0x62, 0x2f, 0x6c, 0x69,
+ 0x62, 0x72, 0x61, 0x72, 0x79, 0x2f, 0x79, 0x71, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64,
+ 0x65, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -564,7 +647,7 @@ func file_ydb_library_yql_providers_generic_connector_app_config_server_proto_ra
}
var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_goTypes = []interface{}{
(ELogLevel)(0), // 0: NYql.Connector.App.Config.ELogLevel
(*TServerConfig)(nil), // 1: NYql.Connector.App.Config.TServerConfig
@@ -573,25 +656,29 @@ var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_goT
(*TServerReadLimit)(nil), // 4: NYql.Connector.App.Config.TServerReadLimit
(*TLoggerConfig)(nil), // 5: NYql.Connector.App.Config.TLoggerConfig
(*TPprofServerConfig)(nil), // 6: NYql.Connector.App.Config.TPprofServerConfig
- (*common.TEndpoint)(nil), // 7: NYql.NConnector.NApi.TEndpoint
+ (*TMetricsServerConfig)(nil), // 7: NYql.Connector.App.Config.TMetricsServerConfig
+ (*common.TEndpoint)(nil), // 8: NYql.NConnector.NApi.TEndpoint
}
var file_ydb_library_yql_providers_generic_connector_app_config_server_proto_depIdxs = []int32{
- 7, // 0: NYql.Connector.App.Config.TServerConfig.endpoint:type_name -> NYql.NConnector.NApi.TEndpoint
+ 8, // 0: NYql.Connector.App.Config.TServerConfig.endpoint:type_name -> NYql.NConnector.NApi.TEndpoint
3, // 1: NYql.Connector.App.Config.TServerConfig.tls:type_name -> NYql.Connector.App.Config.TServerTLSConfig
2, // 2: NYql.Connector.App.Config.TServerConfig.connector_server:type_name -> NYql.Connector.App.Config.TConnectorServerConfig
4, // 3: NYql.Connector.App.Config.TServerConfig.read_limit:type_name -> NYql.Connector.App.Config.TServerReadLimit
5, // 4: NYql.Connector.App.Config.TServerConfig.logger:type_name -> NYql.Connector.App.Config.TLoggerConfig
6, // 5: NYql.Connector.App.Config.TServerConfig.pprof_server:type_name -> NYql.Connector.App.Config.TPprofServerConfig
- 7, // 6: NYql.Connector.App.Config.TConnectorServerConfig.endpoint:type_name -> NYql.NConnector.NApi.TEndpoint
- 3, // 7: NYql.Connector.App.Config.TConnectorServerConfig.tls:type_name -> NYql.Connector.App.Config.TServerTLSConfig
- 0, // 8: NYql.Connector.App.Config.TLoggerConfig.log_level:type_name -> NYql.Connector.App.Config.ELogLevel
- 7, // 9: NYql.Connector.App.Config.TPprofServerConfig.endpoint:type_name -> NYql.NConnector.NApi.TEndpoint
- 3, // 10: NYql.Connector.App.Config.TPprofServerConfig.tls:type_name -> NYql.Connector.App.Config.TServerTLSConfig
- 11, // [11:11] is the sub-list for method output_type
- 11, // [11:11] is the sub-list for method input_type
- 11, // [11:11] is the sub-list for extension type_name
- 11, // [11:11] is the sub-list for extension extendee
- 0, // [0:11] is the sub-list for field type_name
+ 7, // 6: NYql.Connector.App.Config.TServerConfig.metrics_server:type_name -> NYql.Connector.App.Config.TMetricsServerConfig
+ 8, // 7: NYql.Connector.App.Config.TConnectorServerConfig.endpoint:type_name -> NYql.NConnector.NApi.TEndpoint
+ 3, // 8: NYql.Connector.App.Config.TConnectorServerConfig.tls:type_name -> NYql.Connector.App.Config.TServerTLSConfig
+ 0, // 9: NYql.Connector.App.Config.TLoggerConfig.log_level:type_name -> NYql.Connector.App.Config.ELogLevel
+ 8, // 10: NYql.Connector.App.Config.TPprofServerConfig.endpoint:type_name -> NYql.NConnector.NApi.TEndpoint
+ 3, // 11: NYql.Connector.App.Config.TPprofServerConfig.tls:type_name -> NYql.Connector.App.Config.TServerTLSConfig
+ 8, // 12: NYql.Connector.App.Config.TMetricsServerConfig.endpoint:type_name -> NYql.NConnector.NApi.TEndpoint
+ 3, // 13: NYql.Connector.App.Config.TMetricsServerConfig.tls:type_name -> NYql.Connector.App.Config.TServerTLSConfig
+ 14, // [14:14] is the sub-list for method output_type
+ 14, // [14:14] is the sub-list for method input_type
+ 14, // [14:14] is the sub-list for extension type_name
+ 14, // [14:14] is the sub-list for extension extendee
+ 0, // [0:14] is the sub-list for field type_name
}
func init() { file_ydb_library_yql_providers_generic_connector_app_config_server_proto_init() }
@@ -672,6 +759,18 @@ func file_ydb_library_yql_providers_generic_connector_app_config_server_proto_in
return nil
}
}
+ file_ydb_library_yql_providers_generic_connector_app_config_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TMetricsServerConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -679,7 +778,7 @@ func file_ydb_library_yql_providers_generic_connector_app_config_server_proto_in
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_ydb_library_yql_providers_generic_connector_app_config_server_proto_rawDesc,
NumEnums: 1,
- NumMessages: 6,
+ NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/ydb/library/yql/providers/generic/connector/app/config/server.proto b/ydb/library/yql/providers/generic/connector/app/config/server.proto
index cedbc2a893..89eaa289d4 100644
--- a/ydb/library/yql/providers/generic/connector/app/config/server.proto
+++ b/ydb/library/yql/providers/generic/connector/app/config/server.proto
@@ -21,6 +21,8 @@ message TServerConfig {
// Go runtime profiler.
// Disabled if this part of config is empty.
TPprofServerConfig pprof_server = 6;
+ // Metrics server config
+ TMetricsServerConfig metrics_server = 7;
}
// TConnectorServerConfig - configuration of the main GRPC server
@@ -73,3 +75,12 @@ message TPprofServerConfig {
// Leave it empty for insecure connections.
TServerTLSConfig tls = 2;
}
+
+// TMetricsServerConfig - configuration of the metrics service
+message TMetricsServerConfig {
+ // Network address server will be listening on
+ NYql.NConnector.NApi.TEndpoint endpoint = 1;
+ // TLS settings.
+ // Leave it empty for insecure connections.
+ TServerTLSConfig tls = 2;
+}
diff --git a/ydb/library/yql/providers/generic/connector/app/server/grpc_metrics.go b/ydb/library/yql/providers/generic/connector/app/server/grpc_metrics.go
new file mode 100644
index 0000000000..6e1ff6bada
--- /dev/null
+++ b/ydb/library/yql/providers/generic/connector/app/server/grpc_metrics.go
@@ -0,0 +1,162 @@
+package server
+
+import (
+ "context"
+ "time"
+
+ "github.com/ydb-platform/ydb/library/go/core/metrics"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/solomon"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/status"
+)
+
+// UnaryServerMetrics returns a gRPC unary server interceptor that reports
+// per-call metrics into registry: total requests, a duration histogram,
+// an in-flight gauge, a panic counter and a per-status response counter,
+// all labelled with protocol ("grpc") and endpoint (the full method name).
+func UnaryServerMetrics(registry metrics.Registry) grpc.UnaryServerInterceptor {
+ requestCount := registry.CounterVec("requests_total", []string{"protocol", "endpoint"})
+ requestDuration := registry.DurationHistogramVec("request_duration_seconds", metrics.MakeExponentialDurationBuckets(250*time.Microsecond, 1.5, 35), []string{"protocol", "endpoint"})
+ panicsCount := registry.CounterVec("panics_total", []string{"protocol", "endpoint"})
+ inflightRequests := registry.GaugeVec("inflight_requests", []string{"protocol", "endpoint"})
+ responseCountCount := registry.CounterVec("status_total", []string{"protocol", "endpoint", "status"})
+
+ // Mark cumulative metrics as rated for Solomon; the in-flight gauge is
+ // deliberately left unrated.
+ solomon.Rated(requestCount)
+ solomon.Rated(requestDuration)
+ solomon.Rated(panicsCount)
+ solomon.Rated(responseCountCount)
+
+ return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (_ any, err error) {
+ // deferFunc finishes the per-request accounting: it records the call
+ // duration, decrements the in-flight gauge and, if the handler
+ // panicked, counts the panic before re-raising it.
+ deferFunc := func(startTime time.Time, opName string) {
+ requestDuration.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).RecordDuration(time.Since(startTime))
+
+ inflightRequests.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).Add(-1)
+
+ // recover only works directly inside a deferred function; the
+ // panic is re-thrown so upstream interceptors still observe it.
+ if p := recover(); p != nil {
+ panicsCount.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).Inc()
+ panic(p)
+ }
+ }
+
+ opName := info.FullMethod
+
+ requestCount.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).Inc()
+
+ inflightRequests.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).Add(1)
+
+ startTime := time.Now()
+ defer deferFunc(startTime, opName)
+
+ resp, err := handler(ctx, req)
+
+ // Count the response by its gRPC status code (OK for a nil error).
+ code := status.Code(err)
+ responseCountCount.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ "status": code.String(),
+ }).Inc()
+
+ return resp, err
+ }
+}
+
+// StreamServerMetrics returns a gRPC stream server interceptor that reports
+// per-stream metrics into registry: total streams, a duration histogram,
+// an in-flight gauge, a panic counter and counters of sent/received stream
+// messages, labelled with protocol ("grpc") and endpoint (full method name).
+func StreamServerMetrics(registry metrics.Registry) grpc.StreamServerInterceptor {
+ streamCount := registry.CounterVec("streams_total", []string{"protocol", "endpoint"})
+ streamDuration := registry.DurationHistogramVec("stream_duration_seconds", metrics.MakeExponentialDurationBuckets(250*time.Microsecond, 1.5, 35), []string{"protocol", "endpoint"})
+ inflightStreams := registry.GaugeVec("inflight_streams", []string{"protocol", "endpoint"})
+ panicsCount := registry.CounterVec("panics_total", []string{"protocol", "endpoint"})
+ sentStreamMessages := registry.CounterVec("sent_stream_messages_total", []string{"protocol", "endpoint"})
+ receivedStreamMessages := registry.CounterVec("received_stream_messages_total", []string{"protocol", "endpoint"})
+
+ // Mark cumulative metrics as rated for Solomon; the in-flight gauge is
+ // deliberately left unrated.
+ solomon.Rated(streamCount)
+ solomon.Rated(streamDuration)
+ solomon.Rated(panicsCount)
+ solomon.Rated(sentStreamMessages)
+ solomon.Rated(receivedStreamMessages)
+
+ return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ // deferFunc finishes the per-stream accounting: it records the stream
+ // duration, decrements the in-flight gauge and, if the handler
+ // panicked, counts the panic before re-raising it.
+ deferFunc := func(startTime time.Time, opName string) {
+ streamDuration.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).RecordDuration(time.Since(startTime))
+
+ inflightStreams.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).Add(-1)
+
+ // recover only works directly inside a deferred function; the
+ // panic is re-thrown so upstream interceptors still observe it.
+ if p := recover(); p != nil {
+ panicsCount.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).Inc()
+ panic(p)
+ }
+ }
+
+ opName := info.FullMethod
+
+ streamCount.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).Inc()
+
+ inflightStreams.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }).Add(1)
+
+ startTime := time.Now()
+ defer deferFunc(startTime, opName)
+
+ // Wrap the stream so every successful SendMsg/RecvMsg is counted.
+ return handler(srv, serverStreamWithMessagesCount{
+ ServerStream: ss,
+ sentStreamMessages: sentStreamMessages.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }),
+ receivedStreamMessages: receivedStreamMessages.With(map[string]string{
+ "protocol": "grpc",
+ "endpoint": opName,
+ }),
+ })
+ }
+}
+
+// serverStreamWithMessagesCount wraps a grpc.ServerStream and increments the
+// given counters on every successfully sent and received message.
+type serverStreamWithMessagesCount struct {
+ grpc.ServerStream
+ sentStreamMessages metrics.Counter
+ receivedStreamMessages metrics.Counter
+}
+
+// SendMsg forwards m to the wrapped stream and bumps the sent-messages
+// counter when the send succeeds.
+func (s serverStreamWithMessagesCount) SendMsg(m any) error {
+	if err := s.ServerStream.SendMsg(m); err != nil {
+		return err
+	}
+
+	s.sentStreamMessages.Inc()
+
+	return nil
+}
+
+// RecvMsg reads the next message from the wrapped stream into m and bumps
+// the received-messages counter when the read succeeds.
+func (s serverStreamWithMessagesCount) RecvMsg(m any) error {
+	if err := s.ServerStream.RecvMsg(m); err != nil {
+		return err
+	}
+
+	s.receivedStreamMessages.Inc()
+
+	return nil
+}
diff --git a/ydb/library/yql/providers/generic/connector/app/server/httppuller.go b/ydb/library/yql/providers/generic/connector/app/server/httppuller.go
new file mode 100644
index 0000000000..b099fe4bca
--- /dev/null
+++ b/ydb/library/yql/providers/generic/connector/app/server/httppuller.go
@@ -0,0 +1,126 @@
+package server
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/ydb-platform/ydb/library/go/core/log"
+ "github.com/ydb-platform/ydb/library/go/core/log/nop"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/solomon"
+ "github.com/ydb-platform/ydb/library/go/httputil/headers"
+)
+
+// MetricsStreamer is the part of the metrics registry the HTTP puller
+// handler needs: streaming collected metrics as JSON or Solomon Spack.
+// *solomon.Registry satisfies it.
+type MetricsStreamer interface {
+ StreamJSON(context.Context, io.Writer) (int, error)
+ StreamSpack(context.Context, io.Writer, solomon.CompressionType) (int, error)
+}
+
+// handler serves gathered metrics over HTTP in streamFormat (JSON by
+// default, Spack when configured via WithSpack).
+type handler struct {
+ registry MetricsStreamer
+ streamFormat headers.ContentType
+ logger log.Logger
+}
+
+// spackOption is the Option implementation behind WithSpack.
+type spackOption struct {
+}
+
+func (*spackOption) isOption() {}
+
+// WithSpack makes the handler prefer the Solomon Spack binary format for
+// clients that accept it.
+func WithSpack() Option {
+ return &spackOption{}
+}
+
+// Option configures NewHTTPPullerHandler.
+type Option interface {
+ isOption()
+}
+
+// NewHTTPPullerHandler returns new HTTP handler to expose gathered metrics using metrics dumper
+func NewHTTPPullerHandler(r MetricsStreamer, opts ...Option) http.Handler {
+ h := handler{
+ registry: r,
+ streamFormat: headers.TypeApplicationJSON,
+ logger: &nop.Logger{},
+ }
+
+ for _, opt := range opts {
+ switch opt.(type) {
+ case *spackOption:
+ h.streamFormat = headers.TypeApplicationXSolomonSpack
+ default:
+ panic(fmt.Sprintf("unsupported option %T", opt))
+ }
+ }
+
+ return h
+}
+
+// okSpack reports whether the response may be encoded as Spack: the handler
+// must be configured for Spack AND the client's Accept headers must include
+// the application/x-solomon-spack content type. Unparseable Accept headers
+// are logged and skipped.
+func (h handler) okSpack(header http.Header) bool {
+ if h.streamFormat != headers.TypeApplicationXSolomonSpack {
+ return false
+ }
+
+ // NOTE: the loop variable deliberately shadows the header parameter.
+ for _, header := range header[headers.AcceptKey] {
+ types, err := headers.ParseAccept(header)
+ if err != nil {
+ h.logger.Warn("Can't parse accept header", log.Error(err), log.String("header", header))
+
+ continue
+ }
+
+ for _, acceptableType := range types {
+ if acceptableType.Type == headers.TypeApplicationXSolomonSpack {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// okLZ4Compression reports whether the client accepts LZ4 content encoding,
+// based on the request's Accept-Encoding headers. Unparseable headers are
+// logged and skipped.
+func (h handler) okLZ4Compression(header http.Header) bool {
+	for _, raw := range header[headers.AcceptEncodingKey] {
+		encodings, err := headers.ParseAcceptEncoding(raw)
+		if err != nil {
+			h.logger.Warn("Can't parse accept-encoding header", log.Error(err), log.String("header", raw))
+
+			continue
+		}
+
+		for _, enc := range encodings {
+			if enc.Encoding == headers.EncodingLZ4 {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// ServeHTTP streams the gathered metrics to the client: Spack (optionally
+// LZ4-compressed, if the client accepts that encoding) when the handler is
+// configured for Spack and the client accepts it, JSON otherwise.
+// Streaming errors are logged; headers are already sent at that point, so
+// no error status can be returned to the client.
+func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if h.okSpack(r.Header) {
+ compression := solomon.CompressionNone
+
+ if h.okLZ4Compression(r.Header) {
+ compression = solomon.CompressionLz4
+ }
+
+ w.Header().Set(headers.ContentTypeKey, headers.TypeApplicationXSolomonSpack.String())
+ _, err := h.registry.StreamSpack(r.Context(), w, compression)
+
+ if err != nil {
+ h.logger.Error("Failed to write compressed spack", log.Error(err))
+ }
+
+ return
+ }
+
+ w.Header().Set(headers.ContentTypeKey, headers.TypeApplicationJSON.String())
+ _, err := h.registry.StreamJSON(r.Context(), w)
+
+ if err != nil {
+ h.logger.Error("Failed to write json", log.Error(err))
+ }
+}
diff --git a/ydb/library/yql/providers/generic/connector/app/server/launcher.go b/ydb/library/yql/providers/generic/connector/app/server/launcher.go
index 3711c0dda3..4664eae7eb 100644
--- a/ydb/library/yql/providers/generic/connector/app/server/launcher.go
+++ b/ydb/library/yql/providers/generic/connector/app/server/launcher.go
@@ -8,6 +8,7 @@ import (
"github.com/spf13/cobra"
"github.com/ydb-platform/ydb/library/go/core/log"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/solomon"
"github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/config"
"github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils"
)
@@ -49,6 +50,7 @@ func (l *launcher) stop() {
const (
connectorServiceKey = "connector"
pprofServiceKey = "pprof"
+ metricsKey = "metrics"
)
func newLauncher(logger log.Logger, cfg *config.TServerConfig) (*launcher, error) {
@@ -59,10 +61,21 @@ func newLauncher(logger log.Logger, cfg *config.TServerConfig) (*launcher, error
var err error
+ registry := solomon.NewRegistry(&solomon.RegistryOpts{
+ Separator: '.',
+ UseNameTag: true,
+ })
+
+ if cfg.MetricsServer != nil {
+ l.services[metricsKey] = newServiceMetrics(
+ log.With(logger, log.String("service", metricsKey)),
+ cfg.MetricsServer, registry)
+ }
+
// init GRPC server
l.services[connectorServiceKey], err = newServiceConnector(
log.With(logger, log.String("service", connectorServiceKey)),
- cfg)
+ cfg, registry)
if err != nil {
return nil, fmt.Errorf("new connector server: %w", err)
}
diff --git a/ydb/library/yql/providers/generic/connector/app/server/service_connector.go b/ydb/library/yql/providers/generic/connector/app/server/service_connector.go
index 2e7dca12d8..29d1fe1726 100644
--- a/ydb/library/yql/providers/generic/connector/app/server/service_connector.go
+++ b/ydb/library/yql/providers/generic/connector/app/server/service_connector.go
@@ -8,6 +8,7 @@ import (
"github.com/apache/arrow/go/v13/arrow/memory"
"github.com/ydb-platform/ydb/library/go/core/log"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/solomon"
api_common "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/api/common"
"github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/config"
"github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/paging"
@@ -242,7 +243,7 @@ func (s *serviceConnector) start() error {
return nil
}
-func makeGRPCOptions(logger log.Logger, cfg *config.TServerConfig) ([]grpc.ServerOption, error) {
+func makeGRPCOptions(logger log.Logger, cfg *config.TServerConfig, registry *solomon.Registry) ([]grpc.ServerOption, error) {
var (
opts []grpc.ServerOption
tlsConfig *config.TServerTLSConfig
@@ -272,7 +273,11 @@ func makeGRPCOptions(logger log.Logger, cfg *config.TServerConfig) ([]grpc.Serve
// for security reasons we do not allow TLS < 1.2, see YQ-1877
creds := credentials.NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}, MinVersion: tls.VersionTLS12})
- opts = append(opts, grpc.Creds(creds))
+ unaryInterceptors := []grpc.UnaryServerInterceptor{UnaryServerMetrics(registry)}
+
+ streamInterceptors := []grpc.StreamServerInterceptor{StreamServerMetrics(registry)}
+
+ opts = append(opts, grpc.Creds(creds), grpc.ChainUnaryInterceptor(unaryInterceptors...), grpc.ChainStreamInterceptor(streamInterceptors...))
return opts, nil
}
@@ -284,6 +289,7 @@ func (s *serviceConnector) stop() {
func newServiceConnector(
logger log.Logger,
cfg *config.TServerConfig,
+ registry *solomon.Registry,
) (service, error) {
queryLoggerFactory := utils.NewQueryLoggerFactory(cfg.Logger)
@@ -306,7 +312,7 @@ func newServiceConnector(
return nil, fmt.Errorf("net listen: %w", err)
}
- options, err := makeGRPCOptions(logger, cfg)
+ options, err := makeGRPCOptions(logger, cfg, registry)
if err != nil {
return nil, fmt.Errorf("make GRPC options: %w", err)
}
diff --git a/ydb/library/yql/providers/generic/connector/app/server/service_metrics.go b/ydb/library/yql/providers/generic/connector/app/server/service_metrics.go
new file mode 100644
index 0000000000..1533d76e5a
--- /dev/null
+++ b/ydb/library/yql/providers/generic/connector/app/server/service_metrics.go
@@ -0,0 +1,57 @@
+package server
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/ydb-platform/ydb/library/go/core/log"
+ "github.com/ydb-platform/ydb/library/go/core/metrics/solomon"
+ "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/config"
+ "github.com/ydb-platform/ydb/ydb/library/yql/providers/generic/connector/app/server/utils"
+)
+
+// serviceMetrics exposes a solomon metrics registry over a pull-based HTTP
+// endpoint; it implements the launcher's service interface (start/stop).
+type serviceMetrics struct {
+ httpServer *http.Server
+ logger log.Logger
+ registry *solomon.Registry
+}
+
+// start runs the metrics HTTP server and blocks until it terminates.
+// ListenAndServe always returns a non-nil error; http.ErrServerClosed is the
+// expected result of the graceful Shutdown issued by stop and must not be
+// reported as a failure.
+func (s *serviceMetrics) start() error {
+	s.logger.Debug("starting HTTP metrics server", log.String("address", s.httpServer.Addr))
+
+	if err := s.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+		return fmt.Errorf("http metrics server listen and serve: %w", err)
+	}
+
+	return nil
+}
+
+// stop gracefully shuts down the metrics HTTP server, waiting at most
+// shutdownTimeout for in-flight requests to finish; a shutdown error is
+// logged rather than propagated.
+func (s *serviceMetrics) stop() {
+ ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
+ defer cancel()
+
+ err := s.httpServer.Shutdown(ctx)
+ if err != nil {
+ s.logger.Error("shutdown http metrics server", log.Error(err))
+ }
+}
+
+// newServiceMetrics builds a metrics service that serves the given solomon
+// registry on /metrics at the endpoint from cfg, preferring the Spack
+// format for clients that accept it.
+func newServiceMetrics(logger log.Logger, cfg *config.TMetricsServerConfig, registry *solomon.Registry) service {
+ mux := http.NewServeMux()
+ mux.Handle("/metrics", NewHTTPPullerHandler(registry, WithSpack()))
+
+ httpServer := &http.Server{
+ Addr: utils.EndpointToString(cfg.Endpoint),
+ Handler: mux,
+ }
+
+ // TODO: TLS — cfg's TLS settings are not applied yet, so the server
+ // always listens in plaintext.
+ logger.Warn("metrics server will use insecure connections")
+
+ return &serviceMetrics{
+ httpServer: httpServer,
+ logger: logger,
+ registry: registry,
+ }
+}
diff --git a/ydb/library/yql/providers/generic/connector/app/server/ya.make b/ydb/library/yql/providers/generic/connector/app/server/ya.make
index 9fad9ff98f..063cb98f35 100644
--- a/ydb/library/yql/providers/generic/connector/app/server/ya.make
+++ b/ydb/library/yql/providers/generic/connector/app/server/ya.make
@@ -4,8 +4,11 @@ SRCS(
cmd.go
config.go
doc.go
+ grpc_metrics.go
+ httppuller.go
launcher.go
service_connector.go
+ service_metrics.go
service_pprof.go
validate.go
)