From 0b8e976e448836605372908241d140b4cfc783c0 Mon Sep 17 00:00:00 2001 From: matt durham Date: Wed, 11 Sep 2024 10:49:22 -0400 Subject: [PATCH 01/11] Adding the serialization features. --- .../remote/queue/filequeue/filequeue.go | 20 +- .../remote/queue/serialization/appender.go | 121 + .../queue/serialization/appender_test.go | 55 + .../queue/serialization/seralizer_test.go | 110 + .../remote/queue/serialization/serializer.go | 223 ++ .../serialization/serializer_bench_test.go | 112 + .../remote/queue/types/serialization.go | 286 ++ .../remote/queue/types/serialization_gen.go | 3294 +++++++++++++++++ .../queue/types/serialization_gen_test.go | 914 +++++ .../remote/queue/types/serialization_test.go | 59 + .../remote/queue/types/serializer.go | 22 + .../prometheus/remote/queue/types/stats.go | 8 + 12 files changed, 5214 insertions(+), 10 deletions(-) create mode 100644 internal/component/prometheus/remote/queue/serialization/appender.go create mode 100644 internal/component/prometheus/remote/queue/serialization/appender_test.go create mode 100644 internal/component/prometheus/remote/queue/serialization/seralizer_test.go create mode 100644 internal/component/prometheus/remote/queue/serialization/serializer.go create mode 100644 internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go create mode 100644 internal/component/prometheus/remote/queue/types/serialization.go create mode 100644 internal/component/prometheus/remote/queue/types/serialization_gen.go create mode 100644 internal/component/prometheus/remote/queue/types/serialization_gen_test.go create mode 100644 internal/component/prometheus/remote/queue/types/serialization_test.go create mode 100644 internal/component/prometheus/remote/queue/types/serializer.go create mode 100644 internal/component/prometheus/remote/queue/types/stats.go diff --git a/internal/component/prometheus/remote/queue/filequeue/filequeue.go b/internal/component/prometheus/remote/queue/filequeue/filequeue.go index 175c4bc610..7dacdcbe19 100644 --- a/internal/component/prometheus/remote/queue/filequeue/filequeue.go +++ b/internal/component/prometheus/remote/queue/filequeue/filequeue.go @@ -30,7 +30,7 @@ type queue struct { // block until ready for another record. out func(ctx context.Context, dh types.DataHandle) // existingFiles is the list of files found initially. - existingsFiles []string + existingFiles []string } // NewQueue returns a implementation of FileStorage. @@ -61,18 +61,18 @@ func NewQueue(directory string, out func(ctx context.Context, dh types.DataHandl currentMaxID = ids[len(ids)-1] } q := &queue{ - directory: directory, - maxID: currentMaxID, - logger: logger, - out: out, - dataQueue: actor.NewMailbox[types.Data](), - existingsFiles: make([]string, 0), + directory: directory, + maxID: currentMaxID, + logger: logger, + out: out, + dataQueue: actor.NewMailbox[types.Data](), + existingFiles: make([]string, 0), } // Save the existing files in `q.existingFiles`, which will have their data pushed to `out` when actor starts. for _, id := range ids { name := filepath.Join(directory, fmt.Sprintf("%d.committed", id)) - q.existingsFiles = append(q.existingsFiles, name) + q.existingFiles = append(q.existingFiles, name) } return q, nil } @@ -115,7 +115,7 @@ func get(logger log.Logger, name string) (map[string]string, []byte, error) { // DoWork allows most of the queue to be single threaded with work only coming in and going out via mailboxes(channels). 
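 // The actor runtime calls DoWork in a loop until it returns actor.WorkerEnd. Conceptually the
 // runtime behaves like the following rough sketch of the go-actor worker contract:
 //
 //	for q.DoWork(ctx) == actor.WorkerContinue {
 //	}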
 func (q *queue) DoWork(ctx actor.Context) actor.WorkerStatus {
 	// Queue up our existing items.
-	for _, name := range q.existingsFiles {
+	for _, name := range q.existingFiles {
 		q.out(ctx, types.DataHandle{
 			Name: name,
 			Pop: func() (map[string]string, []byte, error) {
@@ -124,7 +124,7 @@ func (q *queue) DoWork(ctx actor.Context) actor.WorkerStatus {
 		})
 	}
 	// We only want to process existing files once.
-	q.existingsFiles = nil
+	q.existingFiles = nil
 	select {
 	case <-ctx.Done():
 		return actor.WorkerEnd
diff --git a/internal/component/prometheus/remote/queue/serialization/appender.go b/internal/component/prometheus/remote/queue/serialization/appender.go
new file mode 100644
index 0000000000..541617f307
--- /dev/null
+++ b/internal/component/prometheus/remote/queue/serialization/appender.go
@@ -0,0 +1,121 @@
+package serialization
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/grafana/alloy/internal/component/prometheus/remote/queue/types"
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/metadata"
+	"github.com/prometheus/prometheus/storage"
+)
+
+type appender struct {
+	ctx    context.Context
+	ttl    time.Duration
+	s      types.Serializer
+	logger log.Logger
+}
+
+func (a *appender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) {
+	// TODO @mattdurham figure out what to do here later. This mirrors what we do elsewhere.
+	return ref, nil
+}
+
+// NewAppender returns an Appender that writes to the given serializer. NOTE: the returned Appender
+// writes data immediately and does not honor Commit or Rollback.
+func NewAppender(ctx context.Context, ttl time.Duration, s types.Serializer, logger log.Logger) storage.Appender {
+	app := &appender{
+		ttl:    ttl,
+		s:      s,
+		logger: logger,
+		ctx:    ctx,
+	}
+	return app
+}
+
+// Append appends a metric sample, silently dropping it if it is older than the TTL.
+func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+	// Check to see if the TTL has expired for this record.
+	endTime := time.Now().Unix() - int64(a.ttl.Seconds())
+	if t < endTime {
+		return ref, nil
+	}
+	ts := types.GetTimeSeriesBinary()
+	ts.Labels = l
+	ts.TS = t
+	ts.Value = v
+	ts.Hash = l.Hash()
+	err := a.s.SendSeries(a.ctx, ts)
+	return ref, err
+}
+
+// Commit is a no-op since we always write immediately.
+func (a *appender) Commit() (_ error) {
+	return nil
+}
+
+// Rollback is a no-op since we have already written the data.
+func (a *appender) Rollback() error {
+	return nil
+}
+
+// AppendExemplar appends an exemplar to the cache. The passed-in labels are ignored; the labels
+// on the exemplar are used instead.
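+// For example, with a TTL of 1h an exemplar stamped more than an hour in the past is silently
+// dropped. A sketch of the cutoff check used in the body below:
+//
+//	endTime := time.Now().Unix() - int64(a.ttl.Seconds())
+//	expired := e.HasTs && e.Ts < endTime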
+func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (_ storage.SeriesRef, _ error) {
+	endTime := time.Now().Unix() - int64(a.ttl.Seconds())
+	if e.HasTs && e.Ts < endTime {
+		return ref, nil
+	}
+	ts := types.GetTimeSeriesBinary()
+	ts.Hash = e.Labels.Hash()
+	ts.TS = e.Ts
+	ts.Labels = e.Labels
+	err := a.s.SendSeries(a.ctx, ts)
+	return ref, err
+}
+
+// AppendHistogram appends a histogram sample, silently dropping it if it is older than the TTL.
+func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (_ storage.SeriesRef, _ error) {
+	endTime := time.Now().Unix() - int64(a.ttl.Seconds())
+	if t < endTime {
+		return ref, nil
+	}
+	ts := types.GetTimeSeriesBinary()
+	ts.Labels = l
+	ts.TS = t
+	if h != nil {
+		ts.FromHistogram(t, h)
+	} else {
+		ts.FromFloatHistogram(t, fh)
+	}
+	ts.Hash = l.Hash()
+	err := a.s.SendSeries(a.ctx, ts)
+	return ref, err
+}
+
+// UpdateMetadata updates metadata.
+func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (_ storage.SeriesRef, _ error) {
+	ts := types.GetTimeSeriesBinary()
+	// Metadata is encoded as labels under reserved names that are unlikely to collide with real
+	// label names. A lot of work went into making TimeSeriesBinary efficient, so it makes sense
+	// to encode metadata into it as well.
+	combinedLabels := l.Copy()
+	combinedLabels = append(combinedLabels, labels.Label{
+		Name:  "__alloy_metadata_type__",
+		Value: string(m.Type),
+	})
+	combinedLabels = append(combinedLabels, labels.Label{
+		Name:  "__alloy_metadata_help__",
+		Value: m.Help,
+	})
+	combinedLabels = append(combinedLabels, labels.Label{
+		Name:  "__alloy_metadata_unit__",
+		Value: m.Unit,
+	})
+	ts.Labels = combinedLabels
+	err := a.s.SendMetadata(a.ctx, ts)
+	return ref, err
+}
diff --git a/internal/component/prometheus/remote/queue/serialization/appender_test.go b/internal/component/prometheus/remote/queue/serialization/appender_test.go
new file mode 100644
index 0000000000..9d6563b9bf
--- /dev/null
+++ b/internal/component/prometheus/remote/queue/serialization/appender_test.go
@@ -0,0 +1,55 @@
+package serialization
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/grafana/alloy/internal/component/prometheus/remote/queue/types"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAppenderTTL(t *testing.T) {
+	fake := &counterSerializer{}
+	l := log.NewNopLogger()
+
+	app := NewAppender(context.Background(), 1*time.Minute, fake, l)
+	_, err := app.Append(0, labels.FromStrings("one", "two"), time.Now().Unix(), 0)
+	require.NoError(t, err)
+
+	for i := 0; i < 10; i++ {
+		_, err = app.Append(0, labels.FromStrings("one", "two"), time.Now().Add(-5*time.Minute).Unix(), 0)
+		require.NoError(t, err)
+	}
+	// Only one record should make it through.
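+	// (The first Append used a current timestamp; the ten appends above are five minutes old,
+	// which is past the one-minute TTL, so the serializer should have received exactly one series.)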
+	require.Equal(t, 1, fake.received)
+}
+
+var _ types.Serializer = (*counterSerializer)(nil)
+
+type counterSerializer struct {
+	received int
+}
+
+func (f *counterSerializer) Start() {}
+
+func (f *counterSerializer) Stop() {}
+
+func (f *counterSerializer) SendSeries(ctx context.Context, data *types.TimeSeriesBinary) error {
+	f.received++
+	return nil
+}
+
+func (f *counterSerializer) SendMetadata(ctx context.Context, data *types.TimeSeriesBinary) error {
+	return nil
+}
+
+func (f *counterSerializer) UpdateConfig(ctx context.Context, data types.SerializerConfig) error {
+	return nil
+}
diff --git a/internal/component/prometheus/remote/queue/serialization/seralizer_test.go b/internal/component/prometheus/remote/queue/serialization/seralizer_test.go
new file mode 100644
index 0000000000..23fe152989
--- /dev/null
+++ b/internal/component/prometheus/remote/queue/serialization/seralizer_test.go
@@ -0,0 +1,110 @@
+package serialization
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/golang/snappy"
+	"github.com/grafana/alloy/internal/component/prometheus/remote/queue/types"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/stretchr/testify/require"
+)
+
+func TestRoundTripSerialization(t *testing.T) {
+	var totalSeries = 0
+	f := &fqq{t: t}
+	l := log.NewNopLogger()
+	start := time.Now().Add(-1 * time.Second).Unix()
+
+	s, err := NewSerializer(types.SerializerConfig{
+		MaxSignalsInBatch: 10,
+		FlushFrequency:    5 * time.Second,
+	}, f, func(stats types.SerializerStats) {
+		totalSeries = totalSeries + stats.SeriesStored
+		require.Equal(t, 10, stats.SeriesStored)
+		require.Equal(t, 0, stats.Errors)
+		require.Equal(t, 0, stats.MetadataStored)
+		require.True(t, stats.NewestTimestamp > start)
+	}, l)
+	require.NoError(t, err)
+
+	s.Start()
+	defer s.Stop()
+	for i := 0; i < 100; i++ {
+		tss := types.GetTimeSeriesBinary()
+		tss.Labels = make(labels.Labels, 10)
+		tss.Value = float64(i)
+		tss.TS = time.Now().Unix()
+		for j := 0; j < 10; j++ {
+			tss.Labels[j] = labels.Label{
+				Name:  fmt.Sprintf("name_%d_%d", i, j),
+				Value: fmt.Sprintf("value_%d_%d", i, j),
+			}
+		}
+		sendErr := s.SendSeries(context.Background(), tss)
+		require.NoError(t, sendErr)
+	}
+	require.Eventually(t, func() bool {
+		return f.total == 100
+	}, 5*time.Second, 100*time.Millisecond)
+	// 100 series were sent from the loop above.
+	require.Equal(t, 100, totalSeries)
+}
+
+func TestUpdateConfig(t *testing.T) {
+	f := &fqq{t: t}
+	l := log.NewNopLogger()
+	s, err := NewSerializer(types.SerializerConfig{
+		MaxSignalsInBatch: 10,
+		FlushFrequency:    5 * time.Second,
+	}, f, func(stats types.SerializerStats) {}, l)
+	require.NoError(t, err)
+	s.Start()
+	defer s.Stop()
+	err = s.UpdateConfig(context.Background(), types.SerializerConfig{
+		MaxSignalsInBatch: 1,
+		FlushFrequency:    1 * time.Second,
+	})
+	require.NoError(t, err)
+	require.Eventually(t, func() bool {
+		return s.(*serializer).maxItemsBeforeFlush == 1 && s.(*serializer).flushFrequency == 1*time.Second
+	}, 5*time.Second, 100*time.Millisecond)
+}
+
+var _ types.FileStorage = (*fqq)(nil)
+
+type fqq struct {
+	t     *testing.T
+	buf   []byte
+	total int
+}
+
+func (f *fqq) Start() {}
+
+func (f *fqq) Stop() {}
+
+func (f *fqq) Store(ctx context.Context, meta map[string]string, value []byte) error {
+	f.buf, _ = snappy.Decode(nil, value)
+	sg := &types.SeriesGroup{}
+	sg, _, err := types.DeserializeToSeriesGroup(sg, f.buf)
+	require.NoError(f.t, err)
+	require.Len(f.t, sg.Series, 10)
+	for _, series := range sg.Series {
+		require.Len(f.t, series.LabelsNames, 0)
+		require.Len(f.t, series.LabelsValues, 0)
+		require.Len(f.t, series.Labels, 10)
+		for j := 0; j < 10; j++ {
+			require.Equal(f.t, fmt.Sprintf("name_%d_%d", int(series.Value), j), series.Labels[j].Name)
+			require.Equal(f.t, fmt.Sprintf("value_%d_%d", int(series.Value), j), series.Labels[j].Value)
+		}
+	}
+	f.total += len(sg.Series)
+	return nil
+}
diff --git a/internal/component/prometheus/remote/queue/serialization/serializer.go b/internal/component/prometheus/remote/queue/serialization/serializer.go
new file mode 100644
index 0000000000..f702de45b9
--- /dev/null
+++ b/internal/component/prometheus/remote/queue/serialization/serializer.go
@@ -0,0 +1,223 @@
+package serialization
+
+import (
+	"context"
+	"strconv"
+	"time"
+
+	snappy "github.com/eapache/go-xerial-snappy"
+	"github.com/go-kit/log"
+	"github.com/grafana/alloy/internal/component/prometheus/remote/queue/types"
+	"github.com/grafana/alloy/internal/runtime/logging/level"
+	"github.com/vladopajic/go-actor/actor"
+)
+
+// serializer collects data from multiple appenders and writes it to a types.FileStorage.
+// It flushes either when the flush frequency has elapsed or when it is holding a certain
+// number of items.
+type serializer struct {
+	inbox               actor.Mailbox[*types.TimeSeriesBinary]
+	metaInbox           actor.Mailbox[*types.TimeSeriesBinary]
+	cfgInbox            actor.Mailbox[types.SerializerConfig]
+	maxItemsBeforeFlush int
+	flushFrequency      time.Duration
+	queue               types.FileStorage
+	lastFlush           time.Time
+	logger              log.Logger
+	self                actor.Actor
+	// Every 1 second we should check if we need to flush.
+	flushTestTimer *time.Ticker
+	series         []*types.TimeSeriesBinary
+	meta           []*types.TimeSeriesBinary
+	msgpBuffer     []byte
+	stats          func(stats types.SerializerStats)
+}
+
+func NewSerializer(cfg types.SerializerConfig, q types.FileStorage, stats func(stats types.SerializerStats), l log.Logger) (types.Serializer, error) {
+	s := &serializer{
+		maxItemsBeforeFlush: int(cfg.MaxSignalsInBatch),
+		flushFrequency:      cfg.FlushFrequency,
+		queue:               q,
+		series:              make([]*types.TimeSeriesBinary, 0),
+		logger:              l,
+		inbox:               actor.NewMailbox[*types.TimeSeriesBinary](),
+		metaInbox:           actor.NewMailbox[*types.TimeSeriesBinary](),
+		cfgInbox:            actor.NewMailbox[types.SerializerConfig](),
+		flushTestTimer:      time.NewTicker(1 * time.Second),
+		msgpBuffer:          make([]byte, 0),
+		lastFlush:           time.Now(),
+		stats:               stats,
+	}
+
+	return s, nil
+}
+
+func (s *serializer) Start() {
+	// All the actors and mailboxes need to start.
+	s.queue.Start()
+	s.self = actor.Combine(actor.New(s), s.inbox, s.metaInbox, s.cfgInbox).Build()
+	s.self.Start()
+}
+
+func (s *serializer) Stop() {
+	s.queue.Stop()
+	s.self.Stop()
+}
+
+func (s *serializer) SendSeries(ctx context.Context, data *types.TimeSeriesBinary) error {
+	return s.inbox.Send(ctx, data)
+}
+
+func (s *serializer) SendMetadata(ctx context.Context, data *types.TimeSeriesBinary) error {
+	return s.metaInbox.Send(ctx, data)
+}
+
+func (s *serializer) UpdateConfig(ctx context.Context, cfg types.SerializerConfig) error {
+	return s.cfgInbox.Send(ctx, cfg)
+}
+
+func (s *serializer) DoWork(ctx actor.Context) actor.WorkerStatus {
+	// Check for a config update first; it should have priority. Selection over ready channels
+	// is random, and the series inbox will almost always have items queued, so explicitly
+	// checking the config inbox here guarantees it gets a chance to be read. Pulling the config
+	// from a mailbox also ensures access to it does NOT need a mutex.
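+	// The select below is non-blocking (note the default case), so a pending config update is
+	// applied before we block waiting on series, metadata, or the flush ticker further down.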
+	select {
+	case <-ctx.Done():
+		return actor.WorkerEnd
+	case cfg, ok := <-s.cfgInbox.ReceiveC():
+		if !ok {
+			return actor.WorkerEnd
+		}
+		s.maxItemsBeforeFlush = int(cfg.MaxSignalsInBatch)
+		s.flushFrequency = cfg.FlushFrequency
+		return actor.WorkerContinue
+	default:
+	}
+
+	select {
+	case <-ctx.Done():
+		return actor.WorkerEnd
+	case item, ok := <-s.inbox.ReceiveC():
+		if !ok {
+			return actor.WorkerEnd
+		}
+		err := s.Append(ctx, item)
+		if err != nil {
+			level.Error(s.logger).Log("msg", "unable to append to serializer", "err", err)
+		}
+		return actor.WorkerContinue
+	case item, ok := <-s.metaInbox.ReceiveC():
+		if !ok {
+			return actor.WorkerEnd
+		}
+		err := s.AppendMetadata(ctx, item)
+		if err != nil {
+			level.Error(s.logger).Log("msg", "unable to append metadata to serializer", "err", err)
+		}
+		return actor.WorkerContinue
+	case <-s.flushTestTimer.C:
+		if time.Since(s.lastFlush) > s.flushFrequency {
+			err := s.store(ctx)
+			if err != nil {
+				level.Error(s.logger).Log("msg", "unable to store data", "err", err)
+			}
+		}
+		return actor.WorkerContinue
+	}
+}
+
+func (s *serializer) AppendMetadata(ctx actor.Context, data *types.TimeSeriesBinary) error {
+	s.meta = append(s.meta, data)
+	// Flush if we would go over the max batch size, or if the flush duration has elapsed.
+	if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush {
+		return s.store(ctx)
+	} else if time.Since(s.lastFlush) > s.flushFrequency {
+		return s.store(ctx)
+	}
+	return nil
+}
+
+func (s *serializer) Append(ctx actor.Context, data *types.TimeSeriesBinary) error {
+	s.series = append(s.series, data)
+	// Flush if we would go over the max batch size, or if the flush duration has elapsed.
+	if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush {
+		return s.store(ctx)
+	} else if time.Since(s.lastFlush) > s.flushFrequency {
+		return s.store(ctx)
+	}
+	return nil
+}
+
+func (s *serializer) store(ctx actor.Context) error {
+	var err error
+	defer func() {
+		s.lastFlush = time.Now()
+	}()
+	// Do nothing if there is nothing to store.
+	if len(s.series) == 0 && len(s.meta) == 0 {
+		return nil
+	}
+	group := &types.SeriesGroup{
+		Series:   make([]*types.TimeSeriesBinary, len(s.series)),
+		Metadata: make([]*types.TimeSeriesBinary, len(s.meta)),
+	}
+	defer func() {
+		s.storeStats(err)
+		// Return the series to the pool; this is key to reducing allocs.
+		types.PutTimeSeriesBinarySlice(s.series)
+		types.PutTimeSeriesBinarySlice(s.meta)
+		s.series = s.series[:0]
+		s.meta = s.meta[:0]
+	}()
+
+	// This maps each string to its index position in a slice. This is done to reduce the file
+	// size of the data.
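+	// The map is shared by every series and metadata entry in the batch, so a string that
+	// repeats across series ("job", "instance", common values) is stored once in group.Strings
+	// no matter how many series reference it.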
+ strMapToInt := make(map[string]uint32) + for i, ts := range s.series { + ts.FillLabelMapping(strMapToInt) + group.Series[i] = ts + } + for i, ts := range s.meta { + ts.FillLabelMapping(strMapToInt) + group.Metadata[i] = ts + } + + stringsSlice := make([]string, len(strMapToInt)) + for stringValue, index := range strMapToInt { + stringsSlice[index] = stringValue + } + group.Strings = stringsSlice + + buf, err := group.MarshalMsg(s.msgpBuffer) + if err != nil { + return err + } + + out := snappy.Encode(buf) + meta := map[string]string{ + // product.signal_type.schema.version + "version": "alloy.metrics.queue.v1", + "compression": "snappy", + "series_count": strconv.Itoa(len(group.Series)), + "meta_count": strconv.Itoa(len(group.Metadata)), + "strings_count": strconv.Itoa(len(group.Strings)), + } + err = s.queue.Store(ctx, meta, out) + return err +} + +func (s *serializer) storeStats(err error) { + hasError := 0 + if err != nil { + hasError = 1 + } + newestTS := int64(0) + for _, ts := range s.series { + if ts.TS > newestTS { + newestTS = ts.TS + + } + } + s.stats(types.SerializerStats{ + SeriesStored: len(s.series), + MetadataStored: len(s.meta), + Errors: hasError, + NewestTimestamp: newestTS, + }) +} diff --git a/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go b/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go new file mode 100644 index 0000000000..f4d6ffe725 --- /dev/null +++ b/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go @@ -0,0 +1,112 @@ +package serialization + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/grafana/alloy/internal/component/prometheus/remote/queue/types" + "github.com/prometheus/prometheus/model/labels" +) + +var lbls = labels.FromStrings("one", "two", "three", "four") + +func BenchmarkAppender(b *testing.B) { + // This should be 0 allocs + b.ReportAllocs() + logger := log.NewNopLogger() + for i := 0; i < b.N; i++ { + app := NewAppender(context.Background(), 1*time.Hour, &fakeSerializer{}, logger) + for j := 0; j < 10_000; j++ { + _, _ = app.Append(0, lbls, time.Now().Unix(), 1.1) + } + _ = app.Commit() + } +} + +func BenchmarkSerializer(b *testing.B) { + // This should be around 200-300 allocs 7m ns/op + series := getTimeSeries(b) + b.ResetTimer() + b.ReportAllocs() + logger := log.NewNopLogger() + for i := 0; i < b.N; i++ { + serial, _ := NewSerializer(types.SerializerConfig{ + MaxSignalsInBatch: 1_000, + FlushFrequency: 1 * time.Second, + }, &fakeFileQueue{}, func(stats types.SerializerStats) { + + }, logger) + serial.Start() + for _, s := range series { + _ = serial.SendSeries(context.Background(), s) + } + serial.Stop() + } +} + +func getTimeSeries(b *testing.B) []*types.TimeSeriesBinary { + b.Helper() + series := make([]*types.TimeSeriesBinary, 0) + for j := 0; j < 10_000; j++ { + timeseries := types.GetTimeSeriesBinary() + timeseries.TS = time.Now().Unix() + timeseries.Value = rand.Float64() + timeseries.Labels = getLabels() + series = append(series, timeseries) + } + return series +} + +func getLabels() labels.Labels { + retLbls := make(labels.Labels, 0) + for i := 0; i < rand.Intn(20); i++ { + l := labels.Label{ + Name: fmt.Sprintf("label_%d", i), + Value: fmt.Sprintf("value_%d", i), + } + retLbls = append(retLbls, l) + } + return retLbls +} + +var _ types.Serializer = (*fakeSerializer)(nil) + +type fakeSerializer struct{} + +func (f *fakeSerializer) UpdateConfig(ctx context.Context, cfg 
types.SerializerConfig) error { + return nil +} + +func (f *fakeSerializer) Start() {} + +func (f *fakeSerializer) Stop() {} + +func (f *fakeSerializer) SendSeries(ctx context.Context, data *types.TimeSeriesBinary) error { + types.PutTimeSeriesBinary(data) + return nil +} + +func (f *fakeSerializer) SendMetadata(ctx context.Context, data *types.TimeSeriesBinary) error { + types.PutTimeSeriesBinary(data) + return nil +} + +var _ types.FileStorage = (*fakeFileQueue)(nil) + +type fakeFileQueue struct{} + +func (f fakeFileQueue) Start() { + +} + +func (f fakeFileQueue) Stop() { + +} + +func (f fakeFileQueue) Store(ctx context.Context, meta map[string]string, value []byte) error { + return nil +} diff --git a/internal/component/prometheus/remote/queue/types/serialization.go b/internal/component/prometheus/remote/queue/types/serialization.go new file mode 100644 index 0000000000..d6926b7e47 --- /dev/null +++ b/internal/component/prometheus/remote/queue/types/serialization.go @@ -0,0 +1,286 @@ +//go:generate msgp +package types + +import ( + "sync" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/prompb" + "go.uber.org/atomic" +) + +// SeriesGroup is the holder for TimeSeries, Metadata, and the strings array. +// When serialized the Labels Key,Value array will be transformed into +// LabelNames and LabelsValues that point to the index in Strings. +// This deduplicates the strings and decreases the size on disk. +type SeriesGroup struct { + Strings []string + Series []*TimeSeriesBinary + Metadata []*TimeSeriesBinary +} + +type TimeSeriesBinary struct { + // Labels are not serialized to msgp but are passed in. + Labels labels.Labels `msg:"-"` + LabelsNames []uint32 + LabelsValues []uint32 + TS int64 + Value float64 + Hash uint64 + Histograms Histograms +} + +type Histograms struct { + Histogram *Histogram + FloatHistogram *FloatHistogram +} + +type Histogram struct { + Count HistogramCount + Sum float64 + Schema int32 + ZeroThreshold float64 + ZeroCount HistogramZeroCount + NegativeSpans []BucketSpan + NegativeBuckets []int64 + NegativeCounts []float64 + PositiveSpans []BucketSpan + PositiveBuckets []int64 + PositiveCounts []float64 + ResetHint int32 + TimestampMillisecond int64 +} + +type FloatHistogram struct { + Count HistogramCount + Sum float64 + Schema int32 + ZeroThreshold float64 + ZeroCount HistogramZeroCount + NegativeSpans []BucketSpan + NegativeDeltas []int64 + NegativeCounts []float64 + PositiveSpans []BucketSpan + PositiveDeltas []int64 + PositiveCounts []float64 + ResetHint int32 + TimestampMillisecond int64 +} + +type HistogramCount struct { + IsInt bool + IntValue uint64 + FloatValue float64 +} + +type HistogramZeroCount struct { + IsInt bool + IntValue uint64 + FloatValue float64 +} + +type BucketSpan struct { + Offset int32 + Length uint32 +} + +// IsMetadata is used because it's easier to store metadata as a set of labels. 
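+// UpdateMetadata in the appender attaches the reserved labels __alloy_metadata_type__,
+// __alloy_metadata_help__ and __alloy_metadata_unit__, so checking for the first of those is
+// enough to tell a metadata entry apart from a regular sample.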
+func (ts TimeSeriesBinary) IsMetadata() bool { + return ts.Labels.Has("__alloy_metadata_type__") +} + +func (h *Histogram) ToPromHistogram() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountInt{CountInt: h.Count.IntValue}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount.IntValue}, + NegativeSpans: ToPromBucketSpans(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: ToPromBucketSpans(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: prompb.Histogram_ResetHint(h.ResetHint), + Timestamp: h.TimestampMillisecond, + } +} + +func (h *FloatHistogram) ToPromFloatHistogram() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountFloat{CountFloat: h.Count.FloatValue}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: h.ZeroCount.FloatValue}, + NegativeSpans: ToPromBucketSpans(h.NegativeSpans), + NegativeCounts: h.NegativeCounts, + PositiveSpans: ToPromBucketSpans(h.PositiveSpans), + PositiveCounts: h.PositiveCounts, + ResetHint: prompb.Histogram_ResetHint(h.ResetHint), + Timestamp: h.TimestampMillisecond, + } +} +func ToPromBucketSpans(bss []BucketSpan) []prompb.BucketSpan { + spans := make([]prompb.BucketSpan, len(bss)) + for i, bs := range bss { + spans[i] = bs.ToPromBucketSpan() + } + return spans +} + +func (bs *BucketSpan) ToPromBucketSpan() prompb.BucketSpan { + return prompb.BucketSpan{ + Offset: bs.Offset, + Length: bs.Length, + } +} + +func (ts *TimeSeriesBinary) FromHistogram(timestamp int64, h *histogram.Histogram) { + ts.Histograms.Histogram = &Histogram{ + Count: HistogramCount{IsInt: true, IntValue: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: HistogramZeroCount{IsInt: true, IntValue: h.ZeroCount}, + NegativeSpans: FromPromSpan(h.NegativeSpans), + NegativeBuckets: h.NegativeBuckets, + PositiveSpans: FromPromSpan(h.PositiveSpans), + PositiveBuckets: h.PositiveBuckets, + ResetHint: int32(h.CounterResetHint), + TimestampMillisecond: timestamp, + } +} +func (ts *TimeSeriesBinary) FromFloatHistogram(timestamp int64, h *histogram.FloatHistogram) { + ts.Histograms.FloatHistogram = &FloatHistogram{ + Count: HistogramCount{IsInt: false, FloatValue: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: HistogramZeroCount{IsInt: false, FloatValue: h.ZeroCount}, + NegativeSpans: FromPromSpan(h.NegativeSpans), + NegativeCounts: h.NegativeBuckets, + PositiveSpans: FromPromSpan(h.PositiveSpans), + PositiveCounts: h.PositiveBuckets, + ResetHint: int32(h.CounterResetHint), + TimestampMillisecond: timestamp, + } +} +func FromPromSpan(spans []histogram.Span) []BucketSpan { + bs := make([]BucketSpan, len(spans)) + for i, s := range spans { + bs[i].Offset = s.Offset + bs[i].Length = s.Length + } + return bs +} + +// FillLabelMapping is what does the conversion from labels.Labels to LabelNames and +// LabelValues while filling in the string map, that is later converted to []string. +func (ts *TimeSeriesBinary) FillLabelMapping(strMapToInt map[string]uint32) { + ts.LabelsNames = setSliceLength(ts.LabelsNames, len(ts.Labels)) + ts.LabelsValues = setSliceLength(ts.LabelsValues, len(ts.Labels)) + + // This is where we deduplicate the ts.Labels into uint32 values + // that map to a string in the strings slice via the index. 
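+	// For example, the labels {job="a", instance="a"} fill an empty map with "job"->0, "a"->1
+	// and "instance"->2, giving LabelsNames=[0,2] and LabelsValues=[1,1]; the shared value "a"
+	// is stored only once.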
+ for i, v := range ts.Labels { + val, found := strMapToInt[v.Name] + if !found { + val = uint32(len(strMapToInt)) + strMapToInt[v.Name] = val + } + ts.LabelsNames[i] = val + + val, found = strMapToInt[v.Value] + if !found { + val = uint32(len(strMapToInt)) + strMapToInt[v.Value] = val + } + ts.LabelsValues[i] = val + } + +} + +func setSliceLength(lbls []uint32, length int) []uint32 { + if cap(lbls) <= length { + lbls = make([]uint32, length) + } else { + lbls = lbls[:length] + } + return lbls +} + +var tsBinaryPool = sync.Pool{ + New: func() any { + return &TimeSeriesBinary{} + }, +} + +func GetTimeSeriesBinary() *TimeSeriesBinary { + OutStandingTimeSeriesBinary.Inc() + return tsBinaryPool.Get().(*TimeSeriesBinary) +} + +var OutStandingTimeSeriesBinary = atomic.Int32{} + +func PutTimeSeriesBinarySlice(tss []*TimeSeriesBinary) { + for i := 0; i < len(tss); i++ { + PutTimeSeriesBinary(tss[i]) + } + +} + +func PutTimeSeriesBinary(ts *TimeSeriesBinary) { + OutStandingTimeSeriesBinary.Dec() + ts.LabelsNames = ts.LabelsNames[:0] + ts.LabelsValues = ts.LabelsValues[:0] + ts.Labels = nil + ts.TS = 0 + ts.Value = 0 + ts.Hash = 0 + ts.Histograms.Histogram = nil + ts.Histograms.FloatHistogram = nil + tsBinaryPool.Put(ts) +} + +// DeserializeToSeriesGroup transforms a buffer to a SeriesGroup and converts the stringmap + indexes into actual Labels. +func DeserializeToSeriesGroup(sg *SeriesGroup, buf []byte) (*SeriesGroup, []byte, error) { + buffer, err := sg.UnmarshalMsg(buf) + // Need to fill in the labels. + for _, series := range sg.Series { + if cap(series.Labels) < len(series.LabelsNames) { + series.Labels = make(labels.Labels, len(series.LabelsNames)) + } else { + series.Labels = series.Labels[:len(series.LabelsNames)] + } + // Since the LabelNames/LabelValues are indexes into the Strings slice we can access it like the below. + // 1 Label corresponds to two entries, one in LabelsNames and one in LabelsValues. + for i := range series.LabelsNames { + series.Labels[i] = labels.Label{ + Name: sg.Strings[series.LabelsNames[i]], + Value: sg.Strings[series.LabelsValues[i]], + } + } + series.LabelsNames = series.LabelsNames[:0] + series.LabelsValues = series.LabelsValues[:0] + } + for _, series := range sg.Metadata { + if cap(series.Labels) < len(series.LabelsNames) { + series.Labels = make(labels.Labels, len(series.LabelsNames)) + } else { + series.Labels = series.Labels[:len(series.LabelsNames)] + } + for i := range series.LabelsNames { + series.Labels[i] = labels.Label{ + Name: sg.Strings[series.LabelsNames[i]], + Value: sg.Strings[series.LabelsValues[i]], + } + } + // Finally ensure we reset the labelnames and labelvalues. + series.LabelsNames = series.LabelsNames[:0] + series.LabelsValues = series.LabelsValues[:0] + } + + sg.Strings = sg.Strings[:0] + return sg, buffer, err +} diff --git a/internal/component/prometheus/remote/queue/types/serialization_gen.go b/internal/component/prometheus/remote/queue/types/serialization_gen.go new file mode 100644 index 0000000000..c31dd8d6a4 --- /dev/null +++ b/internal/component/prometheus/remote/queue/types/serialization_gen.go @@ -0,0 +1,3294 @@ +package types + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *BucketSpan) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.Offset, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "Offset") + return + } + case "Length": + z.Length, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "Length") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z BucketSpan) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "Offset" + err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + if err != nil { + return + } + err = en.WriteInt32(z.Offset) + if err != nil { + err = msgp.WrapError(err, "Offset") + return + } + // write "Length" + err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + if err != nil { + return + } + err = en.WriteUint32(z.Length) + if err != nil { + err = msgp.WrapError(err, "Length") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z BucketSpan) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Offset" + o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + o = msgp.AppendInt32(o, z.Offset) + // string "Length" + o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + o = msgp.AppendUint32(o, z.Length) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BucketSpan) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.Offset, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Offset") + return + } + case "Length": + z.Length, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Length") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z BucketSpan) Msgsize() (s int) { + s = 1 + 7 + msgp.Int32Size + 7 + msgp.Uint32Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *FloatHistogram) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Count": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + for zb0002 > 0 { + zb0002-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + switch 
msgp.UnsafeString(field) { + case "IsInt": + z.Count.IsInt, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Count", "IsInt") + return + } + case "IntValue": + z.Count.IntValue, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Count", "IntValue") + return + } + case "FloatValue": + z.Count.FloatValue, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "Count", "FloatValue") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + } + } + case "Sum": + z.Sum, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "Sum") + return + } + case "Schema": + z.Schema, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "Schema") + return + } + case "ZeroThreshold": + z.ZeroThreshold, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "ZeroThreshold") + return + } + case "ZeroCount": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.ZeroCount.IsInt, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IsInt") + return + } + case "IntValue": + z.ZeroCount.IntValue, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IntValue") + return + } + case "FloatValue": + z.ZeroCount.FloatValue, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "FloatValue") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + } + } + case "NegativeSpans": + var zb0004 uint32 + zb0004, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans") + return + } + if cap(z.NegativeSpans) >= int(zb0004) { + z.NegativeSpans = (z.NegativeSpans)[:zb0004] + } else { + z.NegativeSpans = make([]BucketSpan, zb0004) + } + for za0001 := range z.NegativeSpans { + var zb0005 uint32 + zb0005, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + for zb0005 > 0 { + zb0005-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.NegativeSpans[za0001].Offset, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") + return + } + case "Length": + z.NegativeSpans[za0001].Length, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + } + } + } + case "NegativeDeltas": + var zb0006 uint32 + zb0006, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "NegativeDeltas") + return + } + if cap(z.NegativeDeltas) >= int(zb0006) { + z.NegativeDeltas = (z.NegativeDeltas)[:zb0006] + } else { + z.NegativeDeltas = make([]int64, zb0006) + } + for za0002 := range z.NegativeDeltas { + z.NegativeDeltas[za0002], err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "NegativeDeltas", za0002) + return + } + } + case "NegativeCounts": + var zb0007 uint32 + zb0007, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, 
"NegativeCounts") + return + } + if cap(z.NegativeCounts) >= int(zb0007) { + z.NegativeCounts = (z.NegativeCounts)[:zb0007] + } else { + z.NegativeCounts = make([]float64, zb0007) + } + for za0003 := range z.NegativeCounts { + z.NegativeCounts[za0003], err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "NegativeCounts", za0003) + return + } + } + case "PositiveSpans": + var zb0008 uint32 + zb0008, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans") + return + } + if cap(z.PositiveSpans) >= int(zb0008) { + z.PositiveSpans = (z.PositiveSpans)[:zb0008] + } else { + z.PositiveSpans = make([]BucketSpan, zb0008) + } + for za0004 := range z.PositiveSpans { + var zb0009 uint32 + zb0009, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + for zb0009 > 0 { + zb0009-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.PositiveSpans[za0004].Offset, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") + return + } + case "Length": + z.PositiveSpans[za0004].Length, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + } + } + } + case "PositiveDeltas": + var zb0010 uint32 + zb0010, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PositiveDeltas") + return + } + if cap(z.PositiveDeltas) >= int(zb0010) { + z.PositiveDeltas = (z.PositiveDeltas)[:zb0010] + } else { + z.PositiveDeltas = make([]int64, zb0010) + } + for za0005 := range z.PositiveDeltas { + z.PositiveDeltas[za0005], err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "PositiveDeltas", za0005) + return + } + } + case "PositiveCounts": + var zb0011 uint32 + zb0011, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PositiveCounts") + return + } + if cap(z.PositiveCounts) >= int(zb0011) { + z.PositiveCounts = (z.PositiveCounts)[:zb0011] + } else { + z.PositiveCounts = make([]float64, zb0011) + } + for za0006 := range z.PositiveCounts { + z.PositiveCounts[za0006], err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "PositiveCounts", za0006) + return + } + } + case "ResetHint": + z.ResetHint, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "ResetHint") + return + } + case "TimestampMillisecond": + z.TimestampMillisecond, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "TimestampMillisecond") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *FloatHistogram) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 13 + // write "Count" + err = en.Append(0x8d, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74) + if err != nil { + return + } + // map header, size 3 + // write "IsInt" + err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteBool(z.Count.IsInt) + if err != nil { + err = msgp.WrapError(err, "Count", "IsInt") + return + } + // write "IntValue" + err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteUint64(z.Count.IntValue) + if err != nil { + err 
= msgp.WrapError(err, "Count", "IntValue") + return + } + // write "FloatValue" + err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteFloat64(z.Count.FloatValue) + if err != nil { + err = msgp.WrapError(err, "Count", "FloatValue") + return + } + // write "Sum" + err = en.Append(0xa3, 0x53, 0x75, 0x6d) + if err != nil { + return + } + err = en.WriteFloat64(z.Sum) + if err != nil { + err = msgp.WrapError(err, "Sum") + return + } + // write "Schema" + err = en.Append(0xa6, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61) + if err != nil { + return + } + err = en.WriteInt32(z.Schema) + if err != nil { + err = msgp.WrapError(err, "Schema") + return + } + // write "ZeroThreshold" + err = en.Append(0xad, 0x5a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64) + if err != nil { + return + } + err = en.WriteFloat64(z.ZeroThreshold) + if err != nil { + err = msgp.WrapError(err, "ZeroThreshold") + return + } + // write "ZeroCount" + err = en.Append(0xa9, 0x5a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74) + if err != nil { + return + } + // map header, size 3 + // write "IsInt" + err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteBool(z.ZeroCount.IsInt) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IsInt") + return + } + // write "IntValue" + err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteUint64(z.ZeroCount.IntValue) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IntValue") + return + } + // write "FloatValue" + err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteFloat64(z.ZeroCount.FloatValue) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "FloatValue") + return + } + // write "NegativeSpans" + err = en.Append(0xad, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.NegativeSpans))) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans") + return + } + for za0001 := range z.NegativeSpans { + // map header, size 2 + // write "Offset" + err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + if err != nil { + return + } + err = en.WriteInt32(z.NegativeSpans[za0001].Offset) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") + return + } + // write "Length" + err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + if err != nil { + return + } + err = en.WriteUint32(z.NegativeSpans[za0001].Length) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") + return + } + } + // write "NegativeDeltas" + err = en.Append(0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.NegativeDeltas))) + if err != nil { + err = msgp.WrapError(err, "NegativeDeltas") + return + } + for za0002 := range z.NegativeDeltas { + err = en.WriteInt64(z.NegativeDeltas[za0002]) + if err != nil { + err = msgp.WrapError(err, "NegativeDeltas", za0002) + return + } + } + // write "NegativeCounts" + err = en.Append(0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.NegativeCounts))) + if err != nil { + err = msgp.WrapError(err, 
"NegativeCounts") + return + } + for za0003 := range z.NegativeCounts { + err = en.WriteFloat64(z.NegativeCounts[za0003]) + if err != nil { + err = msgp.WrapError(err, "NegativeCounts", za0003) + return + } + } + // write "PositiveSpans" + err = en.Append(0xad, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PositiveSpans))) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans") + return + } + for za0004 := range z.PositiveSpans { + // map header, size 2 + // write "Offset" + err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + if err != nil { + return + } + err = en.WriteInt32(z.PositiveSpans[za0004].Offset) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") + return + } + // write "Length" + err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + if err != nil { + return + } + err = en.WriteUint32(z.PositiveSpans[za0004].Length) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") + return + } + } + // write "PositiveDeltas" + err = en.Append(0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PositiveDeltas))) + if err != nil { + err = msgp.WrapError(err, "PositiveDeltas") + return + } + for za0005 := range z.PositiveDeltas { + err = en.WriteInt64(z.PositiveDeltas[za0005]) + if err != nil { + err = msgp.WrapError(err, "PositiveDeltas", za0005) + return + } + } + // write "PositiveCounts" + err = en.Append(0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PositiveCounts))) + if err != nil { + err = msgp.WrapError(err, "PositiveCounts") + return + } + for za0006 := range z.PositiveCounts { + err = en.WriteFloat64(z.PositiveCounts[za0006]) + if err != nil { + err = msgp.WrapError(err, "PositiveCounts", za0006) + return + } + } + // write "ResetHint" + err = en.Append(0xa9, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteInt32(z.ResetHint) + if err != nil { + err = msgp.WrapError(err, "ResetHint") + return + } + // write "TimestampMillisecond" + err = en.Append(0xb4, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64) + if err != nil { + return + } + err = en.WriteInt64(z.TimestampMillisecond) + if err != nil { + err = msgp.WrapError(err, "TimestampMillisecond") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *FloatHistogram) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 13 + // string "Count" + o = append(o, 0x8d, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74) + // map header, size 3 + // string "IsInt" + o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + o = msgp.AppendBool(o, z.Count.IsInt) + // string "IntValue" + o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendUint64(o, z.Count.IntValue) + // string "FloatValue" + o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.Count.FloatValue) + // string "Sum" + o = append(o, 0xa3, 0x53, 0x75, 0x6d) + o = msgp.AppendFloat64(o, z.Sum) + // string "Schema" + o = append(o, 0xa6, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61) + o = msgp.AppendInt32(o, z.Schema) + // 
string "ZeroThreshold" + o = append(o, 0xad, 0x5a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64) + o = msgp.AppendFloat64(o, z.ZeroThreshold) + // string "ZeroCount" + o = append(o, 0xa9, 0x5a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74) + // map header, size 3 + // string "IsInt" + o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + o = msgp.AppendBool(o, z.ZeroCount.IsInt) + // string "IntValue" + o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendUint64(o, z.ZeroCount.IntValue) + // string "FloatValue" + o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.ZeroCount.FloatValue) + // string "NegativeSpans" + o = append(o, 0xad, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeSpans))) + for za0001 := range z.NegativeSpans { + // map header, size 2 + // string "Offset" + o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + o = msgp.AppendInt32(o, z.NegativeSpans[za0001].Offset) + // string "Length" + o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + o = msgp.AppendUint32(o, z.NegativeSpans[za0001].Length) + } + // string "NegativeDeltas" + o = append(o, 0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeDeltas))) + for za0002 := range z.NegativeDeltas { + o = msgp.AppendInt64(o, z.NegativeDeltas[za0002]) + } + // string "NegativeCounts" + o = append(o, 0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeCounts))) + for za0003 := range z.NegativeCounts { + o = msgp.AppendFloat64(o, z.NegativeCounts[za0003]) + } + // string "PositiveSpans" + o = append(o, 0xad, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveSpans))) + for za0004 := range z.PositiveSpans { + // map header, size 2 + // string "Offset" + o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + o = msgp.AppendInt32(o, z.PositiveSpans[za0004].Offset) + // string "Length" + o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + o = msgp.AppendUint32(o, z.PositiveSpans[za0004].Length) + } + // string "PositiveDeltas" + o = append(o, 0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveDeltas))) + for za0005 := range z.PositiveDeltas { + o = msgp.AppendInt64(o, z.PositiveDeltas[za0005]) + } + // string "PositiveCounts" + o = append(o, 0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveCounts))) + for za0006 := range z.PositiveCounts { + o = msgp.AppendFloat64(o, z.PositiveCounts[za0006]) + } + // string "ResetHint" + o = append(o, 0xa9, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74) + o = msgp.AppendInt32(o, z.ResetHint) + // string "TimestampMillisecond" + o = append(o, 0xb4, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64) + o = msgp.AppendInt64(o, z.TimestampMillisecond) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *FloatHistogram) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = 
msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Count": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.Count.IsInt, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Count", "IsInt") + return + } + case "IntValue": + z.Count.IntValue, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Count", "IntValue") + return + } + case "FloatValue": + z.Count.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Count", "FloatValue") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + } + } + case "Sum": + z.Sum, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Sum") + return + } + case "Schema": + z.Schema, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Schema") + return + } + case "ZeroThreshold": + z.ZeroThreshold, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroThreshold") + return + } + case "ZeroCount": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.ZeroCount.IsInt, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IsInt") + return + } + case "IntValue": + z.ZeroCount.IntValue, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IntValue") + return + } + case "FloatValue": + z.ZeroCount.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "FloatValue") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + } + } + case "NegativeSpans": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans") + return + } + if cap(z.NegativeSpans) >= int(zb0004) { + z.NegativeSpans = (z.NegativeSpans)[:zb0004] + } else { + z.NegativeSpans = make([]BucketSpan, zb0004) + } + for za0001 := range z.NegativeSpans { + var zb0005 uint32 + zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + for zb0005 > 0 { + zb0005-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.NegativeSpans[za0001].Offset, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") + return + } + case "Length": + z.NegativeSpans[za0001].Length, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = 
msgp.WrapError(err, "NegativeSpans", za0001, "Length") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + } + } + } + case "NegativeDeltas": + var zb0006 uint32 + zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeDeltas") + return + } + if cap(z.NegativeDeltas) >= int(zb0006) { + z.NegativeDeltas = (z.NegativeDeltas)[:zb0006] + } else { + z.NegativeDeltas = make([]int64, zb0006) + } + for za0002 := range z.NegativeDeltas { + z.NegativeDeltas[za0002], bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeDeltas", za0002) + return + } + } + case "NegativeCounts": + var zb0007 uint32 + zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeCounts") + return + } + if cap(z.NegativeCounts) >= int(zb0007) { + z.NegativeCounts = (z.NegativeCounts)[:zb0007] + } else { + z.NegativeCounts = make([]float64, zb0007) + } + for za0003 := range z.NegativeCounts { + z.NegativeCounts[za0003], bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeCounts", za0003) + return + } + } + case "PositiveSpans": + var zb0008 uint32 + zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans") + return + } + if cap(z.PositiveSpans) >= int(zb0008) { + z.PositiveSpans = (z.PositiveSpans)[:zb0008] + } else { + z.PositiveSpans = make([]BucketSpan, zb0008) + } + for za0004 := range z.PositiveSpans { + var zb0009 uint32 + zb0009, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + for zb0009 > 0 { + zb0009-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.PositiveSpans[za0004].Offset, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") + return + } + case "Length": + z.PositiveSpans[za0004].Length, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + } + } + } + case "PositiveDeltas": + var zb0010 uint32 + zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveDeltas") + return + } + if cap(z.PositiveDeltas) >= int(zb0010) { + z.PositiveDeltas = (z.PositiveDeltas)[:zb0010] + } else { + z.PositiveDeltas = make([]int64, zb0010) + } + for za0005 := range z.PositiveDeltas { + z.PositiveDeltas[za0005], bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveDeltas", za0005) + return + } + } + case "PositiveCounts": + var zb0011 uint32 + zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveCounts") + return + } + if cap(z.PositiveCounts) >= int(zb0011) { + z.PositiveCounts = (z.PositiveCounts)[:zb0011] + } else { + z.PositiveCounts = make([]float64, zb0011) + } + for za0006 := range z.PositiveCounts { + z.PositiveCounts[za0006], bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveCounts", za0006) + return + } + } + case "ResetHint": + z.ResetHint, bts, err = 
msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ResetHint") + return + } + case "TimestampMillisecond": + z.TimestampMillisecond, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TimestampMillisecond") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *FloatHistogram) Msgsize() (s int) { + s = 1 + 6 + 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size + 4 + msgp.Float64Size + 7 + msgp.Int32Size + 14 + msgp.Float64Size + 10 + 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size + 14 + msgp.ArrayHeaderSize + (len(z.NegativeSpans) * (15 + msgp.Int32Size + msgp.Uint32Size)) + 15 + msgp.ArrayHeaderSize + (len(z.NegativeDeltas) * (msgp.Int64Size)) + 15 + msgp.ArrayHeaderSize + (len(z.NegativeCounts) * (msgp.Float64Size)) + 14 + msgp.ArrayHeaderSize + (len(z.PositiveSpans) * (15 + msgp.Int32Size + msgp.Uint32Size)) + 15 + msgp.ArrayHeaderSize + (len(z.PositiveDeltas) * (msgp.Int64Size)) + 15 + msgp.ArrayHeaderSize + (len(z.PositiveCounts) * (msgp.Float64Size)) + 10 + msgp.Int32Size + 21 + msgp.Int64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *Histogram) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Count": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + for zb0002 > 0 { + zb0002-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.Count.IsInt, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Count", "IsInt") + return + } + case "IntValue": + z.Count.IntValue, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Count", "IntValue") + return + } + case "FloatValue": + z.Count.FloatValue, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "Count", "FloatValue") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + } + } + case "Sum": + z.Sum, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "Sum") + return + } + case "Schema": + z.Schema, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "Schema") + return + } + case "ZeroThreshold": + z.ZeroThreshold, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "ZeroThreshold") + return + } + case "ZeroCount": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.ZeroCount.IsInt, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IsInt") + return + } + case "IntValue": + z.ZeroCount.IntValue, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IntValue") + return + } + case 
"FloatValue": + z.ZeroCount.FloatValue, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "FloatValue") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + } + } + case "NegativeSpans": + var zb0004 uint32 + zb0004, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans") + return + } + if cap(z.NegativeSpans) >= int(zb0004) { + z.NegativeSpans = (z.NegativeSpans)[:zb0004] + } else { + z.NegativeSpans = make([]BucketSpan, zb0004) + } + for za0001 := range z.NegativeSpans { + var zb0005 uint32 + zb0005, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + for zb0005 > 0 { + zb0005-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.NegativeSpans[za0001].Offset, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") + return + } + case "Length": + z.NegativeSpans[za0001].Length, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + } + } + } + case "NegativeBuckets": + var zb0006 uint32 + zb0006, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "NegativeBuckets") + return + } + if cap(z.NegativeBuckets) >= int(zb0006) { + z.NegativeBuckets = (z.NegativeBuckets)[:zb0006] + } else { + z.NegativeBuckets = make([]int64, zb0006) + } + for za0002 := range z.NegativeBuckets { + z.NegativeBuckets[za0002], err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "NegativeBuckets", za0002) + return + } + } + case "NegativeCounts": + var zb0007 uint32 + zb0007, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "NegativeCounts") + return + } + if cap(z.NegativeCounts) >= int(zb0007) { + z.NegativeCounts = (z.NegativeCounts)[:zb0007] + } else { + z.NegativeCounts = make([]float64, zb0007) + } + for za0003 := range z.NegativeCounts { + z.NegativeCounts[za0003], err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "NegativeCounts", za0003) + return + } + } + case "PositiveSpans": + var zb0008 uint32 + zb0008, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans") + return + } + if cap(z.PositiveSpans) >= int(zb0008) { + z.PositiveSpans = (z.PositiveSpans)[:zb0008] + } else { + z.PositiveSpans = make([]BucketSpan, zb0008) + } + for za0004 := range z.PositiveSpans { + var zb0009 uint32 + zb0009, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + for zb0009 > 0 { + zb0009-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.PositiveSpans[za0004].Offset, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") + return + } + case "Length": + z.PositiveSpans[za0004].Length, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + } + } + } + case "PositiveBuckets": + var zb0010 uint32 
+ zb0010, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PositiveBuckets") + return + } + if cap(z.PositiveBuckets) >= int(zb0010) { + z.PositiveBuckets = (z.PositiveBuckets)[:zb0010] + } else { + z.PositiveBuckets = make([]int64, zb0010) + } + for za0005 := range z.PositiveBuckets { + z.PositiveBuckets[za0005], err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "PositiveBuckets", za0005) + return + } + } + case "PositiveCounts": + var zb0011 uint32 + zb0011, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PositiveCounts") + return + } + if cap(z.PositiveCounts) >= int(zb0011) { + z.PositiveCounts = (z.PositiveCounts)[:zb0011] + } else { + z.PositiveCounts = make([]float64, zb0011) + } + for za0006 := range z.PositiveCounts { + z.PositiveCounts[za0006], err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "PositiveCounts", za0006) + return + } + } + case "ResetHint": + z.ResetHint, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "ResetHint") + return + } + case "TimestampMillisecond": + z.TimestampMillisecond, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "TimestampMillisecond") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Histogram) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 13 + // write "Count" + err = en.Append(0x8d, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74) + if err != nil { + return + } + // map header, size 3 + // write "IsInt" + err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteBool(z.Count.IsInt) + if err != nil { + err = msgp.WrapError(err, "Count", "IsInt") + return + } + // write "IntValue" + err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteUint64(z.Count.IntValue) + if err != nil { + err = msgp.WrapError(err, "Count", "IntValue") + return + } + // write "FloatValue" + err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteFloat64(z.Count.FloatValue) + if err != nil { + err = msgp.WrapError(err, "Count", "FloatValue") + return + } + // write "Sum" + err = en.Append(0xa3, 0x53, 0x75, 0x6d) + if err != nil { + return + } + err = en.WriteFloat64(z.Sum) + if err != nil { + err = msgp.WrapError(err, "Sum") + return + } + // write "Schema" + err = en.Append(0xa6, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61) + if err != nil { + return + } + err = en.WriteInt32(z.Schema) + if err != nil { + err = msgp.WrapError(err, "Schema") + return + } + // write "ZeroThreshold" + err = en.Append(0xad, 0x5a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64) + if err != nil { + return + } + err = en.WriteFloat64(z.ZeroThreshold) + if err != nil { + err = msgp.WrapError(err, "ZeroThreshold") + return + } + // write "ZeroCount" + err = en.Append(0xa9, 0x5a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74) + if err != nil { + return + } + // map header, size 3 + // write "IsInt" + err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteBool(z.ZeroCount.IsInt) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IsInt") + return + } + // write "IntValue" + err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = 
en.WriteUint64(z.ZeroCount.IntValue) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IntValue") + return + } + // write "FloatValue" + err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteFloat64(z.ZeroCount.FloatValue) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "FloatValue") + return + } + // write "NegativeSpans" + err = en.Append(0xad, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.NegativeSpans))) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans") + return + } + for za0001 := range z.NegativeSpans { + // map header, size 2 + // write "Offset" + err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + if err != nil { + return + } + err = en.WriteInt32(z.NegativeSpans[za0001].Offset) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") + return + } + // write "Length" + err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + if err != nil { + return + } + err = en.WriteUint32(z.NegativeSpans[za0001].Length) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") + return + } + } + // write "NegativeBuckets" + err = en.Append(0xaf, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.NegativeBuckets))) + if err != nil { + err = msgp.WrapError(err, "NegativeBuckets") + return + } + for za0002 := range z.NegativeBuckets { + err = en.WriteInt64(z.NegativeBuckets[za0002]) + if err != nil { + err = msgp.WrapError(err, "NegativeBuckets", za0002) + return + } + } + // write "NegativeCounts" + err = en.Append(0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.NegativeCounts))) + if err != nil { + err = msgp.WrapError(err, "NegativeCounts") + return + } + for za0003 := range z.NegativeCounts { + err = en.WriteFloat64(z.NegativeCounts[za0003]) + if err != nil { + err = msgp.WrapError(err, "NegativeCounts", za0003) + return + } + } + // write "PositiveSpans" + err = en.Append(0xad, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PositiveSpans))) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans") + return + } + for za0004 := range z.PositiveSpans { + // map header, size 2 + // write "Offset" + err = en.Append(0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + if err != nil { + return + } + err = en.WriteInt32(z.PositiveSpans[za0004].Offset) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") + return + } + // write "Length" + err = en.Append(0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + if err != nil { + return + } + err = en.WriteUint32(z.PositiveSpans[za0004].Length) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") + return + } + } + // write "PositiveBuckets" + err = en.Append(0xaf, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PositiveBuckets))) + if err != nil { + err = msgp.WrapError(err, "PositiveBuckets") + return + } + for za0005 := range z.PositiveBuckets { + err = en.WriteInt64(z.PositiveBuckets[za0005]) + if 
err != nil { + err = msgp.WrapError(err, "PositiveBuckets", za0005) + return + } + } + // write "PositiveCounts" + err = en.Append(0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PositiveCounts))) + if err != nil { + err = msgp.WrapError(err, "PositiveCounts") + return + } + for za0006 := range z.PositiveCounts { + err = en.WriteFloat64(z.PositiveCounts[za0006]) + if err != nil { + err = msgp.WrapError(err, "PositiveCounts", za0006) + return + } + } + // write "ResetHint" + err = en.Append(0xa9, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteInt32(z.ResetHint) + if err != nil { + err = msgp.WrapError(err, "ResetHint") + return + } + // write "TimestampMillisecond" + err = en.Append(0xb4, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64) + if err != nil { + return + } + err = en.WriteInt64(z.TimestampMillisecond) + if err != nil { + err = msgp.WrapError(err, "TimestampMillisecond") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Histogram) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 13 + // string "Count" + o = append(o, 0x8d, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74) + // map header, size 3 + // string "IsInt" + o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + o = msgp.AppendBool(o, z.Count.IsInt) + // string "IntValue" + o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendUint64(o, z.Count.IntValue) + // string "FloatValue" + o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.Count.FloatValue) + // string "Sum" + o = append(o, 0xa3, 0x53, 0x75, 0x6d) + o = msgp.AppendFloat64(o, z.Sum) + // string "Schema" + o = append(o, 0xa6, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61) + o = msgp.AppendInt32(o, z.Schema) + // string "ZeroThreshold" + o = append(o, 0xad, 0x5a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64) + o = msgp.AppendFloat64(o, z.ZeroThreshold) + // string "ZeroCount" + o = append(o, 0xa9, 0x5a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74) + // map header, size 3 + // string "IsInt" + o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + o = msgp.AppendBool(o, z.ZeroCount.IsInt) + // string "IntValue" + o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendUint64(o, z.ZeroCount.IntValue) + // string "FloatValue" + o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.ZeroCount.FloatValue) + // string "NegativeSpans" + o = append(o, 0xad, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeSpans))) + for za0001 := range z.NegativeSpans { + // map header, size 2 + // string "Offset" + o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + o = msgp.AppendInt32(o, z.NegativeSpans[za0001].Offset) + // string "Length" + o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + o = msgp.AppendUint32(o, z.NegativeSpans[za0001].Length) + } + // string "NegativeBuckets" + o = append(o, 0xaf, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeBuckets))) + for za0002 := range z.NegativeBuckets { 
+ o = msgp.AppendInt64(o, z.NegativeBuckets[za0002]) + } + // string "NegativeCounts" + o = append(o, 0xae, 0x4e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.NegativeCounts))) + for za0003 := range z.NegativeCounts { + o = msgp.AppendFloat64(o, z.NegativeCounts[za0003]) + } + // string "PositiveSpans" + o = append(o, 0xad, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveSpans))) + for za0004 := range z.PositiveSpans { + // map header, size 2 + // string "Offset" + o = append(o, 0x82, 0xa6, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74) + o = msgp.AppendInt32(o, z.PositiveSpans[za0004].Offset) + // string "Length" + o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + o = msgp.AppendUint32(o, z.PositiveSpans[za0004].Length) + } + // string "PositiveBuckets" + o = append(o, 0xaf, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveBuckets))) + for za0005 := range z.PositiveBuckets { + o = msgp.AppendInt64(o, z.PositiveBuckets[za0005]) + } + // string "PositiveCounts" + o = append(o, 0xae, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PositiveCounts))) + for za0006 := range z.PositiveCounts { + o = msgp.AppendFloat64(o, z.PositiveCounts[za0006]) + } + // string "ResetHint" + o = append(o, 0xa9, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74) + o = msgp.AppendInt32(o, z.ResetHint) + // string "TimestampMillisecond" + o = append(o, 0xb4, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64) + o = msgp.AppendInt64(o, z.TimestampMillisecond) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Histogram) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Count": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.Count.IsInt, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Count", "IsInt") + return + } + case "IntValue": + z.Count.IntValue, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Count", "IntValue") + return + } + case "FloatValue": + z.Count.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Count", "FloatValue") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Count") + return + } + } + } + case "Sum": + z.Sum, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Sum") + return + } + case "Schema": + z.Schema, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Schema") + return + } + case "ZeroThreshold": + z.ZeroThreshold, bts, err = 
msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroThreshold") + return + } + case "ZeroCount": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.ZeroCount.IsInt, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IsInt") + return + } + case "IntValue": + z.ZeroCount.IntValue, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "IntValue") + return + } + case "FloatValue": + z.ZeroCount.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount", "FloatValue") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "ZeroCount") + return + } + } + } + case "NegativeSpans": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans") + return + } + if cap(z.NegativeSpans) >= int(zb0004) { + z.NegativeSpans = (z.NegativeSpans)[:zb0004] + } else { + z.NegativeSpans = make([]BucketSpan, zb0004) + } + for za0001 := range z.NegativeSpans { + var zb0005 uint32 + zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + for zb0005 > 0 { + zb0005-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.NegativeSpans[za0001].Offset, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Offset") + return + } + case "Length": + z.NegativeSpans[za0001].Length, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001, "Length") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeSpans", za0001) + return + } + } + } + } + case "NegativeBuckets": + var zb0006 uint32 + zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeBuckets") + return + } + if cap(z.NegativeBuckets) >= int(zb0006) { + z.NegativeBuckets = (z.NegativeBuckets)[:zb0006] + } else { + z.NegativeBuckets = make([]int64, zb0006) + } + for za0002 := range z.NegativeBuckets { + z.NegativeBuckets[za0002], bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeBuckets", za0002) + return + } + } + case "NegativeCounts": + var zb0007 uint32 + zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeCounts") + return + } + if cap(z.NegativeCounts) >= int(zb0007) { + z.NegativeCounts = (z.NegativeCounts)[:zb0007] + } else { + z.NegativeCounts = make([]float64, zb0007) + } + for za0003 := range z.NegativeCounts { + z.NegativeCounts[za0003], bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "NegativeCounts", za0003) + return + } + } + case "PositiveSpans": + var zb0008 uint32 + zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans") + return + } + if cap(z.PositiveSpans) >= int(zb0008) { + z.PositiveSpans = 
(z.PositiveSpans)[:zb0008] + } else { + z.PositiveSpans = make([]BucketSpan, zb0008) + } + for za0004 := range z.PositiveSpans { + var zb0009 uint32 + zb0009, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + for zb0009 > 0 { + zb0009-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + switch msgp.UnsafeString(field) { + case "Offset": + z.PositiveSpans[za0004].Offset, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Offset") + return + } + case "Length": + z.PositiveSpans[za0004].Length, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004, "Length") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveSpans", za0004) + return + } + } + } + } + case "PositiveBuckets": + var zb0010 uint32 + zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveBuckets") + return + } + if cap(z.PositiveBuckets) >= int(zb0010) { + z.PositiveBuckets = (z.PositiveBuckets)[:zb0010] + } else { + z.PositiveBuckets = make([]int64, zb0010) + } + for za0005 := range z.PositiveBuckets { + z.PositiveBuckets[za0005], bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveBuckets", za0005) + return + } + } + case "PositiveCounts": + var zb0011 uint32 + zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveCounts") + return + } + if cap(z.PositiveCounts) >= int(zb0011) { + z.PositiveCounts = (z.PositiveCounts)[:zb0011] + } else { + z.PositiveCounts = make([]float64, zb0011) + } + for za0006 := range z.PositiveCounts { + z.PositiveCounts[za0006], bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PositiveCounts", za0006) + return + } + } + case "ResetHint": + z.ResetHint, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ResetHint") + return + } + case "TimestampMillisecond": + z.TimestampMillisecond, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TimestampMillisecond") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Histogram) Msgsize() (s int) { + s = 1 + 6 + 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size + 4 + msgp.Float64Size + 7 + msgp.Int32Size + 14 + msgp.Float64Size + 10 + 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size + 14 + msgp.ArrayHeaderSize + (len(z.NegativeSpans) * (15 + msgp.Int32Size + msgp.Uint32Size)) + 16 + msgp.ArrayHeaderSize + (len(z.NegativeBuckets) * (msgp.Int64Size)) + 15 + msgp.ArrayHeaderSize + (len(z.NegativeCounts) * (msgp.Float64Size)) + 14 + msgp.ArrayHeaderSize + (len(z.PositiveSpans) * (15 + msgp.Int32Size + msgp.Uint32Size)) + 16 + msgp.ArrayHeaderSize + (len(z.PositiveBuckets) * (msgp.Int64Size)) + 15 + msgp.ArrayHeaderSize + (len(z.PositiveCounts) * (msgp.Float64Size)) + 10 + msgp.Int32Size + 21 + msgp.Int64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *HistogramCount) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + 
zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.IsInt, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "IsInt") + return + } + case "IntValue": + z.IntValue, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "FloatValue": + z.FloatValue, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "FloatValue") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z HistogramCount) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "IsInt" + err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteBool(z.IsInt) + if err != nil { + err = msgp.WrapError(err, "IsInt") + return + } + // write "IntValue" + err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteUint64(z.IntValue) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + // write "FloatValue" + err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteFloat64(z.FloatValue) + if err != nil { + err = msgp.WrapError(err, "FloatValue") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z HistogramCount) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "IsInt" + o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + o = msgp.AppendBool(o, z.IsInt) + // string "IntValue" + o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendUint64(o, z.IntValue) + // string "FloatValue" + o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.FloatValue) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *HistogramCount) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.IsInt, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsInt") + return + } + case "IntValue": + z.IntValue, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "FloatValue": + z.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "FloatValue") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z HistogramCount) Msgsize() (s int) { + s = 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *HistogramZeroCount) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = 
dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.IsInt, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "IsInt") + return + } + case "IntValue": + z.IntValue, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "FloatValue": + z.FloatValue, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "FloatValue") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z HistogramZeroCount) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "IsInt" + err = en.Append(0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteBool(z.IsInt) + if err != nil { + err = msgp.WrapError(err, "IsInt") + return + } + // write "IntValue" + err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteUint64(z.IntValue) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + // write "FloatValue" + err = en.Append(0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteFloat64(z.FloatValue) + if err != nil { + err = msgp.WrapError(err, "FloatValue") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z HistogramZeroCount) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "IsInt" + o = append(o, 0x83, 0xa5, 0x49, 0x73, 0x49, 0x6e, 0x74) + o = msgp.AppendBool(o, z.IsInt) + // string "IntValue" + o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendUint64(o, z.IntValue) + // string "FloatValue" + o = append(o, 0xaa, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.FloatValue) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *HistogramZeroCount) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "IsInt": + z.IsInt, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsInt") + return + } + case "IntValue": + z.IntValue, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "FloatValue": + z.FloatValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "FloatValue") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z HistogramZeroCount) Msgsize() (s int) { + s = 1 + 6 + msgp.BoolSize + 9 + msgp.Uint64Size + 11 + msgp.Float64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *Histograms) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = 
dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Histogram": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Histogram") + return + } + z.Histogram = nil + } else { + if z.Histogram == nil { + z.Histogram = new(Histogram) + } + err = z.Histogram.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Histogram") + return + } + } + case "FloatHistogram": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "FloatHistogram") + return + } + z.FloatHistogram = nil + } else { + if z.FloatHistogram == nil { + z.FloatHistogram = new(FloatHistogram) + } + err = z.FloatHistogram.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "FloatHistogram") + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Histograms) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "Histogram" + err = en.Append(0x82, 0xa9, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) + if err != nil { + return + } + if z.Histogram == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Histogram.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Histogram") + return + } + } + // write "FloatHistogram" + err = en.Append(0xae, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) + if err != nil { + return + } + if z.FloatHistogram == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.FloatHistogram.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "FloatHistogram") + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Histograms) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Histogram" + o = append(o, 0x82, 0xa9, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) + if z.Histogram == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Histogram.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Histogram") + return + } + } + // string "FloatHistogram" + o = append(o, 0xae, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) + if z.FloatHistogram == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.FloatHistogram.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "FloatHistogram") + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Histograms) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Histogram": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Histogram = nil + } else { + if z.Histogram == nil { + z.Histogram = new(Histogram) + } + bts, err = z.Histogram.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Histogram") + return + } + } + case "FloatHistogram": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if 
err != nil { + return + } + z.FloatHistogram = nil + } else { + if z.FloatHistogram == nil { + z.FloatHistogram = new(FloatHistogram) + } + bts, err = z.FloatHistogram.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "FloatHistogram") + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Histograms) Msgsize() (s int) { + s = 1 + 10 + if z.Histogram == nil { + s += msgp.NilSize + } else { + s += z.Histogram.Msgsize() + } + s += 15 + if z.FloatHistogram == nil { + s += msgp.NilSize + } else { + s += z.FloatHistogram.Msgsize() + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *SeriesGroup) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Strings": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Strings") + return + } + if cap(z.Strings) >= int(zb0002) { + z.Strings = (z.Strings)[:zb0002] + } else { + z.Strings = make([]string, zb0002) + } + for za0001 := range z.Strings { + z.Strings[za0001], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Strings", za0001) + return + } + } + case "Series": + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Series") + return + } + if cap(z.Series) >= int(zb0003) { + z.Series = (z.Series)[:zb0003] + } else { + z.Series = make([]*TimeSeriesBinary, zb0003) + } + for za0002 := range z.Series { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Series", za0002) + return + } + z.Series[za0002] = nil + } else { + if z.Series[za0002] == nil { + z.Series[za0002] = new(TimeSeriesBinary) + } + err = z.Series[za0002].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Series", za0002) + return + } + } + } + case "Metadata": + var zb0004 uint32 + zb0004, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Metadata") + return + } + if cap(z.Metadata) >= int(zb0004) { + z.Metadata = (z.Metadata)[:zb0004] + } else { + z.Metadata = make([]*TimeSeriesBinary, zb0004) + } + for za0003 := range z.Metadata { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Metadata", za0003) + return + } + z.Metadata[za0003] = nil + } else { + if z.Metadata[za0003] == nil { + z.Metadata[za0003] = new(TimeSeriesBinary) + } + err = z.Metadata[za0003].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0003) + return + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *SeriesGroup) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "Strings" + err = en.Append(0x83, 0xa7, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Strings))) + if err != nil { + err = msgp.WrapError(err, "Strings") + return + } + for za0001 := range z.Strings { + err = en.WriteString(z.Strings[za0001]) + if err != nil { + err = 
msgp.WrapError(err, "Strings", za0001) + return + } + } + // write "Series" + err = en.Append(0xa6, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Series))) + if err != nil { + err = msgp.WrapError(err, "Series") + return + } + for za0002 := range z.Series { + if z.Series[za0002] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Series[za0002].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Series", za0002) + return + } + } + } + // write "Metadata" + err = en.Append(0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Metadata))) + if err != nil { + err = msgp.WrapError(err, "Metadata") + return + } + for za0003 := range z.Metadata { + if z.Metadata[za0003] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Metadata[za0003].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0003) + return + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *SeriesGroup) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "Strings" + o = append(o, 0x83, 0xa7, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Strings))) + for za0001 := range z.Strings { + o = msgp.AppendString(o, z.Strings[za0001]) + } + // string "Series" + o = append(o, 0xa6, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Series))) + for za0002 := range z.Series { + if z.Series[za0002] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Series[za0002].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Series", za0002) + return + } + } + } + // string "Metadata" + o = append(o, 0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61) + o = msgp.AppendArrayHeader(o, uint32(len(z.Metadata))) + for za0003 := range z.Metadata { + if z.Metadata[za0003] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Metadata[za0003].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0003) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *SeriesGroup) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Strings": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Strings") + return + } + if cap(z.Strings) >= int(zb0002) { + z.Strings = (z.Strings)[:zb0002] + } else { + z.Strings = make([]string, zb0002) + } + for za0001 := range z.Strings { + z.Strings[za0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Strings", za0001) + return + } + } + case "Series": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Series") + return + } + if cap(z.Series) >= int(zb0003) { + z.Series = (z.Series)[:zb0003] + } else { + z.Series = make([]*TimeSeriesBinary, zb0003) + } + for za0002 := range z.Series { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Series[za0002] 
= nil + } else { + if z.Series[za0002] == nil { + z.Series[za0002] = new(TimeSeriesBinary) + } + bts, err = z.Series[za0002].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Series", za0002) + return + } + } + } + case "Metadata": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metadata") + return + } + if cap(z.Metadata) >= int(zb0004) { + z.Metadata = (z.Metadata)[:zb0004] + } else { + z.Metadata = make([]*TimeSeriesBinary, zb0004) + } + for za0003 := range z.Metadata { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Metadata[za0003] = nil + } else { + if z.Metadata[za0003] == nil { + z.Metadata[za0003] = new(TimeSeriesBinary) + } + bts, err = z.Metadata[za0003].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0003) + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SeriesGroup) Msgsize() (s int) { + s = 1 + 8 + msgp.ArrayHeaderSize + for za0001 := range z.Strings { + s += msgp.StringPrefixSize + len(z.Strings[za0001]) + } + s += 7 + msgp.ArrayHeaderSize + for za0002 := range z.Series { + if z.Series[za0002] == nil { + s += msgp.NilSize + } else { + s += z.Series[za0002].Msgsize() + } + } + s += 9 + msgp.ArrayHeaderSize + for za0003 := range z.Metadata { + if z.Metadata[za0003] == nil { + s += msgp.NilSize + } else { + s += z.Metadata[za0003].Msgsize() + } + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *TimeSeriesBinary) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "LabelsNames": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "LabelsNames") + return + } + if cap(z.LabelsNames) >= int(zb0002) { + z.LabelsNames = (z.LabelsNames)[:zb0002] + } else { + z.LabelsNames = make([]uint32, zb0002) + } + for za0001 := range z.LabelsNames { + z.LabelsNames[za0001], err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "LabelsNames", za0001) + return + } + } + case "LabelsValues": + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "LabelsValues") + return + } + if cap(z.LabelsValues) >= int(zb0003) { + z.LabelsValues = (z.LabelsValues)[:zb0003] + } else { + z.LabelsValues = make([]uint32, zb0003) + } + for za0002 := range z.LabelsValues { + z.LabelsValues[za0002], err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "LabelsValues", za0002) + return + } + } + case "TS": + z.TS, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "TS") + return + } + case "Value": + z.Value, err = dc.ReadFloat64() + if err != nil { + err = msgp.WrapError(err, "Value") + return + } + case "Hash": + z.Hash, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + case "Histograms": + err = z.Histograms.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Histograms") + return + } + default: + err = dc.Skip() + if err != nil { + err = 
msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *TimeSeriesBinary) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 6 + // write "LabelsNames" + err = en.Append(0x86, 0xab, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.LabelsNames))) + if err != nil { + err = msgp.WrapError(err, "LabelsNames") + return + } + for za0001 := range z.LabelsNames { + err = en.WriteUint32(z.LabelsNames[za0001]) + if err != nil { + err = msgp.WrapError(err, "LabelsNames", za0001) + return + } + } + // write "LabelsValues" + err = en.Append(0xac, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.LabelsValues))) + if err != nil { + err = msgp.WrapError(err, "LabelsValues") + return + } + for za0002 := range z.LabelsValues { + err = en.WriteUint32(z.LabelsValues[za0002]) + if err != nil { + err = msgp.WrapError(err, "LabelsValues", za0002) + return + } + } + // write "TS" + err = en.Append(0xa2, 0x54, 0x53) + if err != nil { + return + } + err = en.WriteInt64(z.TS) + if err != nil { + err = msgp.WrapError(err, "TS") + return + } + // write "Value" + err = en.Append(0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteFloat64(z.Value) + if err != nil { + err = msgp.WrapError(err, "Value") + return + } + // write "Hash" + err = en.Append(0xa4, 0x48, 0x61, 0x73, 0x68) + if err != nil { + return + } + err = en.WriteUint64(z.Hash) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + // write "Histograms" + err = en.Append(0xaa, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x73) + if err != nil { + return + } + err = z.Histograms.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Histograms") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TimeSeriesBinary) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 6 + // string "LabelsNames" + o = append(o, 0x86, 0xab, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.LabelsNames))) + for za0001 := range z.LabelsNames { + o = msgp.AppendUint32(o, z.LabelsNames[za0001]) + } + // string "LabelsValues" + o = append(o, 0xac, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.LabelsValues))) + for za0002 := range z.LabelsValues { + o = msgp.AppendUint32(o, z.LabelsValues[za0002]) + } + // string "TS" + o = append(o, 0xa2, 0x54, 0x53) + o = msgp.AppendInt64(o, z.TS) + // string "Value" + o = append(o, 0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.Value) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendUint64(o, z.Hash) + // string "Histograms" + o = append(o, 0xaa, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x73) + o, err = z.Histograms.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Histograms") + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TimeSeriesBinary) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + 
err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "LabelsNames": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LabelsNames") + return + } + if cap(z.LabelsNames) >= int(zb0002) { + z.LabelsNames = (z.LabelsNames)[:zb0002] + } else { + z.LabelsNames = make([]uint32, zb0002) + } + for za0001 := range z.LabelsNames { + z.LabelsNames[za0001], bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LabelsNames", za0001) + return + } + } + case "LabelsValues": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LabelsValues") + return + } + if cap(z.LabelsValues) >= int(zb0003) { + z.LabelsValues = (z.LabelsValues)[:zb0003] + } else { + z.LabelsValues = make([]uint32, zb0003) + } + for za0002 := range z.LabelsValues { + z.LabelsValues[za0002], bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LabelsValues", za0002) + return + } + } + case "TS": + z.TS, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TS") + return + } + case "Value": + z.Value, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Value") + return + } + case "Hash": + z.Hash, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + case "Histograms": + bts, err = z.Histograms.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Histograms") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TimeSeriesBinary) Msgsize() (s int) { + s = 1 + 12 + msgp.ArrayHeaderSize + (len(z.LabelsNames) * (msgp.Uint32Size)) + 13 + msgp.ArrayHeaderSize + (len(z.LabelsValues) * (msgp.Uint32Size)) + 3 + msgp.Int64Size + 6 + msgp.Float64Size + 5 + msgp.Uint64Size + 11 + z.Histograms.Msgsize() + return +} diff --git a/internal/component/prometheus/remote/queue/types/serialization_gen_test.go b/internal/component/prometheus/remote/queue/types/serialization_gen_test.go new file mode 100644 index 0000000000..e6e18c7901 --- /dev/null +++ b/internal/component/prometheus/remote/queue/types/serialization_gen_test.go @@ -0,0 +1,914 @@ +package types + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
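+
+// The generated tests below exercise the round-trip contract of the methods
+// defined in serialization_gen.go. As a minimal usage sketch (assuming a
+// populated Histogram value h), the append-style API composes as:
+//
+//	buf, _ := h.MarshalMsg(make([]byte, 0, h.Msgsize())) // Msgsize is an upper bound, so the slice never regrows
+//	var out Histogram
+//	_, err := out.UnmarshalMsg(buf) // leftover bytes are the first return value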
+ +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalBucketSpan(t *testing.T) { + v := BucketSpan{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBucketSpan(b *testing.B) { + v := BucketSpan{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBucketSpan(b *testing.B) { + v := BucketSpan{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBucketSpan(b *testing.B) { + v := BucketSpan{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBucketSpan(t *testing.T) { + v := BucketSpan{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBucketSpan Msgsize() is inaccurate") + } + + vn := BucketSpan{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBucketSpan(b *testing.B) { + v := BucketSpan{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBucketSpan(b *testing.B) { + v := BucketSpan{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalFloatHistogram(t *testing.T) { + v := FloatHistogram{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgFloatHistogram(b *testing.B) { + v := FloatHistogram{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgFloatHistogram(b *testing.B) { + v := FloatHistogram{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalFloatHistogram(b *testing.B) { + v := FloatHistogram{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err 
!= nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeFloatHistogram(t *testing.T) { + v := FloatHistogram{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeFloatHistogram Msgsize() is inaccurate") + } + + vn := FloatHistogram{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeFloatHistogram(b *testing.B) { + v := FloatHistogram{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeFloatHistogram(b *testing.B) { + v := FloatHistogram{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalHistogram(t *testing.T) { + v := Histogram{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgHistogram(b *testing.B) { + v := Histogram{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgHistogram(b *testing.B) { + v := Histogram{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalHistogram(b *testing.B) { + v := Histogram{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeHistogram(t *testing.T) { + v := Histogram{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeHistogram Msgsize() is inaccurate") + } + + vn := Histogram{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeHistogram(b *testing.B) { + v := Histogram{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeHistogram(b *testing.B) { + v := Histogram{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalHistogramCount(t *testing.T) { + v := HistogramCount{} + bts, err := v.MarshalMsg(nil) + if err != nil 
{ + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgHistogramCount(b *testing.B) { + v := HistogramCount{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgHistogramCount(b *testing.B) { + v := HistogramCount{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalHistogramCount(b *testing.B) { + v := HistogramCount{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeHistogramCount(t *testing.T) { + v := HistogramCount{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeHistogramCount Msgsize() is inaccurate") + } + + vn := HistogramCount{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeHistogramCount(b *testing.B) { + v := HistogramCount{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeHistogramCount(b *testing.B) { + v := HistogramCount{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalHistogramZeroCount(t *testing.T) { + v := HistogramZeroCount{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgHistogramZeroCount(b *testing.B) { + v := HistogramZeroCount{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgHistogramZeroCount(b *testing.B) { + v := HistogramZeroCount{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalHistogramZeroCount(b *testing.B) { + v := HistogramZeroCount{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeHistogramZeroCount(t *testing.T) { + v := 
HistogramZeroCount{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeHistogramZeroCount Msgsize() is inaccurate") + } + + vn := HistogramZeroCount{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeHistogramZeroCount(b *testing.B) { + v := HistogramZeroCount{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeHistogramZeroCount(b *testing.B) { + v := HistogramZeroCount{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalHistograms(t *testing.T) { + v := Histograms{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgHistograms(b *testing.B) { + v := Histograms{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgHistograms(b *testing.B) { + v := Histograms{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalHistograms(b *testing.B) { + v := Histograms{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeHistograms(t *testing.T) { + v := Histograms{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeHistograms Msgsize() is inaccurate") + } + + vn := Histograms{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeHistograms(b *testing.B) { + v := Histograms{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeHistograms(b *testing.B) { + v := Histograms{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalSeriesGroup(t *testing.T) { + v := SeriesGroup{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + 
if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgSeriesGroup(b *testing.B) { + v := SeriesGroup{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgSeriesGroup(b *testing.B) { + v := SeriesGroup{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalSeriesGroup(b *testing.B) { + v := SeriesGroup{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeSeriesGroup(t *testing.T) { + v := SeriesGroup{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeSeriesGroup Msgsize() is inaccurate") + } + + vn := SeriesGroup{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeSeriesGroup(b *testing.B) { + v := SeriesGroup{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeSeriesGroup(b *testing.B) { + v := SeriesGroup{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalTimeSeriesBinary(t *testing.T) { + v := TimeSeriesBinary{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgTimeSeriesBinary(b *testing.B) { + v := TimeSeriesBinary{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgTimeSeriesBinary(b *testing.B) { + v := TimeSeriesBinary{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalTimeSeriesBinary(b *testing.B) { + v := TimeSeriesBinary{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeTimeSeriesBinary(t *testing.T) { + v := TimeSeriesBinary{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + 
t.Log("WARNING: TestEncodeDecodeTimeSeriesBinary Msgsize() is inaccurate") + } + + vn := TimeSeriesBinary{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeTimeSeriesBinary(b *testing.B) { + v := TimeSeriesBinary{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeTimeSeriesBinary(b *testing.B) { + v := TimeSeriesBinary{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/internal/component/prometheus/remote/queue/types/serialization_test.go b/internal/component/prometheus/remote/queue/types/serialization_test.go new file mode 100644 index 0000000000..b121110be2 --- /dev/null +++ b/internal/component/prometheus/remote/queue/types/serialization_test.go @@ -0,0 +1,59 @@ +package types + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" +) + +func TestLabels(t *testing.T) { + lblsMap := make(map[string]string) + unique := make(map[string]struct{}) + for i := 0; i < 1_000; i++ { + k := fmt.Sprintf("key_%d", i) + v := randString() + lblsMap[k] = v + unique[k] = struct{}{} + unique[v] = struct{}{} + } + sg := &SeriesGroup{ + Series: make([]*TimeSeriesBinary, 1), + } + sg.Series[0] = GetTimeSeriesBinary() + defer PutTimeSeriesBinary(sg.Series[0]) + sg.Series[0].Labels = labels.FromMap(lblsMap) + strMap := make(map[string]uint32) + + sg.Series[0].FillLabelMapping(strMap) + require.True(t, len(sg.Series) == len(unique)) + stringsSlice := make([]string, len(strMap)) + for k, v := range strMap { + stringsSlice[v] = k + } + sg.Strings = stringsSlice + buf, err := sg.MarshalMsg(nil) + require.NoError(t, err) + newSg := &SeriesGroup{} + newSg, _, err = DeserializeToSeriesGroup(newSg, buf) + require.NoError(t, err) + series1 := newSg.Series[0] + series2 := sg.Series[0] + require.Len(t, series2.Labels, len(series1.Labels)) + for i, lbl := range series2.Labels { + require.Equal(t, lbl.Name, series1.Labels[i].Name) + require.Equal(t, lbl.Value, series1.Labels[i].Value) + } +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randString() string { + b := make([]rune, rand.Intn(20)) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/internal/component/prometheus/remote/queue/types/serializer.go b/internal/component/prometheus/remote/queue/types/serializer.go new file mode 100644 index 0000000000..6919f666f4 --- /dev/null +++ b/internal/component/prometheus/remote/queue/types/serializer.go @@ -0,0 +1,22 @@ +package types + +import ( + "context" + "time" +) + +type SerializerConfig struct { + // MaxSignalsInBatch controls what the max batch size is. + MaxSignalsInBatch uint32 + // FlushFrequency controls how often to write to disk regardless of MaxSignalsInBatch. + FlushFrequency time.Duration +} + +// Serializer handles converting a set of signals into a binary representation to be written to storage. 
+type Serializer interface { + Start() + Stop() + SendSeries(ctx context.Context, data *TimeSeriesBinary) error + SendMetadata(ctx context.Context, data *TimeSeriesBinary) error + UpdateConfig(ctx context.Context, cfg SerializerConfig) error +} diff --git a/internal/component/prometheus/remote/queue/types/stats.go b/internal/component/prometheus/remote/queue/types/stats.go new file mode 100644 index 0000000000..c74f0953a4 --- /dev/null +++ b/internal/component/prometheus/remote/queue/types/stats.go @@ -0,0 +1,8 @@ +package types + +type SerializerStats struct { + SeriesStored int + MetadataStored int + Errors int + NewestTimestamp int64 +} From 175aafb635eea16001c2ec5abb7d9e0ce31dc052 Mon Sep 17 00:00:00 2001 From: matt durham Date: Wed, 11 Sep 2024 11:13:12 -0400 Subject: [PATCH 02/11] Don't run these tests with the race detector since we access vars directly. --- Makefile | 2 +- .../remote/queue/serialization/seralizer_test.go | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index d97061ce64..3badbe3a5d 100644 --- a/Makefile +++ b/Makefile @@ -141,7 +141,7 @@ lint: alloylint # final command runs tests for all other submodules. test: $(GO_ENV) go test $(GO_FLAGS) -race $(shell go list ./... | grep -v /integration-tests/) - $(GO_ENV) go test $(GO_FLAGS) ./internal/static/integrations/node_exporter ./internal/static/logs ./internal/component/otelcol/processor/tail_sampling ./internal/component/loki/source/file ./internal/component/loki/source/docker + $(GO_ENV) go test $(GO_FLAGS) ./internal/static/integrations/node_exporter ./internal/static/logs ./internal/component/otelcol/processor/tail_sampling ./internal/component/loki/source/file ./internal/component/loki/source/docker ./internal/component/prometheus/remote/queue/serialization $(GO_ENV) find . -name go.mod -not -path "./go.mod" -execdir go test -race ./... 
\; test-packages: diff --git a/internal/component/prometheus/remote/queue/serialization/seralizer_test.go b/internal/component/prometheus/remote/queue/serialization/seralizer_test.go index 23fe152989..6f990452f3 100644 --- a/internal/component/prometheus/remote/queue/serialization/seralizer_test.go +++ b/internal/component/prometheus/remote/queue/serialization/seralizer_test.go @@ -1,8 +1,11 @@ +//go:build !race + package serialization import ( "context" "fmt" + "sync/atomic" "testing" "time" @@ -14,7 +17,7 @@ import ( ) func TestRoundTripSerialization(t *testing.T) { - var totalSeries = 0 + totalSeries := atomic.Int64{} f := &fqq{t: t} l := log.NewNopLogger() start := time.Now().Add(-1 * time.Second).Unix() @@ -23,7 +26,7 @@ func TestRoundTripSerialization(t *testing.T) { MaxSignalsInBatch: 10, FlushFrequency: 5 * time.Second, }, f, func(stats types.SerializerStats) { - totalSeries = totalSeries + stats.SeriesStored + totalSeries.Add(int64(stats.SeriesStored)) require.True(t, stats.SeriesStored == 10) require.True(t, stats.Errors == 0) require.True(t, stats.MetadataStored == 0) @@ -48,10 +51,10 @@ func TestRoundTripSerialization(t *testing.T) { require.NoError(t, sendErr) } require.Eventually(t, func() bool { - return f.total == 100 + return f.total.Load() == 100 }, 5*time.Second, 100*time.Millisecond) // 100 series sent from the above for loop - require.True(t, totalSeries == 100) + require.True(t, totalSeries.Load() == 100) } func TestUpdateConfig(t *testing.T) { @@ -79,7 +82,7 @@ var _ types.FileStorage = (*fqq)(nil) type fqq struct { t *testing.T buf []byte - total int + total atomic.Int64 } func (f *fqq) Start() { @@ -105,6 +108,6 @@ func (f *fqq) Store(ctx context.Context, meta map[string]string, value []byte) e series.Labels[j].Value = fmt.Sprintf("value_%d_%d", int(series.Value), j) } } - f.total += len(sg.Series) + f.total.Add(int64(len(sg.Series))) return nil } From 57e6ddd5992f6bb6d2c991a2c6d5f2956df67c16 Mon Sep 17 00:00:00 2001 From: matt durham Date: Wed, 11 Sep 2024 11:37:06 -0400 Subject: [PATCH 03/11] Fix test. --- .../prometheus/remote/queue/types/serialization_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/component/prometheus/remote/queue/types/serialization_test.go b/internal/component/prometheus/remote/queue/types/serialization_test.go index b121110be2..ec14a53072 100644 --- a/internal/component/prometheus/remote/queue/types/serialization_test.go +++ b/internal/component/prometheus/remote/queue/types/serialization_test.go @@ -28,7 +28,6 @@ func TestLabels(t *testing.T) { strMap := make(map[string]uint32) sg.Series[0].FillLabelMapping(strMap) - require.True(t, len(sg.Series) == len(unique)) stringsSlice := make([]string, len(strMap)) for k, v := range strMap { stringsSlice[v] = k @@ -42,6 +41,7 @@ func TestLabels(t *testing.T) { series1 := newSg.Series[0] series2 := sg.Series[0] require.Len(t, series2.Labels, len(series1.Labels)) + // Ensure we were able to convert back and forth properly. for i, lbl := range series2.Labels { require.Equal(t, lbl.Name, series1.Labels[i].Name) require.Equal(t, lbl.Value, series1.Labels[i].Value) From 55fe162a37ed277b8b78f4998f05316d2b8c9d45 Mon Sep 17 00:00:00 2001 From: matt durham Date: Wed, 11 Sep 2024 12:24:52 -0400 Subject: [PATCH 04/11] Fix typo in file name and return early in DeserializeToSeriesGroup. 
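
Without the early return, DeserializeToSeriesGroup would continue into the
label-rebuilding loop on a SeriesGroup that failed to unmarshal. Surfacing
the UnmarshalMsg error first, i.e.

	buffer, err := sg.UnmarshalMsg(buf)
	if err != nil {
		return sg, nil, err
	}

avoids touching half-populated data.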
--- .../serialization/{seralizer_test.go => serializer_test.go} | 0 .../component/prometheus/remote/queue/types/serialization.go | 3 +++ 2 files changed, 3 insertions(+) rename internal/component/prometheus/remote/queue/serialization/{seralizer_test.go => serializer_test.go} (100%) diff --git a/internal/component/prometheus/remote/queue/serialization/seralizer_test.go b/internal/component/prometheus/remote/queue/serialization/serializer_test.go similarity index 100% rename from internal/component/prometheus/remote/queue/serialization/seralizer_test.go rename to internal/component/prometheus/remote/queue/serialization/serializer_test.go diff --git a/internal/component/prometheus/remote/queue/types/serialization.go b/internal/component/prometheus/remote/queue/types/serialization.go index d6926b7e47..4123939656 100644 --- a/internal/component/prometheus/remote/queue/types/serialization.go +++ b/internal/component/prometheus/remote/queue/types/serialization.go @@ -246,6 +246,9 @@ func PutTimeSeriesBinary(ts *TimeSeriesBinary) { // DeserializeToSeriesGroup transforms a buffer to a SeriesGroup and converts the stringmap + indexes into actual Labels. func DeserializeToSeriesGroup(sg *SeriesGroup, buf []byte) (*SeriesGroup, []byte, error) { buffer, err := sg.UnmarshalMsg(buf) + if err != nil { + return sg, nil, err + } // Need to fill in the labels. for _, series := range sg.Series { if cap(series.Labels) < len(series.LabelsNames) { From 843ef50ed9d36a09bb366eac7c317be5e3249f17 Mon Sep 17 00:00:00 2001 From: mattdurham Date: Fri, 13 Sep 2024 10:23:37 -0400 Subject: [PATCH 05/11] Update internal/component/prometheus/remote/queue/serialization/appender.go Co-authored-by: Piotr <17101802+thampiotr@users.noreply.github.com> --- .../prometheus/remote/queue/serialization/appender.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/component/prometheus/remote/queue/serialization/appender.go b/internal/component/prometheus/remote/queue/serialization/appender.go index 541617f307..47f6bd30c4 100644 --- a/internal/component/prometheus/remote/queue/serialization/appender.go +++ b/internal/component/prometheus/remote/queue/serialization/appender.go @@ -25,8 +25,8 @@ func (a *appender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, return ref, nil } -// NewAppender returns an Appender that writes to a given serializer. NOTE the Appender returned writes -// data immediately and does not honor commit or rollback. +// NewAppender returns an Appender that writes to a given serializer. NOTE the returned Appender writes +// data immediately, discards data older than `ttl` and does not honor commit or rollback. 
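+//
+// A rough caller-side sketch (names illustrative only):
+//
+//	app := NewAppender(ctx, ttl, serializer, logger)
+//	_, err := app.Append(0, lbls, timestamp, value)
+//	_ = app.Commit() // no-op; the sample was already handed to the serializer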
func NewAppender(ctx context.Context, ttl time.Duration, s types.Serializer, logger log.Logger) storage.Appender { app := &appender{ ttl: ttl, From c359236edb987a652acf24c8deb0e4700120c4c9 Mon Sep 17 00:00:00 2001 From: mattdurham Date: Fri, 13 Sep 2024 10:26:22 -0400 Subject: [PATCH 06/11] Update internal/component/prometheus/remote/queue/serialization/serializer.go Co-authored-by: Piotr <17101802+thampiotr@users.noreply.github.com> --- .../prometheus/remote/queue/serialization/serializer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/component/prometheus/remote/queue/serialization/serializer.go b/internal/component/prometheus/remote/queue/serialization/serializer.go index f702de45b9..70ac2a3546 100644 --- a/internal/component/prometheus/remote/queue/serialization/serializer.go +++ b/internal/component/prometheus/remote/queue/serialization/serializer.go @@ -12,7 +12,7 @@ import ( "github.com/vladopajic/go-actor/actor" ) -// serializer collects data from multiple appenders and will write them to file.Storage. +// serializer collects data from multiple appenders in-memory and will periodically flush the data to file.Storage. // serializer will trigger based on the last flush duration OR if it hits a certain amount of items. type serializer struct { inbox actor.Mailbox[*types.TimeSeriesBinary] From 6da119823006d31bbf458406dec52cf698b99ec2 Mon Sep 17 00:00:00 2001 From: matt durham Date: Fri, 13 Sep 2024 10:29:48 -0400 Subject: [PATCH 07/11] Rename to indicate that TimeSeries are Put/Get from a pool. --- .../prometheus/remote/queue/serialization/appender.go | 8 ++++---- .../prometheus/remote/queue/serialization/serializer.go | 4 ++-- .../remote/queue/serialization/serializer_bench_test.go | 6 +++--- .../remote/queue/serialization/serializer_test.go | 2 +- .../prometheus/remote/queue/types/serialization.go | 8 ++++---- .../prometheus/remote/queue/types/serialization_test.go | 4 ++-- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/internal/component/prometheus/remote/queue/serialization/appender.go b/internal/component/prometheus/remote/queue/serialization/appender.go index 541617f307..33127759b0 100644 --- a/internal/component/prometheus/remote/queue/serialization/appender.go +++ b/internal/component/prometheus/remote/queue/serialization/appender.go @@ -44,7 +44,7 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo if t < endTime { return ref, nil } - ts := types.GetTimeSeriesBinary() + ts := types.GetTimeSeriesFromPool() ts.Labels = l ts.TS = t ts.Value = v @@ -69,7 +69,7 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exem if e.HasTs && e.Ts < endTime { return ref, nil } - ts := types.GetTimeSeriesBinary() + ts := types.GetTimeSeriesFromPool() ts.Hash = e.Labels.Hash() ts.TS = e.Ts ts.Labels = e.Labels @@ -84,7 +84,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int if t < endTime { return ref, nil } - ts := types.GetTimeSeriesBinary() + ts := types.GetTimeSeriesFromPool() ts.Labels = l ts.TS = t if h != nil { @@ -99,7 +99,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int // UpdateMetadata updates metadata. func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (_ storage.SeriesRef, _ error) { - ts := types.GetTimeSeriesBinary() + ts := types.GetTimeSeriesFromPool() // We are going to handle converting some strings to hopefully not reused label names. 
TimeSeriesBinary has a lot of work // to ensure it's efficient, so it makes sense to encode metadata into it. combinedLabels := l.Copy() diff --git a/internal/component/prometheus/remote/queue/serialization/serializer.go b/internal/component/prometheus/remote/queue/serialization/serializer.go index f702de45b9..61bfbaca08 100644 --- a/internal/component/prometheus/remote/queue/serialization/serializer.go +++ b/internal/component/prometheus/remote/queue/serialization/serializer.go @@ -161,8 +161,8 @@ func (s *serializer) store(ctx actor.Context) error { defer func() { s.storeStats(err) // Return series to the pool; this is key to reducing allocs. - types.PutTimeSeriesBinarySlice(s.series) - types.PutTimeSeriesBinarySlice(s.meta) + types.PutTimeSeriesSliceIntoPool(s.series) + types.PutTimeSeriesSliceIntoPool(s.meta) s.series = s.series[:0] s.meta = s.meta[:0] }() diff --git a/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go b/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go index f4d6ffe725..5007a6df84 100644 --- a/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go +++ b/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go @@ -52,7 +52,7 @@ func getTimeSeries(b *testing.B) []*types.TimeSeriesBinary { b.Helper() series := make([]*types.TimeSeriesBinary, 0) for j := 0; j < 10_000; j++ { - timeseries := types.GetTimeSeriesBinary() + timeseries := types.GetTimeSeriesFromPool() timeseries.TS = time.Now().Unix() timeseries.Value = rand.Float64() timeseries.Labels = getLabels() @@ -86,12 +86,12 @@ func (f *fakeSerializer) Start() {} func (f *fakeSerializer) Stop() {} func (f *fakeSerializer) SendSeries(ctx context.Context, data *types.TimeSeriesBinary) error { - types.PutTimeSeriesBinary(data) + types.PutTimeSeriesIntoPool(data) return nil } func (f *fakeSerializer) SendMetadata(ctx context.Context, data *types.TimeSeriesBinary) error { - types.PutTimeSeriesBinary(data) + types.PutTimeSeriesIntoPool(data) return nil } diff --git a/internal/component/prometheus/remote/queue/serialization/serializer_test.go b/internal/component/prometheus/remote/queue/serialization/serializer_test.go index 6f990452f3..a7967684a8 100644 --- a/internal/component/prometheus/remote/queue/serialization/serializer_test.go +++ b/internal/component/prometheus/remote/queue/serialization/serializer_test.go @@ -37,7 +37,7 @@ func TestRoundTripSerialization(t *testing.T) { s.Start() defer s.Stop() for i := 0; i < 100; i++ { - tss := types.GetTimeSeriesBinary() + tss := types.GetTimeSeriesFromPool() tss.Labels = make(labels.Labels, 10) for j := 0; j < 10; j++ { tss.Labels[j] = labels.Label{ diff --git a/internal/component/prometheus/remote/queue/types/serialization.go b/internal/component/prometheus/remote/queue/types/serialization.go index 4123939656..38c2024725 100644 --- a/internal/component/prometheus/remote/queue/types/serialization.go +++ b/internal/component/prometheus/remote/queue/types/serialization.go @@ -216,21 +216,21 @@ var tsBinaryPool = sync.Pool{ }, } -func GetTimeSeriesBinary() *TimeSeriesBinary { +func GetTimeSeriesFromPool() *TimeSeriesBinary { OutStandingTimeSeriesBinary.Inc() return tsBinaryPool.Get().(*TimeSeriesBinary) } var OutStandingTimeSeriesBinary = atomic.Int32{} -func PutTimeSeriesBinarySlice(tss []*TimeSeriesBinary) { +func PutTimeSeriesSliceIntoPool(tss []*TimeSeriesBinary) { for i := 0; i < len(tss); i++ { - PutTimeSeriesBinary(tss[i]) + PutTimeSeriesIntoPool(tss[i]) } } -func 
PutTimeSeriesBinary(ts *TimeSeriesBinary) { +func PutTimeSeriesIntoPool(ts *TimeSeriesBinary) { OutStandingTimeSeriesBinary.Dec() ts.LabelsNames = ts.LabelsNames[:0] ts.LabelsValues = ts.LabelsValues[:0] diff --git a/internal/component/prometheus/remote/queue/types/serialization_test.go b/internal/component/prometheus/remote/queue/types/serialization_test.go index ec14a53072..59f6d077ae 100644 --- a/internal/component/prometheus/remote/queue/types/serialization_test.go +++ b/internal/component/prometheus/remote/queue/types/serialization_test.go @@ -22,8 +22,8 @@ func TestLabels(t *testing.T) { sg := &SeriesGroup{ Series: make([]*TimeSeriesBinary, 1), } - sg.Series[0] = GetTimeSeriesBinary() - defer PutTimeSeriesBinary(sg.Series[0]) + sg.Series[0] = GetTimeSeriesFromPool() + defer PutTimeSeriesIntoPool(sg.Series[0]) sg.Series[0].Labels = labels.FromMap(lblsMap) strMap := make(map[string]uint32) From 58f2385d95a68053221d8d479e571a7564dd00bb Mon Sep 17 00:00:00 2001 From: matt durham Date: Fri, 13 Sep 2024 10:39:57 -0400 Subject: [PATCH 08/11] Remove func that was about the same number of lines as inlining. --- .../remote/queue/serialization/serializer.go | 42 +++++++------------ 1 file changed, 14 insertions(+), 28 deletions(-) diff --git a/internal/component/prometheus/remote/queue/serialization/serializer.go b/internal/component/prometheus/remote/queue/serialization/serializer.go index 93014e74f4..78386dae89 100644 --- a/internal/component/prometheus/remote/queue/serialization/serializer.go +++ b/internal/component/prometheus/remote/queue/serialization/serializer.go @@ -98,18 +98,26 @@ func (s *serializer) DoWork(ctx actor.Context) actor.WorkerStatus { if !ok { return actor.WorkerEnd } - err := s.Append(ctx, item) - if err != nil { - level.Error(s.logger).Log("msg", "unable to append to serializer", "err", err) + s.series = append(s.series, item) + // If we would go over the max size then send, or if we have hit the flush duration then send. + if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush { + err := s.store(ctx) + if err != nil { + level.Error(s.logger).Log("msg", "unable to append to serializer", "err", err) + } } + return actor.WorkerContinue case item, ok := <-s.metaInbox.ReceiveC(): if !ok { return actor.WorkerEnd } - err := s.AppendMetadata(ctx, item) - if err != nil { - level.Error(s.logger).Log("msg", "unable to append metadata to serializer", "err", err) + s.meta = append(s.meta, item) + if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush { + err := s.store(ctx) + if err != nil { + level.Error(s.logger).Log("msg", "unable to append metadata to serializer", "err", err) + } } return actor.WorkerContinue case <-s.flushTestTimer.C: @@ -123,28 +131,6 @@ func (s *serializer) DoWork(ctx actor.Context) actor.WorkerStatus { } } -func (s *serializer) AppendMetadata(ctx actor.Context, data *types.TimeSeriesBinary) error { - s.meta = append(s.meta, data) - // If we would go over the max size then send, or if we have hit the flush duration then send. - if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush { - return s.store(ctx) - } else if time.Since(s.lastFlush) > s.flushFrequency { - return s.store(ctx) - } - return nil -} - -func (s *serializer) Append(ctx actor.Context, data *types.TimeSeriesBinary) error { - s.series = append(s.series, data) - // If we would go over the max size then send, or if we have hit the flush duration then send. 
- if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush { - return s.store(ctx) - } else if time.Since(s.lastFlush) > s.flushFrequency { - return s.store(ctx) - } - return nil -} From 2fe259f92264577b07fafcb13908aac8bd146674 Mon Sep 17 00:00:00 2001 From: mattdurham Date: Fri, 13 Sep 2024 11:12:03 -0400 Subject: [PATCH 09/11] Update internal/component/prometheus/remote/queue/types/serialization.go Co-authored-by: Piotr <17101802+thampiotr@users.noreply.github.com> --- .../component/prometheus/remote/queue/types/serialization.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/component/prometheus/remote/queue/types/serialization.go b/internal/component/prometheus/remote/queue/types/serialization.go index 38c2024725..7777055433 100644 --- a/internal/component/prometheus/remote/queue/types/serialization.go +++ b/internal/component/prometheus/remote/queue/types/serialization.go @@ -21,7 +21,7 @@ type SeriesGroup struct { } type TimeSeriesBinary struct { - // Labels are not serialized to msgp but are passed in. + // Labels are not serialized to msgp; instead we store a dictionary of strings separately and use `LabelsNames` and `LabelsValues` to refer into that dictionary by index. Labels labels.Labels `msg:"-"` LabelsNames []uint32 LabelsValues []uint32 From dd55897730d32657f821d439ea14a1694b0e1647 Mon Sep 17 00:00:00 2001 From: mattdurham Date: Fri, 13 Sep 2024 11:12:18 -0400 Subject: [PATCH 10/11] Update internal/component/prometheus/remote/queue/serialization/serializer.go Co-authored-by: Piotr <17101802+thampiotr@users.noreply.github.com> --- .../prometheus/remote/queue/serialization/serializer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/component/prometheus/remote/queue/serialization/serializer.go b/internal/component/prometheus/remote/queue/serialization/serializer.go index 78386dae89..78c8d1bbd2 100644 --- a/internal/component/prometheus/remote/queue/serialization/serializer.go +++ b/internal/component/prometheus/remote/queue/serialization/serializer.go @@ -13,7 +13,7 @@ import ( ) // serializer collects data from multiple appenders in-memory and will periodically flush the data to file.Storage. -// serializer will trigger based on the last flush duration OR if it hits a certain amount of items. +// serializer will flush based on a configured time duration OR if it hits a certain number of items. type serializer struct { inbox actor.Mailbox[*types.TimeSeriesBinary] metaInbox actor.Mailbox[*types.TimeSeriesBinary] From 9331c64b6c46939356c0e265addc21a9bad0dd8f Mon Sep 17 00:00:00 2001 From: matt durham Date: Fri, 13 Sep 2024 11:22:02 -0400 Subject: [PATCH 11/11] Change benchmark to be more specific. --- .../remote/queue/serialization/serializer.go | 18 +++++------ .../serialization/serializer_bench_test.go | 31 +++++++++++-------- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/internal/component/prometheus/remote/queue/serialization/serializer.go b/internal/component/prometheus/remote/queue/serialization/serializer.go index 78386dae89..d48c6fd121 100644 --- a/internal/component/prometheus/remote/queue/serialization/serializer.go +++ b/internal/component/prometheus/remote/queue/serialization/serializer.go @@ -101,7 +101,7 @@ func (s *serializer) DoWork(ctx actor.Context) actor.WorkerStatus { s.series = append(s.series, item) // If we would go over the max size then send, or if we have hit the flush duration then send. 
if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush { - err := s.store(ctx) + err := s.flushToDisk(ctx) if err != nil { level.Error(s.logger).Log("msg", "unable to append to serializer", "err", err) } @@ -114,7 +114,7 @@ func (s *serializer) DoWork(ctx actor.Context) actor.WorkerStatus { } s.meta = append(s.meta, item) if len(s.meta)+len(s.series) >= s.maxItemsBeforeFlush { - err := s.store(ctx) + err := s.flushToDisk(ctx) if err != nil { level.Error(s.logger).Log("msg", "unable to append metadata to serializer", "err", err) } @@ -122,7 +122,7 @@ func (s *serializer) DoWork(ctx actor.Context) actor.WorkerStatus { return actor.WorkerContinue case <-s.flushTestTimer.C: if time.Since(s.lastFlush) > s.flushFrequency { - err := s.store(ctx) + err := s.flushToDisk(ctx) if err != nil { level.Error(s.logger).Log("msg", "unable to store data", "err", err) } @@ -131,7 +131,7 @@ } } -func (s *serializer) store(ctx actor.Context) error { +func (s *serializer) flushToDisk(ctx actor.Context) error { var err error defer func() { s.lastFlush = time.Now() @@ -154,18 +154,18 @@ ... }() // This maps strings to index position in a slice. This is done to reduce the file size of the data. - strMapToInt := make(map[string]uint32) + strMapToIndex := make(map[string]uint32) for i, ts := range s.series { - ts.FillLabelMapping(strMapToInt) + ts.FillLabelMapping(strMapToIndex) group.Series[i] = ts } for i, ts := range s.meta { - ts.FillLabelMapping(strMapToInt) + ts.FillLabelMapping(strMapToIndex) group.Metadata[i] = ts } - stringsSlice := make([]string, len(strMapToInt)) - for stringValue, index := range strMapToInt { + stringsSlice := make([]string, len(strMapToIndex)) + for stringValue, index := range strMapToIndex { stringsSlice[index] = stringValue } group.Strings = stringsSlice diff --git a/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go b/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go index 5007a6df84..ffe7c64c1b 100644 --- a/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go +++ b/internal/component/prometheus/remote/queue/serialization/serializer_bench_test.go @@ -28,24 +28,19 @@ func BenchmarkAppender(b *testing.B) { } func BenchmarkSerializer(b *testing.B) { - // This should be around 200-300 allocs 7m ns/op - series := getTimeSeries(b) b.ResetTimer() b.ReportAllocs() + // This should be ~11 allocs and 1400-1800 ns/op. 
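+ // The serializer is constructed and started once, outside the timed loop, so
+ // each iteration measures only the cost of a single SendSeries call rather
+ // than serializer setup and teardown.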
logger := log.NewNopLogger() + serial, _ := NewSerializer(types.SerializerConfig{ + MaxSignalsInBatch: 1_000, + FlushFrequency: 1 * time.Second, + }, &fakeFileQueue{}, func(stats types.SerializerStats) {}, logger) + serial.Start() for i := 0; i < b.N; i++ { - serial, _ := NewSerializer(types.SerializerConfig{ - MaxSignalsInBatch: 1_000, - FlushFrequency: 1 * time.Second, - }, &fakeFileQueue{}, func(stats types.SerializerStats) { - - }, logger) - serial.Start() - for _, s := range series { - _ = serial.SendSeries(context.Background(), s) - } - serial.Stop() + _ = serial.SendSeries(context.Background(), getSingleTimeSeries(b)) } + serial.Stop() } func getTimeSeries(b *testing.B) []*types.TimeSeriesBinary { @@ -61,6 +56,16 @@ func getTimeSeries(b *testing.B) []*types.TimeSeriesBinary { return series } +func getSingleTimeSeries(b *testing.B) *types.TimeSeriesBinary { + b.Helper() + timeseries := types.GetTimeSeriesFromPool() + timeseries.TS = time.Now().Unix() + timeseries.Value = rand.Float64() + timeseries.Labels = getLabels() + return timeseries + +} + func getLabels() labels.Labels { retLbls := make(labels.Labels, 0) for i := 0; i < rand.Intn(20); i++ {