Migrate project

2022-09-07 17:17:11 +08:00
parent 12ea86b2fb
commit 5d4d02d679
29 changed files with 11431 additions and 43 deletions


@@ -0,0 +1,23 @@
package logproto

import "github.com/prometheus/prometheus/pkg/labels"

// Note: this is not very efficient, and its use should be minimized, as it
// requires label construction on each comparison.
type SeriesIdentifiers []SeriesIdentifier

func (ids SeriesIdentifiers) Len() int      { return len(ids) }
func (ids SeriesIdentifiers) Swap(i, j int) { ids[i], ids[j] = ids[j], ids[i] }
func (ids SeriesIdentifiers) Less(i, j int) bool {
    a, b := labels.FromMap(ids[i].Labels), labels.FromMap(ids[j].Labels)
    return labels.Compare(a, b) <= 0
}

type Streams []Stream

func (xs Streams) Len() int           { return len(xs) }
func (xs Streams) Swap(i, j int)      { xs[i], xs[j] = xs[j], xs[i] }
func (xs Streams) Less(i, j int) bool { return xs[i].Labels <= xs[j].Labels }

func (s Series) Len() int           { return len(s.Samples) }
func (s Series) Swap(i, j int)      { s.Samples[i], s.Samples[j] = s.Samples[j], s.Samples[i] }
func (s Series) Less(i, j int) bool { return s.Samples[i].Timestamp < s.Samples[j].Timestamp }
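
The sort.Interface implementations above plug into the standard library. A minimal usage sketch (the main package and the label values are illustrative, not part of the commit):

package main

import (
    "fmt"
    "sort"

    "github.com/lixh00/loki-client-go/pkg/logproto"
)

func main() {
    // Sorting SeriesIdentifiers rebuilds a labels.Labels from the map on every
    // comparison (see the note above), so keep such sorts small and infrequent.
    ids := logproto.SeriesIdentifiers{
        {Labels: map[string]string{"job": "b"}},
        {Labels: map[string]string{"job": "a"}},
    }
    sort.Sort(ids)

    // Streams sort cheaply by their label string.
    streams := logproto.Streams{{Labels: `{job="b"}`}, {Labels: `{job="a"}`}}
    sort.Sort(streams)

    fmt.Println(ids[0].Labels, streams[0].Labels)
}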

8031
pkg/logproto/logproto.pb.go Normal file

File diff suppressed because it is too large.

166
pkg/logproto/logproto.proto Normal file

@@ -0,0 +1,166 @@
syntax = "proto3";
package logproto;
option go_package = "github.com/lixh00/loki-client-go/pkg/logproto";
import "google/protobuf/timestamp.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
service Pusher {
rpc Push(PushRequest) returns (PushResponse) {};
}
service Querier {
rpc Query(QueryRequest) returns (stream QueryResponse) {};
rpc QuerySample(SampleQueryRequest) returns (stream SampleQueryResponse) {};
rpc Label(LabelRequest) returns (LabelResponse) {};
rpc Tail(TailRequest) returns (stream TailResponse) {};
rpc Series(SeriesRequest) returns (SeriesResponse) {};
rpc TailersCount(TailersCountRequest) returns (TailersCountResponse) {};
rpc GetChunkIDs(GetChunkIDsRequest) returns (GetChunkIDsResponse) {}; // GetChunkIDs returns ChunkIDs from the index store holding logs for given selectors and time-range.
}
service Ingester {
rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {};
}
message PushRequest {
repeated StreamAdapter streams = 1 [(gogoproto.jsontag) = "streams", (gogoproto.customtype) = "Stream"];
}
message PushResponse {
}
message QueryRequest {
string selector = 1;
uint32 limit = 2;
google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
Direction direction = 5;
reserved 6;
repeated string shards = 7 [(gogoproto.jsontag) = "shards,omitempty"];
}
message SampleQueryRequest {
string selector = 1;
google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"];
}
message SampleQueryResponse {
repeated Series series = 1 [(gogoproto.customtype) = "Series", (gogoproto.nullable) = true];
}
enum Direction {
FORWARD = 0;
BACKWARD = 1;
}
message QueryResponse {
repeated StreamAdapter streams = 1 [(gogoproto.customtype) = "Stream", (gogoproto.nullable) = true];
}
message LabelRequest {
string name = 1;
bool values = 2; // True to fetch label values, false for fetch labels names.
google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true];
google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true];
}
message LabelResponse {
repeated string values = 1;
}
message StreamAdapter {
string labels = 1 [(gogoproto.jsontag) = "labels"];
repeated EntryAdapter entries = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "entries"];
}
message EntryAdapter {
google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false, (gogoproto.jsontag) = "ts"];
string line = 2 [(gogoproto.jsontag) = "line"];
}
message Sample {
int64 timestamp = 1 [(gogoproto.jsontag) = "ts"];
double value = 2 [(gogoproto.jsontag) = "value"];
uint64 hash = 3 [(gogoproto.jsontag) = "hash"];
}
message Series {
string labels = 1 [(gogoproto.jsontag) = "labels"];
repeated Sample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "samples"];
}
message TailRequest {
string query = 1;
reserved 2;
uint32 delayFor = 3;
uint32 limit = 4;
google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message TailResponse {
StreamAdapter stream = 1 [(gogoproto.customtype) = "Stream"];
repeated DroppedStream droppedStreams = 2;
}
message SeriesRequest {
google.protobuf.Timestamp start = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp end = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
repeated string groups = 3;
}
message SeriesResponse {
repeated SeriesIdentifier series = 1 [(gogoproto.nullable) = false];
}
message SeriesIdentifier {
map<string,string> labels = 1;
}
message DroppedStream {
google.protobuf.Timestamp from = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp to = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
string labels = 3;
}
message TimeSeriesChunk {
string from_ingester_id = 1;
string user_id = 2;
repeated LabelPair labels = 3;
repeated Chunk chunks = 4;
}
message LabelPair {
string name = 1;
string value = 2;
}
message Chunk {
bytes data = 1;
}
message TransferChunksResponse {
}
message TailersCountRequest {
}
message TailersCountResponse {
uint32 count = 1;
}
message GetChunkIDsRequest {
string matchers = 1;
google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message GetChunkIDsResponse {
repeated string chunkIDs = 1;
}
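
For reference, pushing through the Pusher service defined above would look roughly like the sketch below. This assumes the suppressed logproto.pb.go contains the usual gogo/gRPC-generated client (NewPusherClient), and the endpoint address is a placeholder:

package main

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"

    "github.com/lixh00/loki-client-go/pkg/logproto"
)

func main() {
    // "localhost:9095" stands in for a reachable Loki gRPC endpoint.
    conn, err := grpc.Dial("localhost:9095", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := logproto.NewPusherClient(conn)
    req := &logproto.PushRequest{
        Streams: []logproto.Stream{{
            Labels:  `{job="example"}`,
            Entries: []logproto.Entry{{Timestamp: time.Now(), Line: "hello from the migrated client"}},
        }},
    }

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    if _, err := client.Push(ctx, req); err != nil {
        log.Fatal(err)
    }
}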

106
pkg/logproto/timestamp.go Normal file

@@ -0,0 +1,106 @@
package logproto

import (
    "errors"
    strconv "strconv"
    time "time"

    "github.com/gogo/protobuf/types"
)

const (
    // Seconds field of the earliest valid Timestamp.
    // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
    minValidSeconds = -62135596800
    // Seconds field just after the latest valid Timestamp.
    // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
    maxValidSeconds = 253402300800
)

// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *types.Timestamp) error {
    if ts == nil {
        return errors.New("timestamp: nil Timestamp")
    }
    if ts.Seconds < minValidSeconds {
        return errors.New("timestamp: " + formatTimestamp(ts) + " before 0001-01-01")
    }
    if ts.Seconds >= maxValidSeconds {
        return errors.New("timestamp: " + formatTimestamp(ts) + " after 10000-01-01")
    }
    if ts.Nanos < 0 || ts.Nanos >= 1e9 {
        return errors.New("timestamp: " + formatTimestamp(ts) + ": nanos not in range [0, 1e9)")
    }
    return nil
}

// formatTimestamp is equivalent to fmt.Sprintf("%#v", ts)
// but avoids the escape incurred by using fmt.Sprintf, eliminating
// unnecessary heap allocations.
func formatTimestamp(ts *types.Timestamp) string {
    if ts == nil {
        return "nil"
    }
    seconds := strconv.FormatInt(ts.Seconds, 10)
    nanos := strconv.FormatInt(int64(ts.Nanos), 10)
    return "&types.Timestamp{Seconds: " + seconds + ",\nNanos: " + nanos + ",\n}"
}

// SizeOfStdTime returns the encoded size of t as a protobuf Timestamp.
func SizeOfStdTime(t time.Time) int {
    ts, err := timestampProto(t)
    if err != nil {
        return 0
    }
    return ts.Size()
}

// StdTimeMarshalTo marshals t as a protobuf Timestamp into data and returns the number of bytes written.
func StdTimeMarshalTo(t time.Time, data []byte) (int, error) {
    ts, err := timestampProto(t)
    if err != nil {
        return 0, err
    }
    return ts.MarshalTo(data)
}

// StdTimeUnmarshal decodes a protobuf Timestamp from data into t.
func StdTimeUnmarshal(t *time.Time, data []byte) error {
    ts := &types.Timestamp{}
    if err := ts.Unmarshal(data); err != nil {
        return err
    }
    tt, err := timestampFromProto(ts)
    if err != nil {
        return err
    }
    *t = tt
    return nil
}

func timestampFromProto(ts *types.Timestamp) (time.Time, error) {
    // Don't return the zero value on error, because it corresponds to a valid
    // timestamp. Instead return whatever time.Unix gives us.
    var t time.Time
    if ts == nil {
        t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
    } else {
        t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
    }
    return t, validateTimestamp(ts)
}

func timestampProto(t time.Time) (types.Timestamp, error) {
    ts := types.Timestamp{
        Seconds: t.Unix(),
        Nanos:   int32(t.Nanosecond()),
    }
    return ts, validateTimestamp(&ts)
}
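
A quick round-trip through the exported helpers above, as a usage sketch (only SizeOfStdTime, StdTimeMarshalTo, and StdTimeUnmarshal come from this file; the rest is illustrative):

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/lixh00/loki-client-go/pkg/logproto"
)

func main() {
    t := time.Date(2022, 9, 7, 17, 17, 11, 0, time.UTC)

    // Encode the time.Time as a protobuf Timestamp.
    buf := make([]byte, logproto.SizeOfStdTime(t))
    if _, err := logproto.StdTimeMarshalTo(t, buf); err != nil {
        log.Fatal(err)
    }

    // Decode it back; times outside [0001-01-01, 10000-01-01) would fail validation.
    var got time.Time
    if err := logproto.StdTimeUnmarshal(&got, buf); err != nil {
        log.Fatal(err)
    }
    fmt.Println(got.Equal(t)) // true
}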

475
pkg/logproto/types.go Normal file

@@ -0,0 +1,475 @@
package logproto
import (
"fmt"
"io"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/prometheus/prometheus/promql/parser"
)
// Stream contains a unique label set, as a string, and a set of entries for it.
// We are not using the proto-generated version but this custom one so that we
// can improve serialization; see the benchmarks in types_test.go.
type Stream struct {
Labels string `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels"`
Entries []Entry `protobuf:"bytes,2,rep,name=entries,proto3,customtype=EntryAdapter" json:"entries"`
}
// MarshalJSON implements the json.Marshaler interface.
func (r *PushRequest) MarshalJSON() ([]byte, error) {
stream := jsoniter.ConfigDefault.BorrowStream(nil)
defer jsoniter.ConfigDefault.ReturnStream(stream)
stream.WriteObjectStart()
stream.WriteObjectField("streams")
stream.WriteArrayStart()
for i, s := range r.Streams {
stream.WriteObjectStart()
stream.WriteObjectField("stream")
stream.WriteObjectStart()
lbs, err := parser.ParseMetric(s.Labels)
if err != nil {
continue
}
for i, lb := range lbs {
stream.WriteObjectField(lb.Name)
stream.WriteStringWithHTMLEscaped(lb.Value)
if i != len(lbs)-1 {
stream.WriteMore()
}
}
stream.WriteObjectEnd()
stream.WriteMore()
stream.WriteObjectField("values")
stream.WriteArrayStart()
for i, entry := range s.Entries {
stream.WriteArrayStart()
stream.WriteRaw(fmt.Sprintf(`"%d"`, entry.Timestamp.UnixNano()))
stream.WriteMore()
stream.WriteStringWithHTMLEscaped(entry.Line)
stream.WriteArrayEnd()
if i != len(s.Entries)-1 {
stream.WriteMore()
}
}
stream.WriteArrayEnd()
stream.WriteObjectEnd()
if i != len(r.Streams)-1 {
stream.WriteMore()
}
}
stream.WriteArrayEnd()
stream.WriteObjectEnd()
return stream.Buffer(), nil
}
// Entry is a log entry with a timestamp.
type Entry struct {
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
}
func (m *Stream) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Stream) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Labels) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels)))
i += copy(dAtA[i:], m.Labels)
}
if len(m.Entries) > 0 {
for _, msg := range m.Entries {
dAtA[i] = 0x12
i++
i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *Entry) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintLogproto(dAtA, i, uint64(SizeOfStdTime(m.Timestamp)))
n5, err := StdTimeMarshalTo(m.Timestamp, dAtA[i:])
if err != nil {
return 0, err
}
i += n5
if len(m.Line) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintLogproto(dAtA, i, uint64(len(m.Line)))
i += copy(dAtA[i:], m.Line)
}
return i, nil
}
func (m *Stream) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Stream: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Stream: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthLogproto
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthLogproto
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Labels = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogproto
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogproto
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Entries = append(m.Entries, Entry{})
if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogproto(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthLogproto
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthLogproto
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Entry) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Entry: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogproto
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogproto
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthLogproto
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthLogproto
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Line = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogproto(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthLogproto
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthLogproto
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Stream) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Labels)
if l > 0 {
n += 1 + l + sovLogproto(uint64(l))
}
if len(m.Entries) > 0 {
for _, e := range m.Entries {
l = e.Size()
n += 1 + l + sovLogproto(uint64(l))
}
}
return n
}
func (m *Entry) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = SizeOfStdTime(m.Timestamp)
n += 1 + l + sovLogproto(uint64(l))
l = len(m.Line)
if l > 0 {
n += 1 + l + sovLogproto(uint64(l))
}
return n
}
func (m *Stream) Equal(that interface{}) bool {
if that == nil {
return m == nil
}
that1, ok := that.(*Stream)
if !ok {
that2, ok := that.(Stream)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return m == nil
} else if m == nil {
return false
}
if m.Labels != that1.Labels {
return false
}
if len(m.Entries) != len(that1.Entries) {
return false
}
for i := range m.Entries {
if !m.Entries[i].Equal(that1.Entries[i]) {
return false
}
}
return true
}
func (m *Entry) Equal(that interface{}) bool {
if that == nil {
return m == nil
}
that1, ok := that.(*Entry)
if !ok {
that2, ok := that.(Entry)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return m == nil
} else if m == nil {
return false
}
if !m.Timestamp.Equal(that1.Timestamp) {
return false
}
if m.Line != that1.Line {
return false
}
return true
}
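
The custom PushRequest.MarshalJSON above produces the Loki HTTP push payload shape. A small sketch of what it emits (label values and timestamps are illustrative, and the expected-output comment shows the shape rather than byte-exact output):

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/lixh00/loki-client-go/pkg/logproto"
)

func main() {
    req := &logproto.PushRequest{
        Streams: []logproto.Stream{{
            Labels:  `{job="example"}`,
            Entries: []logproto.Entry{{Timestamp: time.Unix(0, 1662542231000000000), Line: "hello"}},
        }},
    }

    b, err := req.MarshalJSON()
    if err != nil {
        log.Fatal(err)
    }
    // Expected shape: one object per stream, label pairs under "stream",
    // and entries under "values" as ["<unix-nanos-string>", "<line>"] pairs:
    // {"streams":[{"stream":{"job":"example"},"values":[["1662542231000000000","hello"]]}]}
    fmt.Println(string(b))
}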

111
pkg/logproto/types_test.go Normal file

@@ -0,0 +1,111 @@
package logproto
import (
"testing"
time "time"
"github.com/stretchr/testify/require"
)
var (
now = time.Now().UTC()
line = `level=info ts=2019-12-12T15:00:08.325Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHNM71GRCJS7M34Q0EV7 sources="[01DVWNC6NWY1A60AZV3Z6DGS65 01DVWW7XXX75GHA6ZDTD170CSZ 01DVX33N5W86CWJJVRPAVXJRWJ]" duration=2.897213221s`
stream = Stream{
Labels: `{job="foobar", cluster="foo-central1", namespace="bar", container_name="buzz"}`,
Entries: []Entry{
{now, line},
{now.Add(1 * time.Second), line},
{now.Add(2 * time.Second), line},
{now.Add(3 * time.Second), line},
},
}
streamAdapter = StreamAdapter{
Labels: `{job="foobar", cluster="foo-central1", namespace="bar", container_name="buzz"}`,
Entries: []EntryAdapter{
{now, line},
{now.Add(1 * time.Second), line},
{now.Add(2 * time.Second), line},
{now.Add(3 * time.Second), line},
},
}
)
func TestStream(t *testing.T) {
avg := testing.AllocsPerRun(200, func() {
b, err := stream.Marshal()
require.NoError(t, err)
var new Stream
err = new.Unmarshal(b)
require.NoError(t, err)
require.Equal(t, stream, new)
})
t.Log("avg allocs per run:", avg)
}
func TestStreamAdapter(t *testing.T) {
avg := testing.AllocsPerRun(200, func() {
b, err := streamAdapter.Marshal()
require.NoError(t, err)
var new StreamAdapter
err = new.Unmarshal(b)
require.NoError(t, err)
require.Equal(t, streamAdapter, new)
})
t.Log("avg allocs per run:", avg)
}
func TestCompatibility(t *testing.T) {
b, err := stream.Marshal()
require.NoError(t, err)
var adapter StreamAdapter
err = adapter.Unmarshal(b)
require.NoError(t, err)
require.Equal(t, streamAdapter, adapter)
ba, err := adapter.Marshal()
require.NoError(t, err)
require.Equal(t, b, ba)
var new Stream
err = new.Unmarshal(ba)
require.NoError(t, err)
require.Equal(t, stream, new)
}
func BenchmarkStream(b *testing.B) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
by, err := stream.Marshal()
if err != nil {
b.Fatal(err)
}
var new Stream
err = new.Unmarshal(by)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkStreamAdapter(b *testing.B) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
by, err := streamAdapter.Marshal()
if err != nil {
b.Fatal(err)
}
var new StreamAdapter
err = new.Unmarshal(by)
if err != nil {
b.Fatal(err)
}
}
}