|
|
@@ -0,0 +1,1123 @@
|
|
|
+////////////////////////////////////////////////////////////////////////////////
|
|
|
+//
|
|
|
+// DO NOT MODIFY
|
|
|
+//
|
|
|
+// ┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻
|
|
|
+//
|
|
|
+//
|
|
|
+// This source file was automatically generated by bingen.
|
|
|
+//
|
|
|
+////////////////////////////////////////////////////////////////////////////////
|
|
|
+
|
|
|
+package metric
|
|
|
+
|
|
|
+import (
|
|
|
+ "fmt"
|
|
|
+ "io"
|
|
|
+ "iter"
|
|
|
+ "os"
|
|
|
+ "reflect"
|
|
|
+ "strings"
|
|
|
+ "sync"
|
|
|
+ "time"
|
|
|
+ "unsafe"
|
|
|
+
|
|
|
+ util "github.com/opencost/opencost/core/pkg/util"
|
|
|
+)
|
|
|
+
|
|
|
const (
	// GeneratorPackageName is the package the generator is targeting.
	GeneratorPackageName string = "metric"
)

// BinaryTags represent the formatting tag used for specific optimization features
const (
	// BinaryTagStringTable is written and/or read prior to the existence of a string
	// table (where each index is encoded as a string entry in the resource).
	BinaryTagStringTable string = "BGST"
)

const (
	// DefaultCodecVersion is used for any resources listed in the Default version set.
	// Decoders accept payloads with a version less than or equal to this value.
	DefaultCodecVersion uint8 = 1
)
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// Configuration
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
var (
	// bingenConfigLock guards all reads and writes of bingenConfig.
	bingenConfigLock sync.RWMutex
	// bingenConfig holds the active configuration; it is never nil because it
	// is initialized with the defaults and ConfigureBingen replaces nil input.
	bingenConfig *BingenConfiguration = DefaultBingenConfiguration()
)
|
|
|
+
|
|
|
// BingenConfiguration is used to set any custom configuration in the way files are encoded
// or decoded.
type BingenConfiguration struct {
	// FileBackedStringTableEnabled enables the use of file-backed string tables for streaming
	// bingen decoding.
	FileBackedStringTableEnabled bool

	// FileBackedStringTableDir is the directory to write the string table files for reading.
	FileBackedStringTableDir string
}
|
|
|
+
|
|
|
+// DefaultBingenConfiguration creates the default implementation of the bingen configuration
|
|
|
+// and returns it.
|
|
|
+func DefaultBingenConfiguration() *BingenConfiguration {
|
|
|
+ return &BingenConfiguration{
|
|
|
+ FileBackedStringTableEnabled: false,
|
|
|
+ FileBackedStringTableDir: os.TempDir(),
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+// ConfigureBingen accepts a new *BingenConfiguration instance which updates the internal decoder
|
|
|
+// and encoder behavior.
|
|
|
+func ConfigureBingen(config *BingenConfiguration) {
|
|
|
+ bingenConfigLock.Lock()
|
|
|
+ defer bingenConfigLock.Unlock()
|
|
|
+
|
|
|
+ if config == nil {
|
|
|
+ config = DefaultBingenConfiguration()
|
|
|
+ }
|
|
|
+ bingenConfig = config
|
|
|
+}
|
|
|
+
|
|
|
+// IsBingenFileBackedStringTableEnabled accessor for file backed string table configuration
|
|
|
+func IsBingenFileBackedStringTableEnabled() bool {
|
|
|
+ bingenConfigLock.RLock()
|
|
|
+ defer bingenConfigLock.RUnlock()
|
|
|
+
|
|
|
+ return bingenConfig.FileBackedStringTableEnabled
|
|
|
+}
|
|
|
+
|
|
|
+// BingenFileBackedStringTableDir returns the directory configured for file backed string tables.
|
|
|
+func BingenFileBackedStringTableDir() string {
|
|
|
+ bingenConfigLock.RLock()
|
|
|
+ defer bingenConfigLock.RUnlock()
|
|
|
+
|
|
|
+ return bingenConfig.FileBackedStringTableDir
|
|
|
+}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// Type Map
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
// Generated type map for resolving interface implementations to
// concrete types by their simple (unqualified) names.
var typeMap map[string]reflect.Type = map[string]reflect.Type{
	"Update":    reflect.TypeFor[Update](),
	"UpdateSet": reflect.TypeFor[UpdateSet](),
}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// Type Helpers
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
// isBinaryTag returns true when the first bytes in the provided binary match the tag.
// Data shorter than the tag never matches.
func isBinaryTag(data []byte, tag string) bool {
	n := len(tag)
	return len(data) >= n && string(data[:n]) == tag
}
|
|
|
+
|
|
|
+// isReaderBinaryTag is used to peek the header for an io.Reader Buffer
|
|
|
+func isReaderBinaryTag(buff *util.Buffer, tag string) bool {
|
|
|
+ data, err := buff.Peek(len(tag))
|
|
|
+ if err != nil && err != io.EOF {
|
|
|
+ panic(fmt.Sprintf("called Peek() on a non buffered reader: %s", err))
|
|
|
+ }
|
|
|
+ if len(data) < len(tag) {
|
|
|
+ return false
|
|
|
+ }
|
|
|
+
|
|
|
+ return string(data[:len(tag)]) == tag
|
|
|
+}
|
|
|
+
|
|
|
// appendBytes combines a and b into a new byte array.
//
// A fresh backing array of exactly len(a)+len(b) bytes is allocated, so the
// result never aliases either input slice.
func appendBytes(a []byte, b []byte) []byte {
	// allocate once at the final size, then let append perform the copies
	result := make([]byte, 0, len(a)+len(b))
	result = append(result, a...)
	return append(result, b...)
}
|
|
|
+
|
|
|
+// typeToString determines the basic properties of the type, the qualifier, package path, and
|
|
|
+// type name, and returns the qualified type
|
|
|
+func typeToString(f interface{}) string {
|
|
|
+ qual := ""
|
|
|
+ t := reflect.TypeOf(f)
|
|
|
+ if t.Kind() == reflect.Ptr {
|
|
|
+ t = t.Elem()
|
|
|
+ qual = "*"
|
|
|
+ }
|
|
|
+
|
|
|
+ return fmt.Sprintf("%s%s.%s", qual, t.PkgPath(), t.Name())
|
|
|
+}
|
|
|
+
|
|
|
+// resolveType uses the name of a type and returns the package, base type name, and whether
|
|
|
+// or not it's a pointer.
|
|
|
+func resolveType(t string) (pkg string, name string, isPtr bool) {
|
|
|
+ isPtr = t[:1] == "*"
|
|
|
+ if isPtr {
|
|
|
+ t = t[1:]
|
|
|
+ }
|
|
|
+
|
|
|
+ slashIndex := strings.LastIndex(t, "/")
|
|
|
+ if slashIndex >= 0 {
|
|
|
+ t = t[slashIndex+1:]
|
|
|
+ }
|
|
|
+ parts := strings.Split(t, ".")
|
|
|
+ if parts[0] == GeneratorPackageName {
|
|
|
+ parts[0] = ""
|
|
|
+ }
|
|
|
+
|
|
|
+ pkg = parts[0]
|
|
|
+ name = parts[1]
|
|
|
+ return
|
|
|
+}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// Stream Helpers
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
// StreamFactoryFunc is an alias for a func that creates a BingenStream implementation.
type StreamFactoryFunc func(io.Reader) BingenStream

// Generated streamable factory map for finding the specific new stream methods
// by the streamed type. Only types annotated as 'streamable' by the generator
// appear here.
var streamFactoryMap map[reflect.Type]StreamFactoryFunc = map[reflect.Type]StreamFactoryFunc{
	reflect.TypeFor[UpdateSet](): NewUpdateSetStream,
}
|
|
|
+
|
|
|
+// NewStreamFor accepts an io.Reader, and returns a new BingenStream for the generic T
|
|
|
+// type provided _if_ it is a registered bingen type that is annotated as 'streamable'. See
|
|
|
+// the streamFactoryMap for generated type listings.
|
|
|
+func NewStreamFor[T any](reader io.Reader) (BingenStream, error) {
|
|
|
+ typeKey := reflect.TypeFor[T]()
|
|
|
+
|
|
|
+ factory, ok := streamFactoryMap[typeKey]
|
|
|
+ if !ok {
|
|
|
+ return nil, fmt.Errorf("the type: %s is not a registered bingen streamable type", typeKey.Name())
|
|
|
+ }
|
|
|
+
|
|
|
+ return factory(reader), nil
|
|
|
+}
|
|
|
+
|
|
|
// BingenStream is the stream interface for all streamable types.
type BingenStream interface {
	// Stream returns the iterator which will stream each field of the target type and
	// return the field info as well as the value.
	Stream() iter.Seq2[BingenFieldInfo, *BingenValue]

	// Close will close any dynamic io.Reader used to stream in the fields.
	Close()

	// Error returns an error if one occurred during the process of streaming the type's fields.
	// This can be checked after iterating through the Stream().
	Error() error
}
|
|
|
+
|
|
|
// BingenValue contains the value of a field as well as any index/key associated with that value.
type BingenValue struct {
	Value any
	Index any
}

// IsNil is just a method accessor way to check to see if the value returned was nil.
func (bv *BingenValue) IsNil() bool {
	return bv == nil
}

// singleV creates a single BingenValue instance without a key or index.
func singleV(value any) *BingenValue {
	v := new(BingenValue)
	v.Value = value
	return v
}

// pairV creates a pair of key/index and value.
func pairV(index any, value any) *BingenValue {
	v := new(BingenValue)
	v.Index = index
	v.Value = value
	return v
}
|
|
|
+
|
|
|
// BingenFieldInfo contains the type of the field being streamed as well as the name of the field.
type BingenFieldInfo struct {
	// Type is the reflected type of the streamed field.
	Type reflect.Type
	// Name is the field's name on the streamed struct.
	Name string
}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// String Table Writer
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
// StringTableWriter maps strings to specific indices for encoding.
// All methods are safe for concurrent use.
type StringTableWriter struct {
	l       sync.Mutex
	indices map[string]int
	next    int
}

// NewStringTableWriter Creates a new StringTableWriter instance with provided contents.
// Each initial entry receives the index of its position in contents.
func NewStringTableWriter(contents ...string) *StringTableWriter {
	indices := make(map[string]int, len(contents))
	for i, entry := range contents {
		indices[entry] = i
	}

	return &StringTableWriter{
		indices: indices,
		next:    len(contents),
	}
}

// AddOrGet atomically retrieves a string entry's index if it exists. Otherwise, it will
// add the entry and return the newly assigned index.
func (st *StringTableWriter) AddOrGet(s string) int {
	st.l.Lock()
	defer st.l.Unlock()

	ind, ok := st.indices[s]
	if !ok {
		ind = st.next
		st.indices[s] = ind
		st.next++
	}
	return ind
}

// ToSlice Converts the contents to a string array for encoding, with each
// string placed at its assigned index.
func (st *StringTableWriter) ToSlice() []string {
	st.l.Lock()
	defer st.l.Unlock()

	out := make([]string, st.next)
	for entry, idx := range st.indices {
		out[idx] = entry
	}
	return out
}
|
|
|
+
|
|
|
+// ToBytes Converts the contents to a binary encoded representation
|
|
|
+func (st *StringTableWriter) ToBytes() []byte {
|
|
|
+ buff := util.NewBuffer()
|
|
|
+ buff.WriteBytes([]byte(BinaryTagStringTable)) // bingen table header
|
|
|
+
|
|
|
+ strs := st.ToSlice()
|
|
|
+
|
|
|
+ buff.WriteInt(len(strs)) // table length
|
|
|
+ for _, s := range strs {
|
|
|
+ buff.WriteString(s)
|
|
|
+ }
|
|
|
+
|
|
|
+ return buff.Bytes()
|
|
|
+}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// String Table Reader
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
// StringTableReader is the interface used to read the string table from the decoding.
type StringTableReader interface {
	// At returns the string entry at a specific index, or panics on out of bounds.
	At(index int) string

	// Len returns the total number of strings loaded in the string table.
	Len() int

	// Close will clear the loaded table, and drop any external resources used.
	Close() error
}
|
|
|
+
|
|
|
// SliceStringTableReader is a basic pre-loaded []string that provides index-based access.
// The cost of this implementation is holding all strings in memory, which provides faster
// lookup performance for memory usage.
type SliceStringTableReader struct {
	// table holds every decoded string; the slice index is the table index.
	table []string
}
|
|
|
+
|
|
|
+// NewSliceStringTableReaderFrom creates a new SliceStringTableReader instance loading
|
|
|
+// data directly from the buffer. The buffer's position should start at the table length.
|
|
|
+func NewSliceStringTableReaderFrom(buffer *util.Buffer) StringTableReader {
|
|
|
+ // table length
|
|
|
+ tl := buffer.ReadInt()
|
|
|
+
|
|
|
+ var table []string
|
|
|
+ if tl > 0 {
|
|
|
+ table = make([]string, tl)
|
|
|
+ for i := range tl {
|
|
|
+ table[i] = buffer.ReadString()
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return &SliceStringTableReader{
|
|
|
+ table: table,
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+// At returns the string entry at a specific index, or panics on out of bounds.
|
|
|
+func (sstr *SliceStringTableReader) At(index int) string {
|
|
|
+ if index < 0 || index >= len(sstr.table) {
|
|
|
+ panic(fmt.Errorf("%s: string table index out of bounds: %d", GeneratorPackageName, index))
|
|
|
+ }
|
|
|
+
|
|
|
+ return sstr.table[index]
|
|
|
+}
|
|
|
+
|
|
|
+// Len returns the total number of strings loaded in the string table.
|
|
|
+func (sstr *SliceStringTableReader) Len() int {
|
|
|
+ if sstr == nil {
|
|
|
+ return 0
|
|
|
+ }
|
|
|
+
|
|
|
+ return len(sstr.table)
|
|
|
+}
|
|
|
+
|
|
|
// Close for the slice tables just nils out the slice (releasing the backing
// array to the GC) and returns; there are no external resources to release.
func (sstr *SliceStringTableReader) Close() error {
	sstr.table = nil
	return nil
}
|
|
|
+
|
|
|
// fileStringRef maps a bingen string-table index to a payload stored in a temp file.
type fileStringRef struct {
	// off is the byte offset of the payload within the temp file.
	off int64
	// length is the payload size in bytes; zero means the empty string.
	length int
}
|
|
|
+
|
|
|
// FileStringTableReader leverages a local file to write string table data for lookup. On
// memory focused systems, this allows a slower parse with a significant decrease in memory
// usage. This implementation is often paired with streaming readers for high throughput with
// reduced memory usage.
type FileStringTableReader struct {
	// f is the temp file containing all string payloads back-to-back.
	f *os.File
	// refs maps each table index to its payload's offset/length in f.
	refs []fileStringRef
}
|
|
|
+
|
|
|
+// NewFileStringTableFromBuffer reads exactly tl length-prefixed (uint16) string payloads from buffer
|
|
|
+// and appends each payload to a new temp file. It does not retain full strings in memory.
|
|
|
+func NewFileStringTableReaderFrom(buffer *util.Buffer, dir string) StringTableReader {
|
|
|
+ // helper func to cast a string in-place to a byte slice.
|
|
|
+ // NOTE: Return value is READ-ONLY. DO NOT MODIFY!
|
|
|
+ byteSliceFor := func(s string) []byte {
|
|
|
+ return unsafe.Slice(unsafe.StringData(s), len(s))
|
|
|
+ }
|
|
|
+
|
|
|
+ err := os.MkdirAll(dir, 0755)
|
|
|
+ if err != nil {
|
|
|
+ panic(fmt.Errorf("%s: failed to create string table directory: %w", GeneratorPackageName, err))
|
|
|
+ }
|
|
|
+
|
|
|
+ f, err := os.CreateTemp(dir, fmt.Sprintf("%s-bgst-*", GeneratorPackageName))
|
|
|
+ if err != nil {
|
|
|
+ panic(fmt.Errorf("%s: failed to create string table file: %w", GeneratorPackageName, err))
|
|
|
+ }
|
|
|
+
|
|
|
+ var writeErr error
|
|
|
+ defer func() {
|
|
|
+ if writeErr != nil {
|
|
|
+ _ = f.Close()
|
|
|
+ }
|
|
|
+ }()
|
|
|
+
|
|
|
+ // table length
|
|
|
+ tl := buffer.ReadInt()
|
|
|
+
|
|
|
+ var refs []fileStringRef
|
|
|
+ if tl > 0 {
|
|
|
+ refs = make([]fileStringRef, tl)
|
|
|
+
|
|
|
+ for i := range tl {
|
|
|
+ payload := byteSliceFor(buffer.ReadString())
|
|
|
+
|
|
|
+ var off int64
|
|
|
+ if len(payload) > 0 {
|
|
|
+ off, err = f.Seek(0, io.SeekEnd)
|
|
|
+ if err != nil {
|
|
|
+ writeErr = fmt.Errorf("%s: failed to seek string table file: %w", GeneratorPackageName, err)
|
|
|
+ panic(writeErr)
|
|
|
+ }
|
|
|
+ if _, err := f.Write(payload); err != nil {
|
|
|
+ writeErr = fmt.Errorf("%s: failed to write string table entry %d: %w", GeneratorPackageName, i, err)
|
|
|
+ panic(writeErr)
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ refs[i] = fileStringRef{
|
|
|
+ off: off,
|
|
|
+ length: len(payload),
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return &FileStringTableReader{
|
|
|
+ f: f,
|
|
|
+ refs: refs,
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
// At returns the string from the internal file using the reference's offset and length.
// It panics if the reader is closed/uninitialized or the index is out of bounds.
//
// NOTE(review): a ReadAt failure is silently swallowed and returned as "" —
// callers cannot distinguish a read error from a genuinely empty entry.
// Confirm whether this should panic like the other error paths here.
func (fstr *FileStringTableReader) At(index int) string {
	if fstr == nil || fstr.f == nil {
		panic(fmt.Errorf("%s: failed to read file string table data", GeneratorPackageName))
	}
	if index < 0 || index >= len(fstr.refs) {
		panic(fmt.Errorf("%s: string table index out of bounds: %d", GeneratorPackageName, index))
	}

	ref := fstr.refs[index]
	if ref.length == 0 {
		// zero-length entries never touch the file
		return ""
	}

	b := make([]byte, ref.length)
	_, err := fstr.f.ReadAt(b, ref.off)
	if err != nil {
		return ""
	}

	// cast the allocated bytes to a string in-place, as we
	// were the ones that allocated the bytes (no copy needed)
	return unsafe.String(unsafe.SliceData(b), len(b))
}
|
|
|
+
|
|
|
+// Len returns the total number of strings loaded in the string table.
|
|
|
+func (fstr *FileStringTableReader) Len() int {
|
|
|
+ if fstr == nil {
|
|
|
+ return 0
|
|
|
+ }
|
|
|
+
|
|
|
+ return len(fstr.refs)
|
|
|
+}
|
|
|
+
|
|
|
+// Close for the file string table reader closes the file and deletes it.
|
|
|
+func (fstr *FileStringTableReader) Close() error {
|
|
|
+ if fstr == nil || fstr.f == nil {
|
|
|
+ return nil
|
|
|
+ }
|
|
|
+
|
|
|
+ path := fstr.f.Name()
|
|
|
+ err := fstr.f.Close()
|
|
|
+ fstr.f = nil
|
|
|
+ fstr.refs = nil
|
|
|
+
|
|
|
+ if path != "" {
|
|
|
+ _ = os.Remove(path)
|
|
|
+ }
|
|
|
+
|
|
|
+ return err
|
|
|
+}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// Codec Context
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
// EncodingContext is a context object passed to the encoders to ensure reuse of buffer
// and table data.
type EncodingContext struct {
	// Buffer receives the encoded bytes.
	Buffer *util.Buffer
	// Table, when non-nil, deduplicates strings into table indices.
	Table *StringTableWriter
}

// IsStringTable returns true if the table is available.
func (ec *EncodingContext) IsStringTable() bool {
	return ec.Table != nil
}
|
|
|
+
|
|
|
// DecodingContext is a context object passed to the decoders to ensure parent objects
// reuse as much data as possible.
type DecodingContext struct {
	// Buffer is the source of the encoded bytes.
	Buffer *util.Buffer
	// Table, when non-nil, resolves table indices back into strings.
	Table StringTableReader
}
|
|
|
+
|
|
|
+// NewDecodingContextFromBytes creates a new DecodingContext instance using an byte slice
|
|
|
+func NewDecodingContextFromBytes(data []byte) *DecodingContext {
|
|
|
+ var table StringTableReader
|
|
|
+
|
|
|
+ buff := util.NewBufferFromBytes(data)
|
|
|
+
|
|
|
+ // string table header validation
|
|
|
+ if isBinaryTag(data, BinaryTagStringTable) {
|
|
|
+ buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
|
|
|
+
|
|
|
+ // always use a slice string table with a byte array since the
|
|
|
+ // data is already in memory
|
|
|
+ table = NewSliceStringTableReaderFrom(buff)
|
|
|
+ }
|
|
|
+
|
|
|
+ return &DecodingContext{
|
|
|
+ Buffer: buff,
|
|
|
+ Table: table,
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+// NewDecodingContextFromReader creates a new DecodingContext instance using an io.Reader
|
|
|
+// implementation
|
|
|
+func NewDecodingContextFromReader(reader io.Reader) *DecodingContext {
|
|
|
+ var table StringTableReader
|
|
|
+
|
|
|
+ buff := util.NewBufferFromReader(reader)
|
|
|
+
|
|
|
+ if isReaderBinaryTag(buff, BinaryTagStringTable) {
|
|
|
+ buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
|
|
|
+
|
|
|
+ // create correct string table implementation
|
|
|
+ if IsBingenFileBackedStringTableEnabled() {
|
|
|
+ table = NewFileStringTableReaderFrom(buff, BingenFileBackedStringTableDir())
|
|
|
+ } else {
|
|
|
+ table = NewSliceStringTableReaderFrom(buff)
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return &DecodingContext{
|
|
|
+ Buffer: buff,
|
|
|
+ Table: table,
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
// IsStringTable returns true if the table is available and non-empty
// (a present-but-empty table is treated as absent).
func (dc *DecodingContext) IsStringTable() bool {
	return dc.Table != nil && dc.Table.Len() > 0
}
|
|
|
+
|
|
|
+// Close will ensure that any string table resources and buffer resources are
|
|
|
+// cleaned up.
|
|
|
+func (dc *DecodingContext) Close() {
|
|
|
+ if dc.Table != nil {
|
|
|
+ _ = dc.Table.Close()
|
|
|
+ dc.Table = nil
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// Binary Codec
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
// BinEncoder is an encoding interface which defines a context based marshal contract.
type BinEncoder interface {
	// MarshalBinaryWithContext writes the receiver into the context's buffer.
	MarshalBinaryWithContext(*EncodingContext) error
}

// BinDecoder is a decoding interface which defines a context based unmarshal contract.
type BinDecoder interface {
	// UnmarshalBinaryWithContext reads the receiver from the context's buffer.
	UnmarshalBinaryWithContext(*DecodingContext) error
}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// Update
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
+// MarshalBinary serializes the internal properties of this Update instance
|
|
|
+// into a byte array
|
|
|
+func (target *Update) MarshalBinary() (data []byte, err error) {
|
|
|
+ ctx := &EncodingContext{
|
|
|
+ Buffer: util.NewBuffer(),
|
|
|
+ Table: nil,
|
|
|
+ }
|
|
|
+
|
|
|
+ e := target.MarshalBinaryWithContext(ctx)
|
|
|
+ if e != nil {
|
|
|
+ return nil, e
|
|
|
+ }
|
|
|
+
|
|
|
+ encBytes := ctx.Buffer.Bytes()
|
|
|
+ return encBytes, nil
|
|
|
+}
|
|
|
+
|
|
|
// MarshalBinaryWithContext serializes the internal properties of this Update instance
// into a byte array leveraging a predefined context.
//
// Wire layout, in order: version (uint8), Name (table index or inline string),
// Labels (nil byte, else length + key/value pairs), Value (float64),
// AdditionalInfo (nil byte, else length + key/value pairs).
//
// NOTE: map entries are written in Go's randomized map iteration order, so two
// encodings of the same Update are not guaranteed to be byte-identical.
func (target *Update) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
	// panics are recovered and propagated as errors
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else if s, ok := r.(string); ok {
				err = fmt.Errorf("Unexpected panic: %s", s)
			} else {
				err = fmt.Errorf("Unexpected panic: %+v", r)
			}
		}
	}()

	buff := ctx.Buffer
	buff.WriteUInt8(DefaultCodecVersion) // version

	// Name: table index when a string table is present, inline string otherwise
	if ctx.IsStringTable() {
		a := ctx.Table.AddOrGet(target.Name)
		buff.WriteInt(a) // write table index
	} else {
		buff.WriteString(target.Name) // write string
	}
	if target.Labels == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(target.Labels)) // map length
		for v, z := range target.Labels {
			// map key
			if ctx.IsStringTable() {
				b := ctx.Table.AddOrGet(v)
				buff.WriteInt(b) // write table index
			} else {
				buff.WriteString(v) // write string
			}
			// map value
			if ctx.IsStringTable() {
				c := ctx.Table.AddOrGet(z)
				buff.WriteInt(c) // write table index
			} else {
				buff.WriteString(z) // write string
			}
		}
		// --- [end][write][map](map[string]string) ---

	}
	buff.WriteFloat64(target.Value) // write float64
	if target.AdditionalInfo == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(target.AdditionalInfo)) // map length
		for vv, zz := range target.AdditionalInfo {
			// map key
			if ctx.IsStringTable() {
				d := ctx.Table.AddOrGet(vv)
				buff.WriteInt(d) // write table index
			} else {
				buff.WriteString(vv) // write string
			}
			// map value
			if ctx.IsStringTable() {
				e := ctx.Table.AddOrGet(zz)
				buff.WriteInt(e) // write table index
			} else {
				buff.WriteString(zz) // write string
			}
		}
		// --- [end][write][map](map[string]string) ---

	}
	return nil
}
|
|
|
+
|
|
|
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
|
|
|
+// the Update type
|
|
|
+func (target *Update) UnmarshalBinary(data []byte) error {
|
|
|
+ ctx := NewDecodingContextFromBytes(data)
|
|
|
+ defer ctx.Close()
|
|
|
+ err := target.UnmarshalBinaryWithContext(ctx)
|
|
|
+ if err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// UnmarshalBinaryFromReader uses the io.Reader data to set all the internal properties of
|
|
|
+// the Update type
|
|
|
+func (target *Update) UnmarshalBinaryFromReader(reader io.Reader) error {
|
|
|
+ ctx := NewDecodingContextFromReader(reader)
|
|
|
+ defer ctx.Close()
|
|
|
+ err := target.UnmarshalBinaryWithContext(ctx)
|
|
|
+ if err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
// the Update type.
//
// Reads fields in the exact order MarshalBinaryWithContext writes them:
// version, Name, Labels (nil flag + pairs), Value, AdditionalInfo (nil flag + pairs).
// Payloads with a version greater than DefaultCodecVersion are rejected.
func (target *Update) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
	// panics are recovered and propagated as errors
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else if s, ok := r.(string); ok {
				err = fmt.Errorf("Unexpected panic: %s", s)
			} else {
				err = fmt.Errorf("Unexpected panic: %+v", r)
			}
		}
	}()

	buff := ctx.Buffer
	version := buff.ReadUInt8()

	if version > DefaultCodecVersion {
		return fmt.Errorf("Invalid Version Unmarshaling Update. Expected %d or less, got %d", DefaultCodecVersion, version)
	}

	// Name: table index when a string table is present, inline string otherwise
	var b string
	if ctx.IsStringTable() {
		c := buff.ReadInt() // read string index
		b = ctx.Table.At(c)
	} else {
		b = buff.ReadString() // read string
	}
	a := b
	target.Name = a

	if buff.ReadUInt8() == uint8(0) {
		target.Labels = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		e := buff.ReadInt() // map len
		d := make(map[string]string, e)
		for i := 0; i < e; i++ {
			// map key
			var v string
			var g string
			if ctx.IsStringTable() {
				h := buff.ReadInt() // read string index
				g = ctx.Table.At(h)
			} else {
				g = buff.ReadString() // read string
			}
			f := g
			v = f

			// map value
			var z string
			var m string
			if ctx.IsStringTable() {
				n := buff.ReadInt() // read string index
				m = ctx.Table.At(n)
			} else {
				m = buff.ReadString() // read string
			}
			l := m
			z = l

			d[v] = z
		}
		target.Labels = d
		// --- [end][read][map](map[string]string) ---

	}
	o := buff.ReadFloat64() // read float64
	target.Value = o

	if buff.ReadUInt8() == uint8(0) {
		target.AdditionalInfo = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		q := buff.ReadInt() // map len
		p := make(map[string]string, q)
		for j := 0; j < q; j++ {
			// map key
			var vv string
			var s string
			if ctx.IsStringTable() {
				t := buff.ReadInt() // read string index
				s = ctx.Table.At(t)
			} else {
				s = buff.ReadString() // read string
			}
			r := s
			vv = r

			// map value
			var zz string
			var w string
			if ctx.IsStringTable() {
				x := buff.ReadInt() // read string index
				w = ctx.Table.At(x)
			} else {
				w = buff.ReadString() // read string
			}
			u := w
			zz = u

			p[vv] = zz
		}
		target.AdditionalInfo = p
		// --- [end][read][map](map[string]string) ---

	}
	return nil
}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// UpdateSet
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
+// MarshalBinary serializes the internal properties of this UpdateSet instance
|
|
|
+// into a byte array
|
|
|
+func (target *UpdateSet) MarshalBinary() (data []byte, err error) {
|
|
|
+ ctx := &EncodingContext{
|
|
|
+ Buffer: util.NewBuffer(),
|
|
|
+ Table: NewStringTableWriter(),
|
|
|
+ }
|
|
|
+
|
|
|
+ e := target.MarshalBinaryWithContext(ctx)
|
|
|
+ if e != nil {
|
|
|
+ return nil, e
|
|
|
+ }
|
|
|
+
|
|
|
+ encBytes := ctx.Buffer.Bytes()
|
|
|
+ sTableBytes := ctx.Table.ToBytes()
|
|
|
+ merged := appendBytes(sTableBytes, encBytes)
|
|
|
+ return merged, nil
|
|
|
+}
|
|
|
+
|
|
|
// MarshalBinaryWithContext serializes the internal properties of this UpdateSet instance
// into a byte array leveraging a predefined context.
//
// Wire layout, in order: version (uint8), Timestamp (length-prefixed
// time.Time.MarshalBinary payload), Updates (nil byte, else length followed by
// each Update encoded with the shared context).
func (target *UpdateSet) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
	// panics are recovered and propagated as errors
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else if s, ok := r.(string); ok {
				err = fmt.Errorf("Unexpected panic: %s", s)
			} else {
				err = fmt.Errorf("Unexpected panic: %+v", r)
			}
		}
	}()

	buff := ctx.Buffer
	buff.WriteUInt8(DefaultCodecVersion) // version

	// --- [begin][write][reference](time.Time) ---
	a, errA := target.Timestamp.MarshalBinary()
	if errA != nil {
		return errA
	}
	buff.WriteInt(len(a)) // payload length prefix
	buff.WriteBytes(a)
	// --- [end][write][reference](time.Time) ---

	if target.Updates == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][slice]([]Update) ---
		buff.WriteInt(len(target.Updates)) // array length
		for i := 0; i < len(target.Updates); i++ {
			// --- [begin][write][struct](Update) ---
			buff.WriteInt(0) // [compatibility, unused]
			errB := target.Updates[i].MarshalBinaryWithContext(ctx)
			if errB != nil {
				return errB
			}
			// --- [end][write][struct](Update) ---

		}
		// --- [end][write][slice]([]Update) ---

	}
	return nil
}
|
|
|
+
|
|
|
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
|
|
|
+// the UpdateSet type
|
|
|
+func (target *UpdateSet) UnmarshalBinary(data []byte) error {
|
|
|
+ ctx := NewDecodingContextFromBytes(data)
|
|
|
+ defer ctx.Close()
|
|
|
+ err := target.UnmarshalBinaryWithContext(ctx)
|
|
|
+ if err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// UnmarshalBinaryFromReader uses the io.Reader data to set all the internal properties of
|
|
|
+// the UpdateSet type
|
|
|
+func (target *UpdateSet) UnmarshalBinaryFromReader(reader io.Reader) error {
|
|
|
+ ctx := NewDecodingContextFromReader(reader)
|
|
|
+ defer ctx.Close()
|
|
|
+ err := target.UnmarshalBinaryWithContext(ctx)
|
|
|
+ if err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
|
|
|
+// the UpdateSet type
|
|
|
+func (target *UpdateSet) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
|
|
|
+ // panics are recovered and propagated as errors
|
|
|
+ defer func() {
|
|
|
+ if r := recover(); r != nil {
|
|
|
+ if e, ok := r.(error); ok {
|
|
|
+ err = e
|
|
|
+ } else if s, ok := r.(string); ok {
|
|
|
+ err = fmt.Errorf("Unexpected panic: %s", s)
|
|
|
+ } else {
|
|
|
+ err = fmt.Errorf("Unexpected panic: %+v", r)
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }()
|
|
|
+
|
|
|
+ buff := ctx.Buffer
|
|
|
+ version := buff.ReadUInt8()
|
|
|
+
|
|
|
+ if version > DefaultCodecVersion {
|
|
|
+ return fmt.Errorf("Invalid Version Unmarshaling UpdateSet. Expected %d or less, got %d", DefaultCodecVersion, version)
|
|
|
+ }
|
|
|
+
|
|
|
+ // --- [begin][read][reference](time.Time) ---
|
|
|
+ a := &time.Time{}
|
|
|
+ b := buff.ReadInt() // byte array length
|
|
|
+ c := buff.ReadBytes(b) // byte array
|
|
|
+ errA := a.UnmarshalBinary(c)
|
|
|
+ if errA != nil {
|
|
|
+ return errA
|
|
|
+ }
|
|
|
+ target.Timestamp = *a
|
|
|
+ // --- [end][read][reference](time.Time) ---
|
|
|
+
|
|
|
+ if buff.ReadUInt8() == uint8(0) {
|
|
|
+ target.Updates = nil
|
|
|
+ } else {
|
|
|
+ // --- [begin][read][slice]([]Update) ---
|
|
|
+ e := buff.ReadInt() // array len
|
|
|
+ d := make([]Update, e)
|
|
|
+ for i := 0; i < e; i++ {
|
|
|
+ // --- [begin][read][struct](Update) ---
|
|
|
+ g := &Update{}
|
|
|
+ buff.ReadInt() // [compatibility, unused]
|
|
|
+ errB := g.UnmarshalBinaryWithContext(ctx)
|
|
|
+ if errB != nil {
|
|
|
+ return errB
|
|
|
+ }
|
|
|
+ f := *g
|
|
|
+ // --- [end][read][struct](Update) ---
|
|
|
+
|
|
|
+ d[i] = f
|
|
|
+ }
|
|
|
+ target.Updates = d
|
|
|
+ // --- [end][read][slice]([]Update) ---
|
|
|
+
|
|
|
+ }
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+// UpdateSetStream
|
|
|
+//--------------------------------------------------------------------------
|
|
|
+
|
|
|
// UpdateSetStream is a single use field stream for the contents of an UpdateSet instance. Instead of creating an instance and populating
// the fields on that instance, we provide a streaming iterator which yields (BingenFieldInfo, *BingenValue) tuples for each
// streamable element. All slices and maps will be flattened one depth and each element streamed individually.
type UpdateSetStream struct {
	// reader is the underlying data source; Close() closes it when it implements io.Closer.
	reader io.Reader
	// ctx wraps reader with the decoding buffer used by Stream(); released by Close().
	ctx *DecodingContext
	// err records the first failure encountered while streaming; surfaced via Error().
	err error
}
|
|
|
+
|
|
|
+// Closes closes the internal io.Reader used to read and parse the UpdateSet fields.
|
|
|
+// This should be called once the stream is no longer needed.
|
|
|
+func (stream *UpdateSetStream) Close() {
|
|
|
+ if closer, ok := stream.reader.(io.Closer); ok {
|
|
|
+ closer.Close()
|
|
|
+ }
|
|
|
+ stream.ctx.Close()
|
|
|
+}
|
|
|
+
|
|
|
+// Error returns an error if one occurred during the process of streaming the UpdateSet
|
|
|
+// This can be checked after iterating through the Stream().
|
|
|
+func (stream *UpdateSetStream) Error() error {
|
|
|
+ return stream.err
|
|
|
+}
|
|
|
+
|
|
|
+// NewUpdateSetStream creates a new UpdateSetStream, which uses the io.Reader data to stream all internal fields of an UpdateSet instance
|
|
|
+func NewUpdateSetStream(reader io.Reader) BingenStream {
|
|
|
+ ctx := NewDecodingContextFromReader(reader)
|
|
|
+
|
|
|
+ return &UpdateSetStream{
|
|
|
+ ctx: ctx,
|
|
|
+ reader: reader,
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
// Stream returns the iterator which will stream each field of the target type.
// Fields are decoded lazily from the context's buffer in wire order, so the
// returned iterator is single-use; failures are recorded on stream.err (see
// Error()) rather than yielded.
// NOTE(review): unlike UnmarshalBinaryWithContext, panics raised by buffer
// reads here are not recovered — confirm callers expect that.
func (stream *UpdateSetStream) Stream() iter.Seq2[BingenFieldInfo, *BingenValue] {
	return func(yield func(BingenFieldInfo, *BingenValue) bool) {
		var fi BingenFieldInfo

		ctx := stream.ctx
		buff := ctx.Buffer
		// The first byte of the payload is the codec version; newer
		// versions than this generated code supports are rejected.
		version := buff.ReadUInt8()

		if version > DefaultCodecVersion {
			stream.err = fmt.Errorf("Invalid Version Unmarshaling UpdateSet. Expected %d or less, got %d", DefaultCodecVersion, version)
			return
		}

		// Field: Timestamp (time.Time) — yielded once as a single value.
		fi = BingenFieldInfo{
			Type: reflect.TypeFor[time.Time](),
			Name: "Timestamp",
		}

		// --- [begin][read][reference](time.Time) ---
		// time.Time is stored as a length-prefixed blob written by
		// MarshalBinary on the encode side.
		b := &time.Time{}
		c := buff.ReadInt() // byte array length
		d := buff.ReadBytes(c) // byte array
		errA := b.UnmarshalBinary(d)
		if errA != nil {
			stream.err = errA
			return
		}
		a := *b
		// --- [end][read][reference](time.Time) ---

		// Stop decoding entirely if the consumer breaks out of the loop.
		if !yield(fi, singleV(a)) {
			return
		}
		// Field: Updates ([]Update) — each element yielded individually.
		fi = BingenFieldInfo{
			Type: reflect.TypeFor[[]Update](),
			Name: "Updates",
		}

		// Marker byte: 0 means the slice was nil at encode time, so a
		// single nil value is yielded for the field.
		if buff.ReadUInt8() == uint8(0) {
			if !yield(fi, nil) {
				return
			}
		} else {
			// --- [begin][read][streaming-slice]([]Update) ---
			e := buff.ReadInt() // array len
			for i := 0; i < e; i++ {
				// --- [begin][read][struct](Update) ---
				g := &Update{}
				buff.ReadInt() // [compatibility, unused]
				errB := g.UnmarshalBinaryWithContext(ctx)
				if errB != nil {
					stream.err = errB
					return
				}
				f := *g
				// --- [end][read][struct](Update) ---

				// Each element is paired with its slice index.
				if !yield(fi, pairV(i, f)) {
					return
				}
			}
			// --- [end][read][streaming-slice]([]Update) ---

		}
	}
}
|