// metric_codecs.go — generated file (page-scrape artifacts removed)
  1. ////////////////////////////////////////////////////////////////////////////////
  2. //
  3. // DO NOT MODIFY
  4. //
  5. // ┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻
  6. //
  7. //
  8. // This source file was automatically generated by bingen.
  9. //
  10. ////////////////////////////////////////////////////////////////////////////////
  11. package metric
  12. import (
  13. "fmt"
  14. "io"
  15. "iter"
  16. "os"
  17. "reflect"
  18. "strings"
  19. "sync"
  20. "time"
  21. "unsafe"
  22. util "github.com/opencost/opencost/core/pkg/util"
  23. )
const (
	// GeneratorPackageName is the package the generator is targeting.
	GeneratorPackageName string = "metric"
)

// BinaryTags represent the formatting tag used for specific optimization features.
const (
	// BinaryTagStringTable is written and/or read prior to the existence of a string
	// table (where each index is encoded as a string entry in the resource).
	BinaryTagStringTable string = "BGST"
)

const (
	// DefaultCodecVersion is used for any resources listed in the Default version set.
	DefaultCodecVersion uint8 = 1
)
//--------------------------------------------------------------------------
// Configuration
//--------------------------------------------------------------------------
var (
	// bingenConfigLock guards all reads/writes of bingenConfig.
	bingenConfigLock sync.RWMutex
	// bingenConfig holds the active configuration; starts at the defaults.
	bingenConfig *BingenConfiguration = DefaultBingenConfiguration()
)
// BingenConfiguration is used to set any custom configuration in the way files are encoded
// or decoded.
type BingenConfiguration struct {
	// FileBackedStringTableEnabled enables the use of file-backed string tables for streaming
	// bingen decoding (trades lookup speed for lower memory use).
	FileBackedStringTableEnabled bool
	// FileBackedStringTableDir is the directory to write the string table files for reading.
	FileBackedStringTableDir string
}
  54. // DefaultBingenConfiguration creates the default implementation of the bingen configuration
  55. // and returns it.
  56. func DefaultBingenConfiguration() *BingenConfiguration {
  57. return &BingenConfiguration{
  58. FileBackedStringTableEnabled: false,
  59. FileBackedStringTableDir: os.TempDir(),
  60. }
  61. }
  62. // ConfigureBingen accepts a new *BingenConfiguration instance which updates the internal decoder
  63. // and encoder behavior.
  64. func ConfigureBingen(config *BingenConfiguration) {
  65. bingenConfigLock.Lock()
  66. defer bingenConfigLock.Unlock()
  67. if config == nil {
  68. config = DefaultBingenConfiguration()
  69. }
  70. bingenConfig = config
  71. }
  72. // IsBingenFileBackedStringTableEnabled accessor for file backed string table configuration
  73. func IsBingenFileBackedStringTableEnabled() bool {
  74. bingenConfigLock.RLock()
  75. defer bingenConfigLock.RUnlock()
  76. return bingenConfig.FileBackedStringTableEnabled
  77. }
  78. // BingenFileBackedStringTableDir returns the directory configured for file backed string tables.
  79. func BingenFileBackedStringTableDir() string {
  80. bingenConfigLock.RLock()
  81. defer bingenConfigLock.RUnlock()
  82. return bingenConfig.FileBackedStringTableDir
  83. }
//--------------------------------------------------------------------------
// Type Map
//--------------------------------------------------------------------------
// Generated type map for resolving interface implementations to
// concrete types registered with this codec package.
var typeMap map[string]reflect.Type = map[string]reflect.Type{
	"Update":    reflect.TypeFor[Update](),
	"UpdateSet": reflect.TypeFor[UpdateSet](),
}
  93. //--------------------------------------------------------------------------
  94. // Type Helpers
  95. //--------------------------------------------------------------------------
  96. // isBinaryTag returns true when the first bytes in the provided binary matches the tag
  97. func isBinaryTag(data []byte, tag string) bool {
  98. if len(data) < len(tag) {
  99. return false
  100. }
  101. return string(data[:len(tag)]) == tag
  102. }
  103. // isReaderBinaryTag is used to peek the header for an io.Reader Buffer
  104. func isReaderBinaryTag(buff *util.Buffer, tag string) bool {
  105. data, err := buff.Peek(len(tag))
  106. if err != nil && err != io.EOF {
  107. panic(fmt.Sprintf("called Peek() on a non buffered reader: %s", err))
  108. }
  109. if len(data) < len(tag) {
  110. return false
  111. }
  112. return string(data[:len(tag)]) == tag
  113. }
  114. // appendBytes combines a and b into a new byte array
  115. func appendBytes(a []byte, b []byte) []byte {
  116. al := len(a)
  117. bl := len(b)
  118. tl := al + bl
  119. // allocate a new byte array for the combined
  120. // use native copy for speedy byte copying
  121. result := make([]byte, tl)
  122. copy(result, a)
  123. copy(result[al:], b)
  124. return result
  125. }
  126. // typeToString determines the basic properties of the type, the qualifier, package path, and
  127. // type name, and returns the qualified type
  128. func typeToString(f interface{}) string {
  129. qual := ""
  130. t := reflect.TypeOf(f)
  131. if t.Kind() == reflect.Ptr {
  132. t = t.Elem()
  133. qual = "*"
  134. }
  135. return fmt.Sprintf("%s%s.%s", qual, t.PkgPath(), t.Name())
  136. }
  137. // resolveType uses the name of a type and returns the package, base type name, and whether
  138. // or not it's a pointer.
  139. func resolveType(t string) (pkg string, name string, isPtr bool) {
  140. isPtr = t[:1] == "*"
  141. if isPtr {
  142. t = t[1:]
  143. }
  144. slashIndex := strings.LastIndex(t, "/")
  145. if slashIndex >= 0 {
  146. t = t[slashIndex+1:]
  147. }
  148. parts := strings.Split(t, ".")
  149. if parts[0] == GeneratorPackageName {
  150. parts[0] = ""
  151. }
  152. pkg = parts[0]
  153. name = parts[1]
  154. return
  155. }
//--------------------------------------------------------------------------
// Stream Helpers
//--------------------------------------------------------------------------
// StreamFactoryFunc is an alias for a func that creates a BingenStream implementation.
type StreamFactoryFunc func(io.Reader) BingenStream

// Generated streamable factory map for finding the specific new stream methods
// keyed by the streamable T type.
var streamFactoryMap map[reflect.Type]StreamFactoryFunc = map[reflect.Type]StreamFactoryFunc{
	reflect.TypeFor[UpdateSet](): NewUpdateSetStream,
}
  166. // NewStreamFor accepts an io.Reader, and returns a new BingenStream for the generic T
  167. // type provided _if_ it is a registered bingen type that is annotated as 'streamable'. See
  168. // the streamFactoryMap for generated type listings.
  169. func NewStreamFor[T any](reader io.Reader) (BingenStream, error) {
  170. typeKey := reflect.TypeFor[T]()
  171. factory, ok := streamFactoryMap[typeKey]
  172. if !ok {
  173. return nil, fmt.Errorf("the type: %s is not a registered bingen streamable type", typeKey.Name())
  174. }
  175. return factory(reader), nil
  176. }
// BingenStream is the stream interface for all streamable types.
type BingenStream interface {
	// Stream returns the iterator which will stream each field of the target type and
	// return the field info as well as the value.
	Stream() iter.Seq2[BingenFieldInfo, *BingenValue]
	// Close will close any dynamic io.Reader used to stream in the fields.
	Close()
	// Error returns an error if one occurred during the process of streaming the type's fields.
	// This can be checked after iterating through the Stream().
	Error() error
}
// BingenValue contains the value of a field as well as any index/key associated with that value
// (Index is nil for scalar fields, set for map keys / slice indices).
type BingenValue struct {
	Value any
	Index any
}
  193. // IsNil is just a method accessor way to check to see if the value returned was nil
  194. func (bv *BingenValue) IsNil() bool {
  195. return bv == nil
  196. }
  197. // creates a single BingenValue instance without a key or index
  198. func singleV(value any) *BingenValue {
  199. return &BingenValue{
  200. Value: value,
  201. }
  202. }
  203. // creates a pair of key/index and value.
  204. func pairV(index any, value any) *BingenValue {
  205. return &BingenValue{
  206. Value: value,
  207. Index: index,
  208. }
  209. }
// BingenFieldInfo contains the type of the field being streamed as well as the name of the field.
type BingenFieldInfo struct {
	Type reflect.Type
	Name string
}
//--------------------------------------------------------------------------
// String Table Writer
//--------------------------------------------------------------------------
// StringTableWriter maps strings to specific indices for encoding. Safe for
// concurrent use: all access goes through the internal mutex.
type StringTableWriter struct {
	l sync.Mutex
	// indices maps each distinct string to its assigned table slot
	indices map[string]int
	// next is the index that will be assigned to the next new string
	next int
}
  224. // NewStringTableWriter Creates a new StringTableWriter instance with provided contents
  225. func NewStringTableWriter(contents ...string) *StringTableWriter {
  226. st := &StringTableWriter{
  227. indices: make(map[string]int, len(contents)),
  228. next: len(contents),
  229. }
  230. for i, entry := range contents {
  231. st.indices[entry] = i
  232. }
  233. return st
  234. }
  235. // AddOrGet atomically retrieves a string entry's index if it exist. Otherwise, it will
  236. // add the entry and return the index.
  237. func (st *StringTableWriter) AddOrGet(s string) int {
  238. st.l.Lock()
  239. defer st.l.Unlock()
  240. if ind, ok := st.indices[s]; ok {
  241. return ind
  242. }
  243. current := st.next
  244. st.next++
  245. st.indices[s] = current
  246. return current
  247. }
  248. // ToSlice Converts the contents to a string array for encoding.
  249. func (st *StringTableWriter) ToSlice() []string {
  250. st.l.Lock()
  251. defer st.l.Unlock()
  252. if st.next == 0 {
  253. return []string{}
  254. }
  255. sl := make([]string, st.next)
  256. for s, i := range st.indices {
  257. sl[i] = s
  258. }
  259. return sl
  260. }
// ToBytes converts the contents to a binary encoded representation:
// the BGST tag, then the table length, then each string in index order.
func (st *StringTableWriter) ToBytes() []byte {
	buff := util.NewBuffer()
	buff.WriteBytes([]byte(BinaryTagStringTable)) // bingen table header
	strs := st.ToSlice()
	buff.WriteInt(len(strs)) // table length
	for _, s := range strs {
		buff.WriteString(s)
	}
	return buff.Bytes()
}
//--------------------------------------------------------------------------
// String Table Reader
//--------------------------------------------------------------------------
// StringTableReader is the interface used to read the string table during decoding.
type StringTableReader interface {
	// At returns the string entry at a specific index, or panics on out of bounds.
	At(index int) string
	// Len returns the total number of strings loaded in the string table.
	Len() int
	// Close will clear the loaded table, and drop any external resources used.
	Close() error
}
// SliceStringTableReader is a basic pre-loaded []string that provides index-based access.
// The cost of this implementation is holding all strings in memory, which trades
// memory usage for faster lookup performance.
type SliceStringTableReader struct {
	table []string
}
  290. // NewSliceStringTableReaderFrom creates a new SliceStringTableReader instance loading
  291. // data directly from the buffer. The buffer's position should start at the table length.
  292. func NewSliceStringTableReaderFrom(buffer *util.Buffer) StringTableReader {
  293. // table length
  294. tl := buffer.ReadInt()
  295. var table []string
  296. if tl > 0 {
  297. table = make([]string, tl)
  298. for i := range tl {
  299. table[i] = buffer.ReadString()
  300. }
  301. }
  302. return &SliceStringTableReader{
  303. table: table,
  304. }
  305. }
  306. // At returns the string entry at a specific index, or panics on out of bounds.
  307. func (sstr *SliceStringTableReader) At(index int) string {
  308. if index < 0 || index >= len(sstr.table) {
  309. panic(fmt.Errorf("%s: string table index out of bounds: %d", GeneratorPackageName, index))
  310. }
  311. return sstr.table[index]
  312. }
  313. // Len returns the total number of strings loaded in the string table.
  314. func (sstr *SliceStringTableReader) Len() int {
  315. if sstr == nil {
  316. return 0
  317. }
  318. return len(sstr.table)
  319. }
// Close for the slice tables just nils out the slice (freeing it for GC) and returns.
func (sstr *SliceStringTableReader) Close() error {
	sstr.table = nil
	return nil
}
// fileStringRef maps a bingen string-table index to a payload stored in a temp file.
type fileStringRef struct {
	// off is the byte offset of the payload within the temp file
	off int64
	// length is the payload size in bytes (0 means the empty string)
	length int
}
// FileStringTableReader leverages a local file to write string table data for lookup. On
// memory focused systems, this allows a slower parse with a significant decrease in memory
// usage. This implementation is often paired with streaming readers for high throughput with
// reduced memory usage.
type FileStringTableReader struct {
	f    *os.File
	refs []fileStringRef
}
  338. // NewFileStringTableFromBuffer reads exactly tl length-prefixed (uint16) string payloads from buffer
  339. // and appends each payload to a new temp file. It does not retain full strings in memory.
  340. func NewFileStringTableReaderFrom(buffer *util.Buffer, dir string) StringTableReader {
  341. // helper func to cast a string in-place to a byte slice.
  342. // NOTE: Return value is READ-ONLY. DO NOT MODIFY!
  343. byteSliceFor := func(s string) []byte {
  344. return unsafe.Slice(unsafe.StringData(s), len(s))
  345. }
  346. err := os.MkdirAll(dir, 0755)
  347. if err != nil {
  348. panic(fmt.Errorf("%s: failed to create string table directory: %w", GeneratorPackageName, err))
  349. }
  350. f, err := os.CreateTemp(dir, fmt.Sprintf("%s-bgst-*", GeneratorPackageName))
  351. if err != nil {
  352. panic(fmt.Errorf("%s: failed to create string table file: %w", GeneratorPackageName, err))
  353. }
  354. var writeErr error
  355. defer func() {
  356. if writeErr != nil {
  357. _ = f.Close()
  358. }
  359. }()
  360. // table length
  361. tl := buffer.ReadInt()
  362. var refs []fileStringRef
  363. if tl > 0 {
  364. refs = make([]fileStringRef, tl)
  365. for i := range tl {
  366. payload := byteSliceFor(buffer.ReadString())
  367. var off int64
  368. if len(payload) > 0 {
  369. off, err = f.Seek(0, io.SeekEnd)
  370. if err != nil {
  371. writeErr = fmt.Errorf("%s: failed to seek string table file: %w", GeneratorPackageName, err)
  372. panic(writeErr)
  373. }
  374. if _, err := f.Write(payload); err != nil {
  375. writeErr = fmt.Errorf("%s: failed to write string table entry %d: %w", GeneratorPackageName, i, err)
  376. panic(writeErr)
  377. }
  378. }
  379. refs[i] = fileStringRef{
  380. off: off,
  381. length: len(payload),
  382. }
  383. }
  384. }
  385. return &FileStringTableReader{
  386. f: f,
  387. refs: refs,
  388. }
  389. }
// At returns the string at index by reading its payload back from the temp file
// using the stored (offset, length) reference. Panics on a nil/closed reader or an
// out-of-bounds index.
func (fstr *FileStringTableReader) At(index int) string {
	if fstr == nil || fstr.f == nil {
		panic(fmt.Errorf("%s: failed to read file string table data", GeneratorPackageName))
	}
	if index < 0 || index >= len(fstr.refs) {
		panic(fmt.Errorf("%s: string table index out of bounds: %d", GeneratorPackageName, index))
	}
	ref := fstr.refs[index]
	if ref.length == 0 {
		return ""
	}
	b := make([]byte, ref.length)
	_, err := fstr.f.ReadAt(b, ref.off)
	if err != nil {
		// NOTE(review): a short or failed read is silently reported as "" here,
		// while the failures above panic — confirm this asymmetry is intended.
		return ""
	}
	// cast the allocated bytes to a string in-place, as we
	// were the ones that allocated the bytes
	return unsafe.String(unsafe.SliceData(b), len(b))
}
  411. // Len returns the total number of strings loaded in the string table.
  412. func (fstr *FileStringTableReader) Len() int {
  413. if fstr == nil {
  414. return 0
  415. }
  416. return len(fstr.refs)
  417. }
  418. // Close for the file string table reader closes the file and deletes it.
  419. func (fstr *FileStringTableReader) Close() error {
  420. if fstr == nil || fstr.f == nil {
  421. return nil
  422. }
  423. path := fstr.f.Name()
  424. err := fstr.f.Close()
  425. fstr.f = nil
  426. fstr.refs = nil
  427. if path != "" {
  428. _ = os.Remove(path)
  429. }
  430. return err
  431. }
//--------------------------------------------------------------------------
// Codec Context
//--------------------------------------------------------------------------
// EncodingContext is a context object passed to the encoders to ensure reuse of buffer
// and table data across nested MarshalBinaryWithContext calls.
type EncodingContext struct {
	Buffer *util.Buffer
	// Table is optional; when non-nil, strings are encoded as table indices
	Table *StringTableWriter
}
// IsStringTable returns true if the string table writer is available on this context.
func (ec *EncodingContext) IsStringTable() bool {
	return ec.Table != nil
}
// DecodingContext is a context object passed to the decoders to ensure parent objects
// reuse as much data as possible (shared buffer and string table).
type DecodingContext struct {
	Buffer *util.Buffer
	// Table is nil when the payload carried no string table
	Table StringTableReader
}
  451. // NewDecodingContextFromBytes creates a new DecodingContext instance using an byte slice
  452. func NewDecodingContextFromBytes(data []byte) *DecodingContext {
  453. var table StringTableReader
  454. buff := util.NewBufferFromBytes(data)
  455. // string table header validation
  456. if isBinaryTag(data, BinaryTagStringTable) {
  457. buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
  458. // always use a slice string table with a byte array since the
  459. // data is already in memory
  460. table = NewSliceStringTableReaderFrom(buff)
  461. }
  462. return &DecodingContext{
  463. Buffer: buff,
  464. Table: table,
  465. }
  466. }
  467. // NewDecodingContextFromReader creates a new DecodingContext instance using an io.Reader
  468. // implementation
  469. func NewDecodingContextFromReader(reader io.Reader) *DecodingContext {
  470. var table StringTableReader
  471. buff := util.NewBufferFromReader(reader)
  472. if isReaderBinaryTag(buff, BinaryTagStringTable) {
  473. buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
  474. // create correct string table implementation
  475. if IsBingenFileBackedStringTableEnabled() {
  476. table = NewFileStringTableReaderFrom(buff, BingenFileBackedStringTableDir())
  477. } else {
  478. table = NewSliceStringTableReaderFrom(buff)
  479. }
  480. }
  481. return &DecodingContext{
  482. Buffer: buff,
  483. Table: table,
  484. }
  485. }
  486. // IsStringTable returns true if the table is available
  487. func (dc *DecodingContext) IsStringTable() bool {
  488. return dc.Table != nil && dc.Table.Len() > 0
  489. }
  490. // Close will ensure that any string table resources and buffer resources are
  491. // cleaned up.
  492. func (dc *DecodingContext) Close() {
  493. if dc.Table != nil {
  494. _ = dc.Table.Close()
  495. dc.Table = nil
  496. }
  497. }
//--------------------------------------------------------------------------
// Binary Codec
//--------------------------------------------------------------------------
// BinEncoder is an encoding interface which defines a context based marshal contract.
type BinEncoder interface {
	MarshalBinaryWithContext(*EncodingContext) error
}

// BinDecoder is a decoding interface which defines a context based unmarshal contract.
type BinDecoder interface {
	UnmarshalBinaryWithContext(*DecodingContext) error
}
  509. //--------------------------------------------------------------------------
  510. // Update
  511. //--------------------------------------------------------------------------
  512. // MarshalBinary serializes the internal properties of this Update instance
  513. // into a byte array
  514. func (target *Update) MarshalBinary() (data []byte, err error) {
  515. ctx := &EncodingContext{
  516. Buffer: util.NewBuffer(),
  517. Table: nil,
  518. }
  519. e := target.MarshalBinaryWithContext(ctx)
  520. if e != nil {
  521. return nil, e
  522. }
  523. encBytes := ctx.Buffer.Bytes()
  524. return encBytes, nil
  525. }
// MarshalBinaryWithContext serializes the internal properties of this Update instance
// into the context's buffer. Wire order: version byte, Name, Labels (nil byte + map),
// Value, AdditionalInfo (nil byte + map). Strings go through the context's string
// table when one is present; otherwise they are written inline.
func (target *Update) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
	// panics are recovered and propagated as errors
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else if s, ok := r.(string); ok {
				err = fmt.Errorf("Unexpected panic: %s", s)
			} else {
				err = fmt.Errorf("Unexpected panic: %+v", r)
			}
		}
	}()
	buff := ctx.Buffer
	buff.WriteUInt8(DefaultCodecVersion) // version
	if ctx.IsStringTable() {
		a := ctx.Table.AddOrGet(target.Name)
		buff.WriteInt(a) // write table index
	} else {
		buff.WriteString(target.Name) // write string
	}
	if target.Labels == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte
		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(target.Labels)) // map length
		for v, z := range target.Labels {
			// key
			if ctx.IsStringTable() {
				b := ctx.Table.AddOrGet(v)
				buff.WriteInt(b) // write table index
			} else {
				buff.WriteString(v) // write string
			}
			// value
			if ctx.IsStringTable() {
				c := ctx.Table.AddOrGet(z)
				buff.WriteInt(c) // write table index
			} else {
				buff.WriteString(z) // write string
			}
		}
		// --- [end][write][map](map[string]string) ---
	}
	buff.WriteFloat64(target.Value) // write float64
	if target.AdditionalInfo == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte
		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(target.AdditionalInfo)) // map length
		for vv, zz := range target.AdditionalInfo {
			// key
			if ctx.IsStringTable() {
				d := ctx.Table.AddOrGet(vv)
				buff.WriteInt(d) // write table index
			} else {
				buff.WriteString(vv) // write string
			}
			// value
			if ctx.IsStringTable() {
				e := ctx.Table.AddOrGet(zz)
				buff.WriteInt(e) // write table index
			} else {
				buff.WriteString(zz) // write string
			}
		}
		// --- [end][write][map](map[string]string) ---
	}
	return nil
}
  596. // UnmarshalBinary uses the data passed byte array to set all the internal properties of
  597. // the Update type
  598. func (target *Update) UnmarshalBinary(data []byte) error {
  599. ctx := NewDecodingContextFromBytes(data)
  600. defer ctx.Close()
  601. err := target.UnmarshalBinaryWithContext(ctx)
  602. if err != nil {
  603. return err
  604. }
  605. return nil
  606. }
  607. // UnmarshalBinaryFromReader uses the io.Reader data to set all the internal properties of
  608. // the Update type
  609. func (target *Update) UnmarshalBinaryFromReader(reader io.Reader) error {
  610. ctx := NewDecodingContextFromReader(reader)
  611. defer ctx.Close()
  612. err := target.UnmarshalBinaryWithContext(ctx)
  613. if err != nil {
  614. return err
  615. }
  616. return nil
  617. }
// UnmarshalBinaryWithContext uses the context's buffer (and string table, when present)
// to set all the internal properties of the Update type. The read order mirrors
// MarshalBinaryWithContext exactly: version, Name, Labels, Value, AdditionalInfo.
func (target *Update) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
	// panics are recovered and propagated as errors
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else if s, ok := r.(string); ok {
				err = fmt.Errorf("Unexpected panic: %s", s)
			} else {
				err = fmt.Errorf("Unexpected panic: %+v", r)
			}
		}
	}()
	buff := ctx.Buffer
	version := buff.ReadUInt8()
	// newer versions are rejected; older versions are accepted for compatibility
	if version > DefaultCodecVersion {
		return fmt.Errorf("Invalid Version Unmarshaling Update. Expected %d or less, got %d", DefaultCodecVersion, version)
	}
	// Name
	var b string
	if ctx.IsStringTable() {
		c := buff.ReadInt() // read string index
		b = ctx.Table.At(c)
	} else {
		b = buff.ReadString() // read string
	}
	a := b
	target.Name = a
	// Labels: leading nil byte distinguishes nil map from empty map
	if buff.ReadUInt8() == uint8(0) {
		target.Labels = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		e := buff.ReadInt() // map len
		d := make(map[string]string, e)
		for i := 0; i < e; i++ {
			var v string
			var g string
			if ctx.IsStringTable() {
				h := buff.ReadInt() // read string index
				g = ctx.Table.At(h)
			} else {
				g = buff.ReadString() // read string
			}
			f := g
			v = f
			var z string
			var m string
			if ctx.IsStringTable() {
				n := buff.ReadInt() // read string index
				m = ctx.Table.At(n)
			} else {
				m = buff.ReadString() // read string
			}
			l := m
			z = l
			d[v] = z
		}
		target.Labels = d
		// --- [end][read][map](map[string]string) ---
	}
	o := buff.ReadFloat64() // read float64
	target.Value = o
	// AdditionalInfo: same nil-byte + map encoding as Labels
	if buff.ReadUInt8() == uint8(0) {
		target.AdditionalInfo = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		q := buff.ReadInt() // map len
		p := make(map[string]string, q)
		for j := 0; j < q; j++ {
			var vv string
			var s string
			if ctx.IsStringTable() {
				t := buff.ReadInt() // read string index
				s = ctx.Table.At(t)
			} else {
				s = buff.ReadString() // read string
			}
			r := s
			vv = r
			var zz string
			var w string
			if ctx.IsStringTable() {
				x := buff.ReadInt() // read string index
				w = ctx.Table.At(x)
			} else {
				w = buff.ReadString() // read string
			}
			u := w
			zz = u
			p[vv] = zz
		}
		target.AdditionalInfo = p
		// --- [end][read][map](map[string]string) ---
	}
	return nil
}
  715. //--------------------------------------------------------------------------
  716. // UpdateSet
  717. //--------------------------------------------------------------------------
  718. // MarshalBinary serializes the internal properties of this UpdateSet instance
  719. // into a byte array
  720. func (target *UpdateSet) MarshalBinary() (data []byte, err error) {
  721. ctx := &EncodingContext{
  722. Buffer: util.NewBuffer(),
  723. Table: NewStringTableWriter(),
  724. }
  725. e := target.MarshalBinaryWithContext(ctx)
  726. if e != nil {
  727. return nil, e
  728. }
  729. encBytes := ctx.Buffer.Bytes()
  730. sTableBytes := ctx.Table.ToBytes()
  731. merged := appendBytes(sTableBytes, encBytes)
  732. return merged, nil
  733. }
// MarshalBinaryWithContext serializes the internal properties of this UpdateSet instance
// into the context's buffer. Wire order: version byte, length-prefixed Timestamp
// (time.Time's own binary encoding), then the Updates slice (nil byte + length + entries).
func (target *UpdateSet) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
	// panics are recovered and propagated as errors
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else if s, ok := r.(string); ok {
				err = fmt.Errorf("Unexpected panic: %s", s)
			} else {
				err = fmt.Errorf("Unexpected panic: %+v", r)
			}
		}
	}()
	buff := ctx.Buffer
	buff.WriteUInt8(DefaultCodecVersion) // version
	// --- [begin][write][reference](time.Time) ---
	a, errA := target.Timestamp.MarshalBinary()
	if errA != nil {
		return errA
	}
	buff.WriteInt(len(a))
	buff.WriteBytes(a)
	// --- [end][write][reference](time.Time) ---
	if target.Updates == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte
		// --- [begin][write][slice]([]Update) ---
		buff.WriteInt(len(target.Updates)) // array length
		for i := 0; i < len(target.Updates); i++ {
			// --- [begin][write][struct](Update) ---
			buff.WriteInt(0) // [compatibility, unused]
			errB := target.Updates[i].MarshalBinaryWithContext(ctx)
			if errB != nil {
				return errB
			}
			// --- [end][write][struct](Update) ---
		}
		// --- [end][write][slice]([]Update) ---
	}
	return nil
}
  778. // UnmarshalBinary uses the data passed byte array to set all the internal properties of
  779. // the UpdateSet type
  780. func (target *UpdateSet) UnmarshalBinary(data []byte) error {
  781. ctx := NewDecodingContextFromBytes(data)
  782. defer ctx.Close()
  783. err := target.UnmarshalBinaryWithContext(ctx)
  784. if err != nil {
  785. return err
  786. }
  787. return nil
  788. }
  789. // UnmarshalBinaryFromReader uses the io.Reader data to set all the internal properties of
  790. // the UpdateSet type
  791. func (target *UpdateSet) UnmarshalBinaryFromReader(reader io.Reader) error {
  792. ctx := NewDecodingContextFromReader(reader)
  793. defer ctx.Close()
  794. err := target.UnmarshalBinaryWithContext(ctx)
  795. if err != nil {
  796. return err
  797. }
  798. return nil
  799. }
  800. // UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
  801. // the UpdateSet type
  802. func (target *UpdateSet) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
  803. // panics are recovered and propagated as errors
  804. defer func() {
  805. if r := recover(); r != nil {
  806. if e, ok := r.(error); ok {
  807. err = e
  808. } else if s, ok := r.(string); ok {
  809. err = fmt.Errorf("Unexpected panic: %s", s)
  810. } else {
  811. err = fmt.Errorf("Unexpected panic: %+v", r)
  812. }
  813. }
  814. }()
  815. buff := ctx.Buffer
  816. version := buff.ReadUInt8()
  817. if version > DefaultCodecVersion {
  818. return fmt.Errorf("Invalid Version Unmarshaling UpdateSet. Expected %d or less, got %d", DefaultCodecVersion, version)
  819. }
  820. // --- [begin][read][reference](time.Time) ---
  821. a := &time.Time{}
  822. b := buff.ReadInt() // byte array length
  823. c := buff.ReadBytes(b) // byte array
  824. errA := a.UnmarshalBinary(c)
  825. if errA != nil {
  826. return errA
  827. }
  828. target.Timestamp = *a
  829. // --- [end][read][reference](time.Time) ---
  830. if buff.ReadUInt8() == uint8(0) {
  831. target.Updates = nil
  832. } else {
  833. // --- [begin][read][slice]([]Update) ---
  834. e := buff.ReadInt() // array len
  835. d := make([]Update, e)
  836. for i := 0; i < e; i++ {
  837. // --- [begin][read][struct](Update) ---
  838. g := &Update{}
  839. buff.ReadInt() // [compatibility, unused]
  840. errB := g.UnmarshalBinaryWithContext(ctx)
  841. if errB != nil {
  842. return errB
  843. }
  844. f := *g
  845. // --- [end][read][struct](Update) ---
  846. d[i] = f
  847. }
  848. target.Updates = d
  849. // --- [end][read][slice]([]Update) ---
  850. }
  851. return nil
  852. }
  853. //--------------------------------------------------------------------------
  854. // UpdateSetStream
  855. //--------------------------------------------------------------------------
// UpdateSetStream is a single use field stream for the contents of an UpdateSet instance. Instead of creating an instance and populating
// the fields on that instance, we provide a streaming iterator which yields (BingenFieldInfo, *BingenValue) tuples for each
// streamable element. All slices and maps will be flattened one depth and each element streamed individually.
type UpdateSetStream struct {
reader io.Reader // underlying byte source for the encoded UpdateSet; closed by Close() if it implements io.Closer
ctx *DecodingContext // decoding state (binary buffer and string table) built over reader
err error // first error hit while streaming; surfaced via Error()
}
  864. // Closes closes the internal io.Reader used to read and parse the UpdateSet fields.
  865. // This should be called once the stream is no longer needed.
  866. func (stream *UpdateSetStream) Close() {
  867. if closer, ok := stream.reader.(io.Closer); ok {
  868. closer.Close()
  869. }
  870. stream.ctx.Close()
  871. }
// Error returns an error if one occurred during the process of streaming the UpdateSet.
// This can be checked after iterating through the Stream(); a nil result means the
// stream completed (or was stopped by the consumer) without a decode failure.
func (stream *UpdateSetStream) Error() error {
return stream.err
}
  877. // NewUpdateSetStream creates a new UpdateSetStream, which uses the io.Reader data to stream all internal fields of an UpdateSet instance
  878. func NewUpdateSetStream(reader io.Reader) BingenStream {
  879. ctx := NewDecodingContextFromReader(reader)
  880. return &UpdateSetStream{
  881. ctx: ctx,
  882. reader: reader,
  883. }
  884. }
  885. // Stream returns the iterator which will stream each field of the target type.
  886. func (stream *UpdateSetStream) Stream() iter.Seq2[BingenFieldInfo, *BingenValue] {
  887. return func(yield func(BingenFieldInfo, *BingenValue) bool) {
  888. var fi BingenFieldInfo
  889. ctx := stream.ctx
  890. buff := ctx.Buffer
  891. version := buff.ReadUInt8()
  892. if version > DefaultCodecVersion {
  893. stream.err = fmt.Errorf("Invalid Version Unmarshaling UpdateSet. Expected %d or less, got %d", DefaultCodecVersion, version)
  894. return
  895. }
  896. fi = BingenFieldInfo{
  897. Type: reflect.TypeFor[time.Time](),
  898. Name: "Timestamp",
  899. }
  900. // --- [begin][read][reference](time.Time) ---
  901. b := &time.Time{}
  902. c := buff.ReadInt() // byte array length
  903. d := buff.ReadBytes(c) // byte array
  904. errA := b.UnmarshalBinary(d)
  905. if errA != nil {
  906. stream.err = errA
  907. return
  908. }
  909. a := *b
  910. // --- [end][read][reference](time.Time) ---
  911. if !yield(fi, singleV(a)) {
  912. return
  913. }
  914. fi = BingenFieldInfo{
  915. Type: reflect.TypeFor[[]Update](),
  916. Name: "Updates",
  917. }
  918. if buff.ReadUInt8() == uint8(0) {
  919. if !yield(fi, nil) {
  920. return
  921. }
  922. } else {
  923. // --- [begin][read][streaming-slice]([]Update) ---
  924. e := buff.ReadInt() // array len
  925. for i := 0; i < e; i++ {
  926. // --- [begin][read][struct](Update) ---
  927. g := &Update{}
  928. buff.ReadInt() // [compatibility, unused]
  929. errB := g.UnmarshalBinaryWithContext(ctx)
  930. if errB != nil {
  931. stream.err = errB
  932. return
  933. }
  934. f := *g
  935. // --- [end][read][struct](Update) ---
  936. if !yield(fi, pairV(i, f)) {
  937. return
  938. }
  939. }
  940. // --- [end][read][streaming-slice]([]Update) ---
  941. }
  942. }
  943. }