2
0

csv_export.go 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547
  1. package costmodel
  2. import (
  3. "context"
  4. "encoding/csv"
  5. "encoding/json"
  6. "errors"
  7. "fmt"
  8. "io"
  9. "os"
  10. "sort"
  11. "strconv"
  12. "time"
  13. "github.com/opencost/opencost/core/pkg/log"
  14. "github.com/opencost/opencost/core/pkg/opencost"
  15. "github.com/opencost/opencost/pkg/env"
  16. "github.com/opencost/opencost/pkg/filemanager"
  17. )
// AllocationModel provides access to allocation data for CSV export.
type AllocationModel interface {
	// ComputeAllocation returns the allocation set for the window
	// [start, end) computed at the given resolution.
	ComputeAllocation(start, end time.Time, resolution time.Duration) (*opencost.AllocationSet, error)
	// DateRange returns the start and end timestamps of available data,
	// limited by limitDays — presumably the most recent limitDays days;
	// confirm against the implementation.
	DateRange(limitDays int) (time.Time, time.Time, error)
}
// errNoData is a sentinel error indicating there are no allocation dates or
// rows to export.
var errNoData = errors.New("no data")
  23. func UpdateCSV(ctx context.Context, fileManager filemanager.FileManager, model AllocationModel, labelsAll bool, labels []string) error {
  24. exporter := &csvExporter{
  25. FileManager: fileManager,
  26. Model: model,
  27. LabelsAll: labelsAll,
  28. Labels: labels,
  29. }
  30. return exporter.Update(ctx)
  31. }
// csvExporter incrementally exports daily allocation data to a CSV file in
// cloud storage, adding rows only for dates not already exported.
type csvExporter struct {
	FileManager filemanager.FileManager // storage backend for download/upload of the export file
	Model       AllocationModel         // source of allocation data
	Labels      []string                // If not empty, create a column for each label prefixed with "Label_"
	LabelsAll   bool                    // if true, export all labels to a "Labels" column in JSON format
}
// Update updates CSV file in cloud storage with new allocation data.
// It downloads the previous export (if any), computes rows only for dates not
// yet covered, merges old and new rows into a temp file, and uploads the
// result. All temp files are cleaned up via closeAndDelete.
func (e *csvExporter) Update(ctx context.Context) error {
	allocationDates, err := e.availableAllocationDates()
	if err != nil {
		return err
	}
	if len(allocationDates) == 0 {
		return errors.New("no data to export from prometheus")
	}
	// resultTmp accumulates the final export that gets uploaded.
	resultTmp, err := os.CreateTemp("", "opencost-export-*.csv")
	if err != nil {
		return err
	}
	defer closeAndDelete(resultTmp)
	// previousExportTmp receives the existing export file, if one exists.
	previousExportTmp, err := os.CreateTemp("", "opencost-previous-export-*.csv")
	if err != nil {
		return err
	}
	defer closeAndDelete(previousExportTmp)
	err = e.FileManager.Download(ctx, previousExportTmp)
	switch {
	case errors.Is(err, filemanager.ErrNotFound):
		// there is no previous file, so we need to create it
		err := e.writeCSVToWriter(ctx, resultTmp, mapTimeToSlice(allocationDates))
		if err != nil {
			return err
		}
	case err != nil:
		return err
	default:
		// existing export file exists
		// scan through it and ignore all dates that are already in the file
		// avoid modifying existing data or producing duplicates
		err := e.updateExportCSV(ctx, previousExportTmp, allocationDates, resultTmp)
		if err != nil {
			return err
		}
	}
	// we just wrote to the file, so we need to seek to the beginning, so we can read from it
	_, err = resultTmp.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	err = e.FileManager.Upload(ctx, resultTmp)
	if err != nil {
		return err
	}
	log.Info("CSV export updated")
	return nil
}
// updateExportCSV writes into result a CSV containing both the rows of the
// previous export and new rows for dates it does not yet cover.
// NOTE: allocationDates is mutated — dates already present in the previous
// export are deleted from it. Returns errNoData when the previous export
// already covers every requested date.
func (e *csvExporter) updateExportCSV(ctx context.Context, previousExportTmp *os.File, allocationDates map[time.Time]struct{}, result *os.File) error {
	previousDates, err := e.loadDates(previousExportTmp)
	if err != nil {
		return err
	}
	// Drop already-exported dates so existing rows are never recomputed.
	for date := range previousDates {
		delete(allocationDates, date)
	}
	if len(allocationDates) == 0 {
		log.Info("export file in cloud storage already contain data for all dates, skipping update")
		return errNoData
	}
	// Export only the missing dates to a fresh temp file, then merge it with
	// the previous export into result.
	newExportTmp, err := os.CreateTemp("", "opencost-new-export-*.csv")
	if err != nil {
		return err
	}
	defer closeAndDelete(newExportTmp)
	err = e.writeCSVToWriter(ctx, newExportTmp, mapTimeToSlice(allocationDates))
	if err != nil {
		return err
	}
	err = mergeCSV([]*os.File{previousExportTmp, newExportTmp}, result)
	if err != nil {
		return err
	}
	return nil
}
  115. func (e *csvExporter) availableAllocationDates() (map[time.Time]struct{}, error) {
  116. start, end, err := e.Model.DateRange(env.GetExportCSVMaxDays())
  117. if err != nil {
  118. return nil, err
  119. }
  120. if start != time.Date(start.Year(), start.Month(), start.Day(), 0, 0, 0, 0, time.UTC) {
  121. // start doesn't start from 00:00 UTC, it could be truncated by prometheus retention policy
  122. // skip incomplete data and begin from the day after, otherwise it may corrupt existing data
  123. start = time.Date(start.Year(), start.Month(), start.Day(), 0, 0, 0, 0, time.UTC).AddDate(0, 0, 1)
  124. }
  125. end = time.Date(end.Year(), end.Month(), end.Day(), 0, 0, 0, 0, time.UTC)
  126. dates := make(map[time.Time]struct{})
  127. for date := start; date.Before(end); date = date.AddDate(0, 0, 1) {
  128. dates[date] = struct{}{}
  129. }
  130. if len(dates) == 0 {
  131. return nil, errNoData
  132. }
  133. return dates, nil
  134. }
// writeCSVToWriter computes allocation data for each given date (one full UTC
// day, at 5-minute resolution) and writes it to w as CSV: a header row
// followed by one row per allocation. The fixed column set is optionally
// extended with a JSON "Labels" column (LabelsAll) and one "Label_<name>"
// column per entry in Labels. Returns errNoData if no data rows were written.
func (e *csvExporter) writeCSVToWriter(ctx context.Context, w io.Writer, dates []time.Time) error {
	// fmtFloat renders a float with the minimal number of digits and no
	// exponent, suitable for spreadsheet consumption.
	fmtFloat := func(f float64) string {
		return strconv.FormatFloat(f, 'f', -1, 64)
	}
	// rowData carries everything a column extractor needs for one output row.
	type rowData struct {
		date  time.Time
		alloc *opencost.Allocation
	}
	// columnDef pairs a header name with the function producing that
	// column's value for a row.
	type columnDef struct {
		column string
		value  func(data rowData) string
	}
	csvDef := []columnDef{
		{
			column: "Date",
			value: func(data rowData) string {
				return data.date.Format("2006-01-02")
			},
		},
		{
			column: "Namespace",
			value: func(data rowData) string {
				return data.alloc.Properties.Namespace
			},
		},
		{
			column: "ControllerKind",
			value: func(data rowData) string {
				return data.alloc.Properties.ControllerKind
			},
		},
		{
			column: "ControllerName",
			value: func(data rowData) string {
				return data.alloc.Properties.Controller
			},
		},
		{
			column: "Pod",
			value: func(data rowData) string {
				return data.alloc.Properties.Pod
			},
		},
		{
			column: "Container",
			value: func(data rowData) string {
				return data.alloc.Properties.Container
			},
		},
		{
			column: "CPUCoreUsageAverage",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.CPUCoreUsageAverage)
			},
		},
		{
			column: "CPUCoreRequestAverage",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.CPUCoreRequestAverage)
			},
		},
		{
			column: "RAMBytesUsageAverage",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.RAMBytesUsageAverage)
			},
		},
		{
			column: "RAMBytesRequestAverage",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.RAMBytesRequestAverage)
			},
		},
		{
			column: "NetworkReceiveBytes",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.NetworkReceiveBytes)
			},
		},
		{
			column: "NetworkTransferBytes",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.NetworkTransferBytes)
			},
		},
		{
			column: "GPUs",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.GPUs())
			},
		},
		{
			column: "PVBytes",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.PVBytes())
			},
		},
		{
			column: "CPUCost",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.CPUTotalCost())
			},
		},
		{
			column: "RAMCost",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.RAMTotalCost())
			},
		},
		{
			column: "NetworkCost",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.NetworkTotalCost())
			},
		},
		{
			column: "PVCost",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.PVTotalCost())
			},
		},
		{
			column: "GPUCost",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.GPUTotalCost())
			},
		},
		{
			column: "TotalCost",
			value: func(data rowData) string {
				return fmtFloat(data.alloc.TotalCost())
			},
		},
	}
	// Optional column: all labels serialized as one JSON object.
	if e.LabelsAll {
		csvDef = append(csvDef, columnDef{
			column: "Labels",
			value: func(data rowData) string {
				return fmtLabelsCSV(data.alloc.Properties.Labels)
			},
		})
	}
	// Optional columns: one per explicitly requested label.
	for i := range e.Labels {
		label := e.Labels[i] // it's important to copy the label name, otherwise all closures will reference the same label
		csvDef = append(csvDef, columnDef{
			column: "Label_" + label,
			value: func(data rowData) string {
				value := data.alloc.Properties.Labels[label]
				return value
			},
		})
	}
	header := make([]string, 0, len(csvDef))
	for _, def := range csvDef {
		header = append(header, def.column)
	}
	csvWriter := csv.NewWriter(w)
	lines := 0
	err := csvWriter.Write(header)
	if err != nil {
		return fmt.Errorf("failed to write header: %w", err)
	}
	log.Infof("writing CSV with header: %v", header)
	for _, date := range dates {
		// Each date is exported as one full UTC day: [00:00, next day 00:00).
		start := time.Date(date.Year(), date.Month(), date.Day(), 0, 0, 0, 0, time.UTC)
		end := start.AddDate(0, 0, 1)
		data, err := e.Model.ComputeAllocation(start, end, 5*time.Minute)
		if err != nil {
			return err
		}
		log.Infof("fetched %d records for %s", len(data.Allocations), date.Format("2006-01-02"))
		for _, alloc := range data.Allocations {
			// Stop promptly if the caller canceled the export.
			if err := ctx.Err(); err != nil {
				return err
			}
			row := make([]string, 0, len(csvDef))
			for _, def := range csvDef {
				row = append(row, def.value(rowData{date: date, alloc: alloc}))
			}
			err := csvWriter.Write(row)
			if err != nil {
				return fmt.Errorf("failed to write csv row: %w", err)
			}
			lines++
		}
	}
	// No data rows at all: report errNoData (buffered header is discarded,
	// since Flush is never reached).
	if lines == 0 {
		return errNoData
	}
	csvWriter.Flush()
	if err := csvWriter.Error(); err != nil {
		return err
	}
	log.Infof("exported %d lines", lines)
	return nil
}
  331. func fmtLabelsCSV(labels map[string]string) string {
  332. if len(labels) == 0 {
  333. return ""
  334. }
  335. data, err := json.Marshal(labels)
  336. if err != nil {
  337. log.Errorf("failed to marshal labels: %s", err)
  338. return ""
  339. }
  340. return string(data)
  341. }
  342. // loadDate scans through CSV export file and extract all dates from "Date" column
  343. func (e *csvExporter) loadDates(csvFile *os.File) (map[time.Time]struct{}, error) {
  344. _, err := csvFile.Seek(0, io.SeekStart)
  345. if err != nil {
  346. return nil, fmt.Errorf("seeking to the beginning of csv file: %w", err)
  347. }
  348. csvReader := csv.NewReader(csvFile)
  349. header, err := csvReader.Read()
  350. if err != nil {
  351. return nil, fmt.Errorf("reading csv header: %w", err)
  352. }
  353. dateColIndex := 0
  354. for i, col := range header {
  355. if col == "Date" {
  356. dateColIndex = i
  357. break
  358. }
  359. }
  360. dates := make(map[time.Time]struct{})
  361. for {
  362. row, err := csvReader.Read()
  363. if errors.Is(err, io.EOF) {
  364. break
  365. }
  366. if err != nil {
  367. return nil, fmt.Errorf("reading csv row: %w", err)
  368. }
  369. date, err := time.Parse("2006-01-02", row[dateColIndex])
  370. if err != nil {
  371. return nil, fmt.Errorf("parsing date: %w", err)
  372. }
  373. dates[date] = struct{}{}
  374. }
  375. return dates, nil
  376. }
  377. // mergeCSV merges multiple csv files into one.
  378. // Files may have different headers, but the result will have a header that is a union of all headers.
  379. // The main goal here is to allow changing CSV format without breaking or loosing existing data.
  380. func mergeCSV(input []*os.File, output *os.File) error {
  381. var err error
  382. headers := make([][]string, 0, len(input))
  383. csvReaders := make([]*csv.Reader, 0, len(input))
  384. // first, get information about the result header
  385. for _, file := range input {
  386. _, err = file.Seek(0, io.SeekStart)
  387. if err != nil {
  388. return fmt.Errorf("seeking to the beginning of csv file: %w", err)
  389. }
  390. csvReader := csv.NewReader(file)
  391. header, err := csvReader.Read()
  392. if errors.Is(err, io.EOF) {
  393. // ignore empty files
  394. continue
  395. }
  396. if err != nil {
  397. return fmt.Errorf("reading header of csv file: %w", err)
  398. }
  399. headers = append(headers, header)
  400. csvReaders = append(csvReaders, csvReader)
  401. }
  402. mapping, header := combineHeaders(headers)
  403. csvWriter := csv.NewWriter(output)
  404. err = csvWriter.Write(mergeHeaders(headers))
  405. if err != nil {
  406. return fmt.Errorf("writing header to csv file: %w", err)
  407. }
  408. for csvIndex, csvReader := range csvReaders {
  409. for {
  410. inputLine, err := csvReader.Read()
  411. if errors.Is(err, io.EOF) {
  412. break
  413. }
  414. if err != nil {
  415. return fmt.Errorf("reading csv file line: %w", err)
  416. }
  417. outputLine := make([]string, len(header))
  418. for colIndex := range header {
  419. destColIndex, ok := mapping[csvIndex][colIndex]
  420. if !ok {
  421. continue
  422. }
  423. outputLine[destColIndex] = inputLine[colIndex]
  424. }
  425. err = csvWriter.Write(outputLine)
  426. if err != nil {
  427. return fmt.Errorf("writing line to csv file: %w", err)
  428. }
  429. }
  430. }
  431. csvWriter.Flush()
  432. // check for errors from the Flush
  433. if csvWriter.Error() != nil {
  434. return fmt.Errorf("flushing csv file: %w", csvWriter.Error())
  435. }
  436. return nil
  437. }
  438. func combineHeaders(headers [][]string) ([]map[int]int, []string) {
  439. result := make([]string, 0)
  440. indices := make([]map[int]int, len(headers))
  441. for i, header := range headers {
  442. indices[i] = make(map[int]int)
  443. for j, column := range header {
  444. if !contains(result, column) {
  445. result = append(result, column)
  446. indices[i][j] = len(result) - 1
  447. } else {
  448. indices[i][j] = indexOf(result, column)
  449. }
  450. }
  451. }
  452. return indices, result
  453. }
  454. func mergeHeaders(headers [][]string) []string {
  455. result := make([]string, 0)
  456. for _, header := range headers {
  457. for _, column := range header {
  458. if !contains(result, column) {
  459. result = append(result, column)
  460. }
  461. }
  462. }
  463. return result
  464. }
  465. func contains(slice []string, item string) bool {
  466. for _, element := range slice {
  467. if element == item {
  468. return true
  469. }
  470. }
  471. return false
  472. }
  473. func indexOf(slice []string, element string) int {
  474. for i, e := range slice {
  475. if e == element {
  476. return i
  477. }
  478. }
  479. return -1
  480. }
  481. func mapTimeToSlice(data map[time.Time]struct{}) []time.Time {
  482. result := make([]time.Time, 0, len(data))
  483. for key := range data {
  484. result = append(result, key)
  485. }
  486. sort.Slice(result, func(i, j int) bool {
  487. return result[i].Before(result[j])
  488. })
  489. return result
  490. }
  491. func closeAndDelete(f *os.File) {
  492. if err := f.Close(); err != nil {
  493. log.Errorf("error closing file: %v", err)
  494. }
  495. if err := os.Remove(f.Name()); err != nil {
  496. log.Errorf("error deleting file: %v", err)
  497. }
  498. }