aggregation.go 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346
  1. package costmodel
  2. import (
  3. "fmt"
  4. "math"
  5. "net/http"
  6. "regexp"
  7. "sort"
  8. "strconv"
  9. "strings"
  10. "time"
  11. "github.com/julienschmidt/httprouter"
  12. "github.com/opencost/opencost/pkg/cloud/provider"
  13. "github.com/patrickmn/go-cache"
  14. prometheusClient "github.com/prometheus/client_golang/api"
  15. "github.com/opencost/opencost/core/pkg/log"
  16. "github.com/opencost/opencost/core/pkg/opencost"
  17. "github.com/opencost/opencost/core/pkg/util"
  18. "github.com/opencost/opencost/core/pkg/util/httputil"
  19. "github.com/opencost/opencost/core/pkg/util/json"
  20. "github.com/opencost/opencost/core/pkg/util/promutil"
  21. "github.com/opencost/opencost/core/pkg/util/timeutil"
  22. "github.com/opencost/opencost/pkg/cloud/models"
  23. "github.com/opencost/opencost/pkg/env"
  24. "github.com/opencost/opencost/pkg/errors"
  25. "github.com/opencost/opencost/pkg/prom"
  26. "github.com/opencost/opencost/pkg/thanos"
  27. )
const (
	// SplitTypeWeighted signals that shared costs should be shared
	// proportionally, rather than evenly
	SplitTypeWeighted = "weighted"

	// UnallocatedSubfield indicates an allocation datum that does not have the
	// chosen Aggregator; e.g. during aggregation by some label, there may be
	// cost data that do not have the given label.
	UnallocatedSubfield = "__unallocated__"

	// clusterCostsCacheMinutes is the cache lifetime, in minutes, for computed
	// cluster costs — presumably consumed by the ClusterCostsCache used in
	// ComputeIdleCoefficient; confirm against cache construction.
	clusterCostsCacheMinutes = 5.0
)
// Aggregation describes aggregated cost data, containing cumulative cost and
// allocation data per resource, vectors of rate data per resource, efficiency
// data, and metadata describing the type of aggregation operation.
//
// Fields tagged `json:"-"` are intermediate working state used while
// aggregating and are never serialized; *CostVector fields are emitted only
// when time-series data is explicitly requested (see AggregationOptions).
type Aggregation struct {
	// Aggregator is the field aggregated by, e.g. "namespace" or "label".
	Aggregator string `json:"aggregation"`
	// Subfields holds label/annotation names when aggregating by them.
	Subfields []string `json:"subfields,omitempty"`
	// Environment is the value of the aggregated field for this group.
	Environment string `json:"environment"`
	Cluster     string `json:"cluster,omitempty"`
	Properties  *opencost.AllocationProperties `json:"-"`
	Start       time.Time `json:"-"`
	End         time.Time `json:"-"`

	// CPU: allocation (requested/used) working vectors, cumulative totals,
	// cost, and efficiency.
	CPUAllocationHourlyAverage float64 `json:"cpuAllocationAverage"`
	CPUAllocationVectors       []*util.Vector `json:"-"`
	CPUAllocationTotal         float64 `json:"-"`
	CPUCost                    float64 `json:"cpuCost"`
	CPUCostVector              []*util.Vector `json:"cpuCostVector,omitempty"`
	CPUEfficiency              float64 `json:"cpuEfficiency"`
	CPURequestedVectors        []*util.Vector `json:"-"`
	CPUUsedVectors             []*util.Vector `json:"-"`

	// Efficiency is the cost-weighted blend of CPU and RAM efficiency.
	Efficiency float64 `json:"efficiency"`

	// GPU cost and allocation. NOTE(review): GPU allocation appears to be a
	// rate per-datum rather than cumulative like CPU/RAM/PV — see averaging
	// in AggregateCostData.
	GPUAllocationHourlyAverage float64 `json:"gpuAllocationAverage"`
	GPUAllocationVectors       []*util.Vector `json:"-"`
	GPUCost                    float64 `json:"gpuCost"`
	GPUCostVector              []*util.Vector `json:"gpuCostVector,omitempty"`
	GPUAllocationTotal         float64 `json:"-"`

	// RAM: allocation, cost, and efficiency. Hourly average is reported in
	// GiB after a bytes-to-GiB conversion at the end of aggregation.
	RAMAllocationHourlyAverage float64 `json:"ramAllocationAverage"`
	RAMAllocationVectors       []*util.Vector `json:"-"`
	RAMAllocationTotal         float64 `json:"-"`
	RAMCost                    float64 `json:"ramCost"`
	RAMCostVector              []*util.Vector `json:"ramCostVector,omitempty"`
	RAMEfficiency              float64 `json:"ramEfficiency"`
	RAMRequestedVectors        []*util.Vector `json:"-"`
	RAMUsedVectors             []*util.Vector `json:"-"`

	// PV (persistent volume) allocation and cost; hourly average also
	// converted bytes-to-GiB at the end of aggregation.
	PVAllocationHourlyAverage float64 `json:"pvAllocationAverage"`
	PVAllocationVectors       []*util.Vector `json:"-"`
	PVAllocationTotal         float64 `json:"-"`
	PVCost                    float64 `json:"pvCost"`
	PVCostVector              []*util.Vector `json:"pvCostVector,omitempty"`

	NetworkCost       float64 `json:"networkCost"`
	NetworkCostVector []*util.Vector `json:"networkCostVector,omitempty"`

	// SharedCost is this group's share of costs marked as shared.
	SharedCost float64 `json:"sharedCost"`
	// TotalCost is the sum of all per-resource costs plus SharedCost.
	TotalCost       float64 `json:"totalCost"`
	TotalCostVector []*util.Vector `json:"totalCostVector,omitempty"`
}
  82. // TotalHours determines the amount of hours the Aggregation covers, as a
  83. // function of the cost vectors and the resolution of those vectors' data
  84. func (a *Aggregation) TotalHours(resolutionHours float64) float64 {
  85. length := 1
  86. if length < len(a.CPUCostVector) {
  87. length = len(a.CPUCostVector)
  88. }
  89. if length < len(a.RAMCostVector) {
  90. length = len(a.RAMCostVector)
  91. }
  92. if length < len(a.PVCostVector) {
  93. length = len(a.PVCostVector)
  94. }
  95. if length < len(a.GPUCostVector) {
  96. length = len(a.GPUCostVector)
  97. }
  98. if length < len(a.NetworkCostVector) {
  99. length = len(a.NetworkCostVector)
  100. }
  101. return float64(length) * resolutionHours
  102. }
  103. // RateCoefficient computes the coefficient by which the total cost needs to be
  104. // multiplied in order to convert totals costs into per-rate costs.
  105. func (a *Aggregation) RateCoefficient(rateStr string, resolutionHours float64) float64 {
  106. // monthly rate = (730.0)*(total cost)/(total hours)
  107. // daily rate = (24.0)*(total cost)/(total hours)
  108. // hourly rate = (1.0)*(total cost)/(total hours)
  109. // default to hourly rate
  110. coeff := 1.0
  111. switch rateStr {
  112. case "daily":
  113. coeff = timeutil.HoursPerDay
  114. case "monthly":
  115. coeff = timeutil.HoursPerMonth
  116. }
  117. return coeff / a.TotalHours(resolutionHours)
  118. }
// SharedResourceInfo describes which cost data should be treated as shared
// across all other resources rather than reported stand-alone: anything in
// one of the named namespaces, or anything carrying a matching label value.
type SharedResourceInfo struct {
	// ShareResources toggles shared-resource handling as a whole.
	ShareResources bool
	// SharedNamespace is the set of namespaces whose costs are shared.
	SharedNamespace map[string]bool
	// LabelSelectors maps label name -> set of label values that mark a
	// resource as shared.
	LabelSelectors map[string]map[string]bool
}
// SharedCostInfo describes a named external cost to be distributed across
// aggregations (e.g. an overhead amount), along with how it should be split.
type SharedCostInfo struct {
	Name      string
	Cost      float64
	ShareType string
}
  129. func (s *SharedResourceInfo) IsSharedResource(costDatum *CostData) bool {
  130. // exists in a shared namespace
  131. if _, ok := s.SharedNamespace[costDatum.Namespace]; ok {
  132. return true
  133. }
  134. // has at least one shared label (OR, not AND in the case of multiple labels)
  135. for labelName, labelValues := range s.LabelSelectors {
  136. if val, ok := costDatum.Labels[labelName]; ok && labelValues[val] {
  137. return true
  138. }
  139. }
  140. return false
  141. }
  142. func NewSharedResourceInfo(shareResources bool, sharedNamespaces []string, labelNames []string, labelValues []string) *SharedResourceInfo {
  143. sr := &SharedResourceInfo{
  144. ShareResources: shareResources,
  145. SharedNamespace: make(map[string]bool),
  146. LabelSelectors: make(map[string]map[string]bool),
  147. }
  148. for _, ns := range sharedNamespaces {
  149. sr.SharedNamespace[strings.Trim(ns, " ")] = true
  150. }
  151. // Creating a map of label name to label value, but only if
  152. // the cardinality matches
  153. if len(labelNames) == len(labelValues) {
  154. for i := range labelNames {
  155. cleanedLname := promutil.SanitizeLabelName(strings.Trim(labelNames[i], " "))
  156. if values, ok := sr.LabelSelectors[cleanedLname]; ok {
  157. values[strings.Trim(labelValues[i], " ")] = true
  158. } else {
  159. sr.LabelSelectors[cleanedLname] = map[string]bool{strings.Trim(labelValues[i], " "): true}
  160. }
  161. }
  162. }
  163. return sr
  164. }
  165. func GetTotalContainerCost(costData map[string]*CostData, rate string, cp models.Provider, discount float64, customDiscount float64, idleCoefficients map[string]float64) float64 {
  166. totalContainerCost := 0.0
  167. for _, costDatum := range costData {
  168. clusterID := costDatum.ClusterID
  169. cpuv, ramv, gpuv, pvvs, netv := getPriceVectors(cp, costDatum, rate, discount, customDiscount, idleCoefficients[clusterID])
  170. totalContainerCost += totalVectors(cpuv)
  171. totalContainerCost += totalVectors(ramv)
  172. totalContainerCost += totalVectors(gpuv)
  173. for _, pv := range pvvs {
  174. totalContainerCost += totalVectors(pv)
  175. }
  176. totalContainerCost += totalVectors(netv)
  177. }
  178. return totalContainerCost
  179. }
// ComputeIdleCoefficient computes, per cluster, the fraction of total cluster
// cost accounted for by the containers in costData. The coefficient can later
// be used to scale container costs so that idle (unaccounted) capacity is
// distributed. Returns a map of cluster ID -> coefficient, or an error if
// cluster costs cannot be computed or a cluster's total cost is zero.
func (a *Accesses) ComputeIdleCoefficient(costData map[string]*CostData, cli prometheusClient.Client, cp models.Provider, discount float64, customDiscount float64, window, offset time.Duration) (map[string]float64, error) {
	coefficients := make(map[string]float64)

	profileName := "ComputeIdleCoefficient: ComputeClusterCosts"
	profileStart := time.Now()

	var clusterCosts map[string]*ClusterCosts
	var err error
	fmtWindow, fmtOffset := timeutil.DurationOffsetStrings(window, offset)
	key := fmt.Sprintf("%s:%s", fmtWindow, fmtOffset)
	// Serve cluster costs from the cache when present; otherwise recompute.
	// NOTE(review): the freshly computed result is not inserted into the cache
	// here — presumably population happens elsewhere; confirm.
	if data, valid := a.ClusterCostsCache.Get(key); valid {
		clusterCosts = data.(map[string]*ClusterCosts)
	} else {
		clusterCosts, err = a.ComputeClusterCosts(cli, cp, window, offset, false)
		if err != nil {
			return nil, err
		}
	}
	measureTime(profileStart, profileThreshold, profileName)
	for cid, costs := range clusterCosts {
		// A cluster reporting no resource costs at all gets a neutral
		// coefficient of 1 rather than failing the whole computation.
		if costs.CPUCumulative == 0 && costs.RAMCumulative == 0 && costs.StorageCumulative == 0 {
			log.Warnf("No ClusterCosts data for cluster '%s'. Is it emitting data?", cid)
			coefficients[cid] = 1.0
			continue
		}
		// Guard the division below.
		if costs.TotalCumulative == 0 {
			return nil, fmt.Errorf("TotalCumulative cluster cost for cluster '%s' returned 0 over window '%s' offset '%s'", cid, fmtWindow, fmtOffset)
		}
		totalContainerCost := 0.0
		for _, costDatum := range costData {
			if costDatum.ClusterID == cid {
				// Price with no rate conversion and idle coefficient 1;
				// network cost is intentionally excluded from this sum.
				cpuv, ramv, gpuv, pvvs, _ := getPriceVectors(cp, costDatum, "", discount, customDiscount, 1)
				totalContainerCost += totalVectors(cpuv)
				totalContainerCost += totalVectors(ramv)
				totalContainerCost += totalVectors(gpuv)
				for _, pv := range pvvs {
					totalContainerCost += totalVectors(pv)
				}
			}
		}
		coeff := totalContainerCost / costs.TotalCumulative
		coefficients[cid] = coeff
	}
	return coefficients, nil
}
// AggregationOptions provides optional parameters to AggregateCostData, allowing callers to perform more complex operations
type AggregationOptions struct {
	Discount         float64            // percent by which to discount CPU, RAM, and GPU cost
	CustomDiscount   float64            // additional custom discount applied to all prices
	IdleCoefficients map[string]float64 // scales costs by amount of idle resources on a per-cluster basis
	IncludeEfficiency bool              // set to true to receive efficiency/usage data
	IncludeTimeSeries bool              // set to true to receive time series data
	Rate             string             // set to "hourly", "daily", or "monthly" to receive cost rate, rather than cumulative cost
	ResolutionHours  float64            // width of one datapoint, in hours; defaults to 1.0 when unset (<= 0)
	SharedResourceInfo *SharedResourceInfo // selects which cost data are treated as shared rather than stand-alone
	SharedCosts      map[string]*SharedCostInfo // named external costs distributed across aggregations
	FilteredContainerCount int          // count of containers excluded by filtering
	FilteredEnvironments map[string]int // environments excluded by filtering, and their counts
	SharedSplit      string             // SplitTypeWeighted for proportional sharing; otherwise shared costs split evenly
	TotalContainerCost float64          // total unfiltered container cost; denominator for weighted sharing
}
  239. // Helper method to test request/usgae values against allocation averages for efficiency scores. Generate a warning log if
  240. // clamp is required
  241. func clampAverage(requestsAvg float64, usedAverage float64, allocationAvg float64, resource string) (float64, float64) {
  242. rAvg := requestsAvg
  243. if rAvg > allocationAvg {
  244. log.Debugf("Average %s Requested (%f) > Average %s Allocated (%f). Clamping.", resource, rAvg, resource, allocationAvg)
  245. rAvg = allocationAvg
  246. }
  247. uAvg := usedAverage
  248. if uAvg > allocationAvg {
  249. log.Debugf(" Average %s Used (%f) > Average %s Allocated (%f). Clamping.", resource, uAvg, resource, allocationAvg)
  250. uAvg = allocationAvg
  251. }
  252. return rAvg, uAvg
  253. }
  254. // AggregateCostData aggregates raw cost data by field; e.g. namespace, cluster, service, or label. In the case of label, callers
  255. // must pass a slice of subfields indicating the labels by which to group. Provider is used to define custom resource pricing.
  256. // See AggregationOptions for optional parameters.
  257. func AggregateCostData(costData map[string]*CostData, field string, subfields []string, cp models.Provider, opts *AggregationOptions) map[string]*Aggregation {
  258. discount := opts.Discount
  259. customDiscount := opts.CustomDiscount
  260. idleCoefficients := opts.IdleCoefficients
  261. includeTimeSeries := opts.IncludeTimeSeries
  262. includeEfficiency := opts.IncludeEfficiency
  263. rate := opts.Rate
  264. sr := opts.SharedResourceInfo
  265. resolutionHours := 1.0
  266. if opts.ResolutionHours > 0.0 {
  267. resolutionHours = opts.ResolutionHours
  268. }
  269. if idleCoefficients == nil {
  270. idleCoefficients = make(map[string]float64)
  271. }
  272. // aggregations collects key-value pairs of resource group-to-aggregated data
  273. // e.g. namespace-to-data or label-value-to-data
  274. aggregations := make(map[string]*Aggregation)
  275. // sharedResourceCost is the running total cost of resources that should be reported
  276. // as shared across all other resources, rather than reported as a stand-alone category
  277. sharedResourceCost := 0.0
  278. for _, costDatum := range costData {
  279. idleCoefficient, ok := idleCoefficients[costDatum.ClusterID]
  280. if !ok {
  281. idleCoefficient = 1.0
  282. }
  283. if sr != nil && sr.ShareResources && sr.IsSharedResource(costDatum) {
  284. cpuv, ramv, gpuv, pvvs, netv := getPriceVectors(cp, costDatum, rate, discount, customDiscount, idleCoefficient)
  285. sharedResourceCost += totalVectors(cpuv)
  286. sharedResourceCost += totalVectors(ramv)
  287. sharedResourceCost += totalVectors(gpuv)
  288. sharedResourceCost += totalVectors(netv)
  289. for _, pv := range pvvs {
  290. sharedResourceCost += totalVectors(pv)
  291. }
  292. } else {
  293. if field == "cluster" {
  294. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.ClusterID, discount, customDiscount, idleCoefficient, false)
  295. } else if field == "node" {
  296. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.NodeName, discount, customDiscount, idleCoefficient, false)
  297. } else if field == "namespace" {
  298. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace, discount, customDiscount, idleCoefficient, false)
  299. } else if field == "service" {
  300. if len(costDatum.Services) > 0 {
  301. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.Services[0], discount, customDiscount, idleCoefficient, false)
  302. } else {
  303. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
  304. }
  305. } else if field == "deployment" {
  306. if len(costDatum.Deployments) > 0 {
  307. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.Deployments[0], discount, customDiscount, idleCoefficient, false)
  308. } else {
  309. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
  310. }
  311. } else if field == "statefulset" {
  312. if len(costDatum.Statefulsets) > 0 {
  313. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.Statefulsets[0], discount, customDiscount, idleCoefficient, false)
  314. } else {
  315. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
  316. }
  317. } else if field == "daemonset" {
  318. if len(costDatum.Daemonsets) > 0 {
  319. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.Daemonsets[0], discount, customDiscount, idleCoefficient, false)
  320. } else {
  321. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
  322. }
  323. } else if field == "controller" {
  324. if controller, kind, hasController := costDatum.GetController(); hasController {
  325. key := fmt.Sprintf("%s/%s:%s", costDatum.Namespace, kind, controller)
  326. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, key, discount, customDiscount, idleCoefficient, false)
  327. } else {
  328. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
  329. }
  330. } else if field == "label" {
  331. found := false
  332. if costDatum.Labels != nil {
  333. for _, sf := range subfields {
  334. if subfieldName, ok := costDatum.Labels[sf]; ok {
  335. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, subfieldName, discount, customDiscount, idleCoefficient, false)
  336. found = true
  337. break
  338. }
  339. }
  340. }
  341. if !found {
  342. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
  343. }
  344. } else if field == "annotation" {
  345. found := false
  346. if costDatum.Annotations != nil {
  347. for _, sf := range subfields {
  348. if subfieldName, ok := costDatum.Annotations[sf]; ok {
  349. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, subfieldName, discount, customDiscount, idleCoefficient, false)
  350. found = true
  351. break
  352. }
  353. }
  354. }
  355. if !found {
  356. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
  357. }
  358. } else if field == "pod" {
  359. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.PodName, discount, customDiscount, idleCoefficient, false)
  360. } else if field == "container" {
  361. key := fmt.Sprintf("%s/%s/%s/%s", costDatum.ClusterID, costDatum.Namespace, costDatum.PodName, costDatum.Name)
  362. aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, key, discount, customDiscount, idleCoefficient, true)
  363. }
  364. }
  365. }
  366. for key, agg := range aggregations {
  367. sharedCoefficient := 1 / float64(len(opts.FilteredEnvironments)+len(aggregations))
  368. agg.CPUCost = totalVectors(agg.CPUCostVector)
  369. agg.RAMCost = totalVectors(agg.RAMCostVector)
  370. agg.GPUCost = totalVectors(agg.GPUCostVector)
  371. agg.PVCost = totalVectors(agg.PVCostVector)
  372. agg.NetworkCost = totalVectors(agg.NetworkCostVector)
  373. if opts.SharedSplit == SplitTypeWeighted {
  374. d := opts.TotalContainerCost - sharedResourceCost
  375. if d == 0 {
  376. log.Warnf("Total container cost '%f' and shared resource cost '%f are the same'. Setting sharedCoefficient to 1", opts.TotalContainerCost, sharedResourceCost)
  377. sharedCoefficient = 1.0
  378. } else {
  379. sharedCoefficient = (agg.CPUCost + agg.RAMCost + agg.GPUCost + agg.PVCost + agg.NetworkCost) / d
  380. }
  381. }
  382. agg.SharedCost = sharedResourceCost * sharedCoefficient
  383. for _, v := range opts.SharedCosts {
  384. agg.SharedCost += v.Cost * sharedCoefficient
  385. }
  386. if rate != "" {
  387. rateCoeff := agg.RateCoefficient(rate, resolutionHours)
  388. agg.CPUCost *= rateCoeff
  389. agg.RAMCost *= rateCoeff
  390. agg.GPUCost *= rateCoeff
  391. agg.PVCost *= rateCoeff
  392. agg.NetworkCost *= rateCoeff
  393. agg.SharedCost *= rateCoeff
  394. }
  395. agg.TotalCost = agg.CPUCost + agg.RAMCost + agg.GPUCost + agg.PVCost + agg.NetworkCost + agg.SharedCost
  396. // Evicted and Completed Pods can still show up here, but have 0 cost.
  397. // Filter these by default. Any reason to keep them?
  398. if agg.TotalCost == 0 {
  399. delete(aggregations, key)
  400. continue
  401. }
  402. // CPU, RAM, and PV allocation are cumulative per-datum, whereas GPU is rate per-datum
  403. agg.CPUAllocationHourlyAverage = totalVectors(agg.CPUAllocationVectors) / agg.TotalHours(resolutionHours)
  404. agg.RAMAllocationHourlyAverage = totalVectors(agg.RAMAllocationVectors) / agg.TotalHours(resolutionHours)
  405. agg.GPUAllocationHourlyAverage = averageVectors(agg.GPUAllocationVectors)
  406. agg.PVAllocationHourlyAverage = totalVectors(agg.PVAllocationVectors) / agg.TotalHours(resolutionHours)
  407. // TODO niko/etl does this check out for GPU data? Do we need to rewrite GPU queries to be
  408. // cumulative?
  409. agg.CPUAllocationTotal = totalVectors(agg.CPUAllocationVectors)
  410. agg.GPUAllocationTotal = totalVectors(agg.GPUAllocationVectors)
  411. agg.PVAllocationTotal = totalVectors(agg.PVAllocationVectors)
  412. agg.RAMAllocationTotal = totalVectors(agg.RAMAllocationVectors)
  413. if includeEfficiency {
  414. // Default both RAM and CPU to 0% efficiency so that a 0-requested, 0-allocated, 0-used situation
  415. // returns 0% efficiency, which should be a red-flag.
  416. //
  417. // If non-zero numbers are available, then efficiency is defined as:
  418. // idlePercentage = (requested - used) / allocated
  419. // efficiency = (1.0 - idlePercentage)
  420. //
  421. // It is possible to score > 100% efficiency, which is meant to be interpreted as a red flag.
  422. // It is not possible to score < 0% efficiency.
  423. agg.CPUEfficiency = 0.0
  424. CPUIdle := 0.0
  425. if agg.CPUAllocationHourlyAverage > 0.0 {
  426. avgCPURequested := averageVectors(agg.CPURequestedVectors)
  427. avgCPUUsed := averageVectors(agg.CPUUsedVectors)
  428. // Clamp averages, log range violations
  429. avgCPURequested, avgCPUUsed = clampAverage(avgCPURequested, avgCPUUsed, agg.CPUAllocationHourlyAverage, "CPU")
  430. CPUIdle = ((avgCPURequested - avgCPUUsed) / agg.CPUAllocationHourlyAverage)
  431. agg.CPUEfficiency = 1.0 - CPUIdle
  432. }
  433. agg.RAMEfficiency = 0.0
  434. RAMIdle := 0.0
  435. if agg.RAMAllocationHourlyAverage > 0.0 {
  436. avgRAMRequested := averageVectors(agg.RAMRequestedVectors)
  437. avgRAMUsed := averageVectors(agg.RAMUsedVectors)
  438. // Clamp averages, log range violations
  439. avgRAMRequested, avgRAMUsed = clampAverage(avgRAMRequested, avgRAMUsed, agg.RAMAllocationHourlyAverage, "RAM")
  440. RAMIdle = ((avgRAMRequested - avgRAMUsed) / agg.RAMAllocationHourlyAverage)
  441. agg.RAMEfficiency = 1.0 - RAMIdle
  442. }
  443. // Score total efficiency by the sum of CPU and RAM efficiency, weighted by their
  444. // respective total costs.
  445. agg.Efficiency = 0.0
  446. if (agg.CPUCost + agg.RAMCost) > 0 {
  447. agg.Efficiency = ((agg.CPUCost * agg.CPUEfficiency) + (agg.RAMCost * agg.RAMEfficiency)) / (agg.CPUCost + agg.RAMCost)
  448. }
  449. }
  450. // convert RAM from bytes to GiB
  451. agg.RAMAllocationHourlyAverage = agg.RAMAllocationHourlyAverage / 1024 / 1024 / 1024
  452. // convert storage from bytes to GiB
  453. agg.PVAllocationHourlyAverage = agg.PVAllocationHourlyAverage / 1024 / 1024 / 1024
  454. // remove time series data if it is not explicitly requested
  455. if !includeTimeSeries {
  456. agg.CPUCostVector = nil
  457. agg.RAMCostVector = nil
  458. agg.GPUCostVector = nil
  459. agg.PVCostVector = nil
  460. agg.NetworkCostVector = nil
  461. agg.TotalCostVector = nil
  462. } else { // otherwise compute a totalcostvector
  463. v1 := addVectors(agg.CPUCostVector, agg.RAMCostVector)
  464. v2 := addVectors(v1, agg.GPUCostVector)
  465. v3 := addVectors(v2, agg.PVCostVector)
  466. v4 := addVectors(v3, agg.NetworkCostVector)
  467. agg.TotalCostVector = v4
  468. }
  469. // Typesafety checks
  470. if math.IsNaN(agg.CPUAllocationHourlyAverage) || math.IsInf(agg.CPUAllocationHourlyAverage, 0) {
  471. log.Warnf("CPUAllocationHourlyAverage is %f for '%s: %s/%s'", agg.CPUAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
  472. agg.CPUAllocationHourlyAverage = 0
  473. }
  474. if math.IsNaN(agg.CPUCost) || math.IsInf(agg.CPUCost, 0) {
  475. log.Warnf("CPUCost is %f for '%s: %s/%s'", agg.CPUCost, agg.Cluster, agg.Aggregator, agg.Environment)
  476. agg.CPUCost = 0
  477. }
  478. if math.IsNaN(agg.CPUEfficiency) || math.IsInf(agg.CPUEfficiency, 0) {
  479. log.Warnf("CPUEfficiency is %f for '%s: %s/%s'", agg.CPUEfficiency, agg.Cluster, agg.Aggregator, agg.Environment)
  480. agg.CPUEfficiency = 0
  481. }
  482. if math.IsNaN(agg.Efficiency) || math.IsInf(agg.Efficiency, 0) {
  483. log.Warnf("Efficiency is %f for '%s: %s/%s'", agg.Efficiency, agg.Cluster, agg.Aggregator, agg.Environment)
  484. agg.Efficiency = 0
  485. }
  486. if math.IsNaN(agg.GPUAllocationHourlyAverage) || math.IsInf(agg.GPUAllocationHourlyAverage, 0) {
  487. log.Warnf("GPUAllocationHourlyAverage is %f for '%s: %s/%s'", agg.GPUAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
  488. agg.GPUAllocationHourlyAverage = 0
  489. }
  490. if math.IsNaN(agg.GPUCost) || math.IsInf(agg.GPUCost, 0) {
  491. log.Warnf("GPUCost is %f for '%s: %s/%s'", agg.GPUCost, agg.Cluster, agg.Aggregator, agg.Environment)
  492. agg.GPUCost = 0
  493. }
  494. if math.IsNaN(agg.RAMAllocationHourlyAverage) || math.IsInf(agg.RAMAllocationHourlyAverage, 0) {
  495. log.Warnf("RAMAllocationHourlyAverage is %f for '%s: %s/%s'", agg.RAMAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
  496. agg.RAMAllocationHourlyAverage = 0
  497. }
  498. if math.IsNaN(agg.RAMCost) || math.IsInf(agg.RAMCost, 0) {
  499. log.Warnf("RAMCost is %f for '%s: %s/%s'", agg.RAMCost, agg.Cluster, agg.Aggregator, agg.Environment)
  500. agg.RAMCost = 0
  501. }
  502. if math.IsNaN(agg.RAMEfficiency) || math.IsInf(agg.RAMEfficiency, 0) {
  503. log.Warnf("RAMEfficiency is %f for '%s: %s/%s'", agg.RAMEfficiency, agg.Cluster, agg.Aggregator, agg.Environment)
  504. agg.RAMEfficiency = 0
  505. }
  506. if math.IsNaN(agg.PVAllocationHourlyAverage) || math.IsInf(agg.PVAllocationHourlyAverage, 0) {
  507. log.Warnf("PVAllocationHourlyAverage is %f for '%s: %s/%s'", agg.PVAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
  508. agg.PVAllocationHourlyAverage = 0
  509. }
  510. if math.IsNaN(agg.PVCost) || math.IsInf(agg.PVCost, 0) {
  511. log.Warnf("PVCost is %f for '%s: %s/%s'", agg.PVCost, agg.Cluster, agg.Aggregator, agg.Environment)
  512. agg.PVCost = 0
  513. }
  514. if math.IsNaN(agg.NetworkCost) || math.IsInf(agg.NetworkCost, 0) {
  515. log.Warnf("NetworkCost is %f for '%s: %s/%s'", agg.NetworkCost, agg.Cluster, agg.Aggregator, agg.Environment)
  516. agg.NetworkCost = 0
  517. }
  518. if math.IsNaN(agg.SharedCost) || math.IsInf(agg.SharedCost, 0) {
  519. log.Warnf("SharedCost is %f for '%s: %s/%s'", agg.SharedCost, agg.Cluster, agg.Aggregator, agg.Environment)
  520. agg.SharedCost = 0
  521. }
  522. if math.IsNaN(agg.TotalCost) || math.IsInf(agg.TotalCost, 0) {
  523. log.Warnf("TotalCost is %f for '%s: %s/%s'", agg.TotalCost, agg.Cluster, agg.Aggregator, agg.Environment)
  524. agg.TotalCost = 0
  525. }
  526. }
  527. return aggregations
  528. }
  529. func aggregateDatum(cp models.Provider, aggregations map[string]*Aggregation, costDatum *CostData, field string, subfields []string, rate string, key string, discount float64, customDiscount float64, idleCoefficient float64, includeProperties bool) {
  530. // add new entry to aggregation results if a new key is encountered
  531. if _, ok := aggregations[key]; !ok {
  532. agg := &Aggregation{
  533. Aggregator: field,
  534. Environment: key,
  535. }
  536. if len(subfields) > 0 {
  537. agg.Subfields = subfields
  538. }
  539. if includeProperties {
  540. props := &opencost.AllocationProperties{}
  541. props.Cluster = costDatum.ClusterID
  542. props.Node = costDatum.NodeName
  543. if controller, kind, hasController := costDatum.GetController(); hasController {
  544. props.Controller = controller
  545. props.ControllerKind = kind
  546. }
  547. props.Labels = costDatum.Labels
  548. props.Annotations = costDatum.Annotations
  549. props.Namespace = costDatum.Namespace
  550. props.Pod = costDatum.PodName
  551. props.Services = costDatum.Services
  552. props.Container = costDatum.Name
  553. agg.Properties = props
  554. }
  555. aggregations[key] = agg
  556. }
  557. mergeVectors(cp, costDatum, aggregations[key], rate, discount, customDiscount, idleCoefficient)
  558. }
  559. func mergeVectors(cp models.Provider, costDatum *CostData, aggregation *Aggregation, rate string, discount float64, customDiscount float64, idleCoefficient float64) {
  560. aggregation.CPUAllocationVectors = addVectors(costDatum.CPUAllocation, aggregation.CPUAllocationVectors)
  561. aggregation.CPURequestedVectors = addVectors(costDatum.CPUReq, aggregation.CPURequestedVectors)
  562. aggregation.CPUUsedVectors = addVectors(costDatum.CPUUsed, aggregation.CPUUsedVectors)
  563. aggregation.RAMAllocationVectors = addVectors(costDatum.RAMAllocation, aggregation.RAMAllocationVectors)
  564. aggregation.RAMRequestedVectors = addVectors(costDatum.RAMReq, aggregation.RAMRequestedVectors)
  565. aggregation.RAMUsedVectors = addVectors(costDatum.RAMUsed, aggregation.RAMUsedVectors)
  566. aggregation.GPUAllocationVectors = addVectors(costDatum.GPUReq, aggregation.GPUAllocationVectors)
  567. for _, pvcd := range costDatum.PVCData {
  568. aggregation.PVAllocationVectors = addVectors(pvcd.Values, aggregation.PVAllocationVectors)
  569. }
  570. cpuv, ramv, gpuv, pvvs, netv := getPriceVectors(cp, costDatum, rate, discount, customDiscount, idleCoefficient)
  571. aggregation.CPUCostVector = addVectors(cpuv, aggregation.CPUCostVector)
  572. aggregation.RAMCostVector = addVectors(ramv, aggregation.RAMCostVector)
  573. aggregation.GPUCostVector = addVectors(gpuv, aggregation.GPUCostVector)
  574. aggregation.NetworkCostVector = addVectors(netv, aggregation.NetworkCostVector)
  575. for _, vectorList := range pvvs {
  576. aggregation.PVCostVector = addVectors(aggregation.PVCostVector, vectorList)
  577. }
  578. }
  579. // Returns the blended discounts applied to the node as a result of global discounts and reserved instance
  580. // discounts
  581. func getDiscounts(costDatum *CostData, cpuCost float64, ramCost float64, discount float64) (float64, float64) {
  582. if costDatum.NodeData == nil {
  583. return discount, discount
  584. }
  585. if costDatum.NodeData.IsSpot() {
  586. return 0, 0
  587. }
  588. reserved := costDatum.NodeData.Reserved
  589. // blended discounts
  590. blendedCPUDiscount := discount
  591. blendedRAMDiscount := discount
  592. if reserved != nil && reserved.CPUCost > 0 && reserved.RAMCost > 0 {
  593. reservedCPUDiscount := 0.0
  594. if cpuCost == 0 {
  595. log.Warnf("No cpu cost found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
  596. } else {
  597. reservedCPUDiscount = 1.0 - (reserved.CPUCost / cpuCost)
  598. }
  599. reservedRAMDiscount := 0.0
  600. if ramCost == 0 {
  601. log.Warnf("No ram cost found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
  602. } else {
  603. reservedRAMDiscount = 1.0 - (reserved.RAMCost / ramCost)
  604. }
  605. // AWS passes the # of reserved CPU and RAM as -1 to represent "All"
  606. if reserved.ReservedCPU < 0 && reserved.ReservedRAM < 0 {
  607. blendedCPUDiscount = reservedCPUDiscount
  608. blendedRAMDiscount = reservedRAMDiscount
  609. } else {
  610. nodeCPU, ierr := strconv.ParseInt(costDatum.NodeData.VCPU, 10, 64)
  611. nodeRAM, ferr := strconv.ParseFloat(costDatum.NodeData.RAMBytes, 64)
  612. if ierr == nil && ferr == nil {
  613. nodeRAMGB := nodeRAM / 1024 / 1024 / 1024
  614. reservedRAMGB := float64(reserved.ReservedRAM) / 1024 / 1024 / 1024
  615. nonReservedCPU := nodeCPU - reserved.ReservedCPU
  616. nonReservedRAM := nodeRAMGB - reservedRAMGB
  617. if nonReservedCPU == 0 {
  618. blendedCPUDiscount = reservedCPUDiscount
  619. } else {
  620. if nodeCPU == 0 {
  621. log.Warnf("No ram found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
  622. } else {
  623. blendedCPUDiscount = (float64(reserved.ReservedCPU) * reservedCPUDiscount) + (float64(nonReservedCPU)*discount)/float64(nodeCPU)
  624. }
  625. }
  626. if nonReservedRAM == 0 {
  627. blendedRAMDiscount = reservedRAMDiscount
  628. } else {
  629. if nodeRAMGB == 0 {
  630. log.Warnf("No ram found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
  631. } else {
  632. blendedRAMDiscount = (reservedRAMGB * reservedRAMDiscount) + (nonReservedRAM*discount)/nodeRAMGB
  633. }
  634. }
  635. }
  636. }
  637. }
  638. return blendedCPUDiscount, blendedRAMDiscount
  639. }
  640. func parseVectorPricing(cfg *models.CustomPricing, costDatum *CostData, cpuCostStr, ramCostStr, gpuCostStr, pvCostStr string) (float64, float64, float64, float64, bool) {
  641. usesCustom := false
  642. cpuCost, err := strconv.ParseFloat(cpuCostStr, 64)
  643. if err != nil || math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) || cpuCost == 0 {
  644. cpuCost, err = strconv.ParseFloat(cfg.CPU, 64)
  645. usesCustom = true
  646. if err != nil || math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
  647. cpuCost = 0
  648. }
  649. }
  650. ramCost, err := strconv.ParseFloat(ramCostStr, 64)
  651. if err != nil || math.IsNaN(ramCost) || math.IsInf(ramCost, 0) || ramCost == 0 {
  652. ramCost, err = strconv.ParseFloat(cfg.RAM, 64)
  653. usesCustom = true
  654. if err != nil || math.IsNaN(ramCost) || math.IsInf(ramCost, 0) {
  655. ramCost = 0
  656. }
  657. }
  658. gpuCost, err := strconv.ParseFloat(gpuCostStr, 64)
  659. if err != nil || math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
  660. gpuCost, err = strconv.ParseFloat(cfg.GPU, 64)
  661. if err != nil || math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
  662. gpuCost = 0
  663. }
  664. }
  665. pvCost, err := strconv.ParseFloat(pvCostStr, 64)
  666. if err != nil || math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
  667. pvCost, err = strconv.ParseFloat(cfg.Storage, 64)
  668. if err != nil || math.IsNaN(pvCost) || math.IsInf(pvCost, 0) {
  669. pvCost = 0
  670. }
  671. }
  672. return cpuCost, ramCost, gpuCost, pvCost, usesCustom
  673. }
// getPriceVectors prices a single cost datum's resource allocations and
// returns per-timestamp cost vectors for (CPU, RAM, GPU, PV, network).
// PV costs are returned as one vector list per PVC. Unit prices come from
// custom pricing when enabled (or when node data is missing), otherwise from
// the provider-reported node prices. Discounts and the idle coefficient are
// folded into every value, and timestamps are rounded to the nearest 10s so
// that addVectors can match samples across series.
//
// NOTE(review): the rate parameter is currently unused — rateCoeff is fixed
// at 1.0 (see TODO below).
func getPriceVectors(cp models.Provider, costDatum *CostData, rate string, discount float64, customDiscount float64, idleCoefficient float64) ([]*util.Vector, []*util.Vector, []*util.Vector, [][]*util.Vector, []*util.Vector) {
	var cpuCost float64
	var ramCost float64
	var gpuCost float64
	var pvCost float64
	var usesCustom bool

	// If custom pricing is enabled and can be retrieved, replace
	// default cost values with custom values
	customPricing, err := cp.GetConfig()
	if err != nil {
		log.Errorf("failed to load custom pricing: %s", err)
	}
	if provider.CustomPricesEnabled(cp) && err == nil {
		var cpuCostStr string
		var ramCostStr string
		var gpuCostStr string
		var pvCostStr string
		// NOTE(review): NodeData is dereferenced here without a nil check;
		// the nil case is only handled by the branch below. Confirm IsSpot
		// is nil-receiver safe or that NodeData is always non-nil when
		// custom pricing is enabled.
		if costDatum.NodeData.IsSpot() {
			cpuCostStr = customPricing.SpotCPU
			ramCostStr = customPricing.SpotRAM
			gpuCostStr = customPricing.SpotGPU
		} else {
			cpuCostStr = customPricing.CPU
			ramCostStr = customPricing.RAM
			gpuCostStr = customPricing.GPU
		}
		pvCostStr = customPricing.Storage
		cpuCost, ramCost, gpuCost, pvCost, usesCustom = parseVectorPricing(customPricing, costDatum, cpuCostStr, ramCostStr, gpuCostStr, pvCostStr)
	} else if costDatum.NodeData == nil && err == nil {
		// No node pricing data available: fall back to the custom config.
		cpuCostStr := customPricing.CPU
		ramCostStr := customPricing.RAM
		gpuCostStr := customPricing.GPU
		pvCostStr := customPricing.Storage
		cpuCost, ramCost, gpuCost, pvCost, usesCustom = parseVectorPricing(customPricing, costDatum, cpuCostStr, ramCostStr, gpuCostStr, pvCostStr)
	} else {
		// Use the provider-reported node prices.
		cpuCostStr := costDatum.NodeData.VCPUCost
		ramCostStr := costDatum.NodeData.RAMCost
		gpuCostStr := costDatum.NodeData.GPUCost
		pvCostStr := costDatum.NodeData.StorageCost
		cpuCost, ramCost, gpuCost, pvCost, usesCustom = parseVectorPricing(customPricing, costDatum, cpuCostStr, ramCostStr, gpuCostStr, pvCostStr)
	}

	if usesCustom {
		log.DedupedWarningf(5, "No pricing data found for node `%s` , using custom pricing", costDatum.NodeName)
	}

	cpuDiscount, ramDiscount := getDiscounts(costDatum, cpuCost, ramCost, discount)

	log.Debugf("Node Name: %s", costDatum.NodeName)
	log.Debugf("Blended CPU Discount: %f", cpuDiscount)
	log.Debugf("Blended RAM Discount: %f", ramDiscount)

	// TODO should we try to apply the rate coefficient here or leave it as a totals-only metric?
	rateCoeff := 1.0
	// Guard the divisions below: an idle coefficient of 0 means "no idle
	// adjustment", not "divide by zero".
	if idleCoefficient == 0 {
		idleCoefficient = 1.0
	}

	// CPU cost: allocation * per-unit price, with blended CPU discount,
	// custom discount, and idle adjustment applied.
	cpuv := make([]*util.Vector, 0, len(costDatum.CPUAllocation))
	for _, val := range costDatum.CPUAllocation {
		cpuv = append(cpuv, &util.Vector{
			Timestamp: math.Round(val.Timestamp/10) * 10,
			Value:     (val.Value * cpuCost * (1 - cpuDiscount) * (1 - customDiscount) / idleCoefficient) * rateCoeff,
		})
	}

	// RAM cost: allocation bytes are converted to GiB before pricing.
	ramv := make([]*util.Vector, 0, len(costDatum.RAMAllocation))
	for _, val := range costDatum.RAMAllocation {
		ramv = append(ramv, &util.Vector{
			Timestamp: math.Round(val.Timestamp/10) * 10,
			Value:     ((val.Value / 1024 / 1024 / 1024) * ramCost * (1 - ramDiscount) * (1 - customDiscount) / idleCoefficient) * rateCoeff,
		})
	}

	// GPU cost: note the flat global discount is used here, not a blended one.
	gpuv := make([]*util.Vector, 0, len(costDatum.GPUReq))
	for _, val := range costDatum.GPUReq {
		gpuv = append(gpuv, &util.Vector{
			Timestamp: math.Round(val.Timestamp/10) * 10,
			Value:     (val.Value * gpuCost * (1 - discount) * (1 - customDiscount) / idleCoefficient) * rateCoeff,
		})
	}

	// PV cost: one vector list per PVC; PVCs without volume data are skipped.
	pvvs := make([][]*util.Vector, 0, len(costDatum.PVCData))
	for _, pvcData := range costDatum.PVCData {
		pvv := make([]*util.Vector, 0, len(pvcData.Values))
		if pvcData.Volume != nil {
			cost, _ := strconv.ParseFloat(pvcData.Volume.Cost, 64)

			// override with custom pricing if enabled
			if provider.CustomPricesEnabled(cp) {
				cost = pvCost
			}

			for _, val := range pvcData.Values {
				pvv = append(pvv, &util.Vector{
					Timestamp: math.Round(val.Timestamp/10) * 10,
					Value:     ((val.Value / 1024 / 1024 / 1024) * cost * (1 - customDiscount) / idleCoefficient) * rateCoeff,
				})
			}

			pvvs = append(pvvs, pvv)
		}
	}

	// Network cost: values pass through undiscounted and without idle
	// adjustment; only the timestamp is rounded.
	netv := make([]*util.Vector, 0, len(costDatum.NetworkData))
	for _, val := range costDatum.NetworkData {
		netv = append(netv, &util.Vector{
			Timestamp: math.Round(val.Timestamp/10) * 10,
			Value:     val.Value,
		})
	}

	return cpuv, ramv, gpuv, pvvs, netv
}
  775. func averageVectors(vectors []*util.Vector) float64 {
  776. if len(vectors) == 0 {
  777. return 0.0
  778. }
  779. return totalVectors(vectors) / float64(len(vectors))
  780. }
  781. func totalVectors(vectors []*util.Vector) float64 {
  782. total := 0.0
  783. for _, vector := range vectors {
  784. total += vector.Value
  785. }
  786. return total
  787. }
  788. // addVectors adds two slices of Vectors. Vector timestamps are rounded to the
  789. // nearest ten seconds to allow matching of Vectors within a delta allowance.
  790. // Matching Vectors are summed, while unmatched Vectors are passed through.
  791. // e.g. [(t=1, 1), (t=2, 2)] + [(t=2, 2), (t=3, 3)] = [(t=1, 1), (t=2, 4), (t=3, 3)]
  792. func addVectors(xvs []*util.Vector, yvs []*util.Vector) []*util.Vector {
  793. sumOp := func(result *util.Vector, x *float64, y *float64) bool {
  794. if x != nil && y != nil {
  795. result.Value = *x + *y
  796. } else if y != nil {
  797. result.Value = *y
  798. } else if x != nil {
  799. result.Value = *x
  800. }
  801. return true
  802. }
  803. return util.ApplyVectorOp(xvs, yvs, sumOp)
  804. }
// minCostDataLength sets the minimum number of time series data points
// required to cache both raw and aggregated cost data.
const minCostDataLength = 2
// EmptyDataError describes an error caused by empty cost data for some
// defined interval.
type EmptyDataError struct {
	err    error           // optional underlying cause; may be nil
	window opencost.Window // the queried time range that produced no data
}
  814. // Error implements the error interface
  815. func (ede *EmptyDataError) Error() string {
  816. err := fmt.Sprintf("empty data for range: %s", ede.window)
  817. if ede.err != nil {
  818. err += fmt.Sprintf(": %s", ede.err)
  819. }
  820. return err
  821. }
  822. func costDataTimeSeriesLength(costData map[string]*CostData) int {
  823. l := 0
  824. for _, cd := range costData {
  825. if l < len(cd.RAMAllocation) {
  826. l = len(cd.RAMAllocation)
  827. }
  828. if l < len(cd.CPUAllocation) {
  829. l = len(cd.CPUAllocation)
  830. }
  831. }
  832. return l
  833. }
  834. // ScaleHourlyCostData converts per-hour cost data to per-resolution data. If the target resolution is higher (i.e. < 1.0h)
  835. // then we can do simple multiplication by the fraction-of-an-hour and retain accuracy. If the target resolution is
  836. // lower (i.e. > 1.0h) then we sum groups of hourly data by resolution to maintain fidelity.
  837. // e.g. (100 hours of per-hour hourly data, resolutionHours=10) => 10 data points, grouped and summed by 10-hour window
  838. // e.g. (20 minutes of per-minute hourly data, resolutionHours=1/60) => 20 data points, scaled down by a factor of 60
  839. func ScaleHourlyCostData(data map[string]*CostData, resolutionHours float64) map[string]*CostData {
  840. scaled := map[string]*CostData{}
  841. for key, datum := range data {
  842. datum.RAMReq = scaleVectorSeries(datum.RAMReq, resolutionHours)
  843. datum.RAMUsed = scaleVectorSeries(datum.RAMUsed, resolutionHours)
  844. datum.RAMAllocation = scaleVectorSeries(datum.RAMAllocation, resolutionHours)
  845. datum.CPUReq = scaleVectorSeries(datum.CPUReq, resolutionHours)
  846. datum.CPUUsed = scaleVectorSeries(datum.CPUUsed, resolutionHours)
  847. datum.CPUAllocation = scaleVectorSeries(datum.CPUAllocation, resolutionHours)
  848. datum.GPUReq = scaleVectorSeries(datum.GPUReq, resolutionHours)
  849. datum.NetworkData = scaleVectorSeries(datum.NetworkData, resolutionHours)
  850. for _, pvcDatum := range datum.PVCData {
  851. pvcDatum.Values = scaleVectorSeries(pvcDatum.Values, resolutionHours)
  852. }
  853. scaled[key] = datum
  854. }
  855. return scaled
  856. }
  857. func scaleVectorSeries(vs []*util.Vector, resolutionHours float64) []*util.Vector {
  858. // if scaling to a lower resolution, compress the hourly data for maximum accuracy
  859. if resolutionHours > 1.0 {
  860. return compressVectorSeries(vs, resolutionHours)
  861. }
  862. // if scaling to a higher resolution, simply scale each value down by the fraction of an hour
  863. for _, v := range vs {
  864. v.Value *= resolutionHours
  865. }
  866. return vs
  867. }
  868. func compressVectorSeries(vs []*util.Vector, resolutionHours float64) []*util.Vector {
  869. if len(vs) == 0 {
  870. return vs
  871. }
  872. compressed := []*util.Vector{}
  873. threshold := float64(60 * 60 * resolutionHours)
  874. var acc *util.Vector
  875. for i, v := range vs {
  876. if acc == nil {
  877. // start a new accumulation from current datum
  878. acc = &util.Vector{
  879. Value: vs[i].Value,
  880. Timestamp: vs[i].Timestamp,
  881. }
  882. continue
  883. }
  884. if v.Timestamp-acc.Timestamp < threshold {
  885. // v should be accumulated in current datum
  886. acc.Value += v.Value
  887. } else {
  888. // v falls outside current datum's threshold; append and start a new one
  889. compressed = append(compressed, acc)
  890. acc = &util.Vector{
  891. Value: vs[i].Value,
  892. Timestamp: vs[i].Timestamp,
  893. }
  894. }
  895. }
  896. // append any remaining, incomplete accumulation
  897. if acc != nil {
  898. compressed = append(compressed, acc)
  899. }
  900. return compressed
  901. }
// AggregateQueryOpts holds the options governing an aggregate cost model
// query: filtering, shared-cost handling, caching behavior, and result shape.
type AggregateQueryOpts struct {
	// Rate selects a rate mode; "" means totals. Passed through to the
	// aggregation pipeline — confirm accepted values with callers.
	Rate string
	// Filters maps filter names ("namespace", "node", "cluster", "podprefix",
	// "labels", "annotations") to comma-separated values; see
	// ComputeAggregateCostModel for matching semantics (OR within a filter,
	// "*" suffix for prefix wildcards).
	Filters map[string]string
	// SharedResources identifies resources whose cost is shared; matching
	// cost data is retained even if other filters would exclude it.
	SharedResources *SharedResourceInfo
	// ShareSplit selects how shared costs are divided (default SplitTypeWeighted).
	ShareSplit   string
	AllocateIdle bool // presumably distributes idle cost across allocations — confirm with callers
	// IncludeTimeSeries keeps per-timestamp cost vectors in the result;
	// when false they are stripped after totals are computed.
	IncludeTimeSeries bool
	IncludeEfficiency bool // compute CPU/RAM efficiency metrics for each aggregation
	// Cache controls.
	DisableAggregateCostModelCache bool
	ClearCache                     bool // flush aggregate and cost-data caches before computing
	NoCache                        bool
	NoExpireCache                  bool // cache results with no expiration
	RemoteEnabled                  bool // query remote/federated data stores
	DisableSharedOverhead          bool
	UseETLAdapter                  bool // NOTE(review): usage not visible in this chunk — presumably routes to an ETL-backed implementation
}
  918. func DefaultAggregateQueryOpts() *AggregateQueryOpts {
  919. return &AggregateQueryOpts{
  920. Rate: "",
  921. Filters: map[string]string{},
  922. SharedResources: nil,
  923. ShareSplit: SplitTypeWeighted,
  924. AllocateIdle: false,
  925. IncludeTimeSeries: true,
  926. IncludeEfficiency: true,
  927. DisableAggregateCostModelCache: env.IsAggregateCostModelCacheDisabled(),
  928. ClearCache: false,
  929. NoCache: false,
  930. NoExpireCache: false,
  931. RemoteEnabled: env.IsRemoteEnabled(),
  932. DisableSharedOverhead: false,
  933. UseETLAdapter: false,
  934. }
  935. }
  936. // ComputeAggregateCostModel computes cost data for the given window, then aggregates it by the given fields.
  937. // Data is cached on two levels: the aggregation is cached as well as the underlying cost data.
  938. func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client, window opencost.Window, field string, subfields []string, opts *AggregateQueryOpts) (map[string]*Aggregation, string, error) {
  939. // Window is the range of the query, i.e. (start, end)
  940. // It must be closed, i.e. neither start nor end can be nil
  941. if window.IsOpen() {
  942. return nil, "", fmt.Errorf("illegal window: %s", window)
  943. }
  944. // Resolution is the duration of each datum in the cost model range query,
  945. // which corresponds to both the step size given to Prometheus query_range
  946. // and to the window passed to the range queries.
  947. // i.e. by default, we support 1h resolution for queries of windows defined
  948. // in terms of days or integer multiples of hours (e.g. 1d, 12h)
  949. resolution := time.Hour
  950. // Determine resolution by size of duration and divisibility of window.
  951. // By default, resolution is 1hr. If the window is smaller than 1hr, then
  952. // resolution goes down to 1m. If the window is not a multiple of 1hr, then
  953. // resolution goes down to 1m. If the window is greater than 1d, then
  954. // resolution gets scaled up to improve performance by reducing the amount
  955. // of data being computed.
  956. durMins := int64(math.Trunc(window.Minutes()))
  957. if durMins < 24*60 { // less than 1d
  958. // TODO should we have additional options for going by
  959. // e.g. 30m? 10m? 5m?
  960. if durMins%60 != 0 || durMins < 3*60 { // not divisible by 1h or less than 3h
  961. resolution = time.Minute
  962. }
  963. } else { // greater than 1d
  964. if durMins >= 7*24*60 { // greater than (or equal to) 7 days
  965. resolution = 24.0 * time.Hour
  966. } else if durMins >= 2*24*60 { // greater than (or equal to) 2 days
  967. resolution = 2.0 * time.Hour
  968. }
  969. }
  970. // Parse options
  971. if opts == nil {
  972. opts = DefaultAggregateQueryOpts()
  973. }
  974. rate := opts.Rate
  975. filters := opts.Filters
  976. sri := opts.SharedResources
  977. shared := opts.ShareSplit
  978. allocateIdle := opts.AllocateIdle
  979. includeTimeSeries := opts.IncludeTimeSeries
  980. includeEfficiency := opts.IncludeEfficiency
  981. disableAggregateCostModelCache := opts.DisableAggregateCostModelCache
  982. clearCache := opts.ClearCache
  983. noCache := opts.NoCache
  984. noExpireCache := opts.NoExpireCache
  985. remoteEnabled := opts.RemoteEnabled
  986. disableSharedOverhead := opts.DisableSharedOverhead
  987. // retainFuncs override filterFuncs. Make sure shared resources do not
  988. // get filtered out.
  989. retainFuncs := []FilterFunc{}
  990. retainFuncs = append(retainFuncs, func(cd *CostData) (bool, string) {
  991. if sri != nil {
  992. return sri.IsSharedResource(cd), ""
  993. }
  994. return false, ""
  995. })
  996. // Parse cost data filters into FilterFuncs
  997. filterFuncs := []FilterFunc{}
  998. aggregateEnvironment := func(costDatum *CostData) string {
  999. if field == "cluster" {
  1000. return costDatum.ClusterID
  1001. } else if field == "node" {
  1002. return costDatum.NodeName
  1003. } else if field == "namespace" {
  1004. return costDatum.Namespace
  1005. } else if field == "service" {
  1006. if len(costDatum.Services) > 0 {
  1007. return costDatum.Namespace + "/" + costDatum.Services[0]
  1008. }
  1009. } else if field == "deployment" {
  1010. if len(costDatum.Deployments) > 0 {
  1011. return costDatum.Namespace + "/" + costDatum.Deployments[0]
  1012. }
  1013. } else if field == "daemonset" {
  1014. if len(costDatum.Daemonsets) > 0 {
  1015. return costDatum.Namespace + "/" + costDatum.Daemonsets[0]
  1016. }
  1017. } else if field == "statefulset" {
  1018. if len(costDatum.Statefulsets) > 0 {
  1019. return costDatum.Namespace + "/" + costDatum.Statefulsets[0]
  1020. }
  1021. } else if field == "label" {
  1022. if costDatum.Labels != nil {
  1023. for _, sf := range subfields {
  1024. if subfieldName, ok := costDatum.Labels[sf]; ok {
  1025. return fmt.Sprintf("%s=%s", sf, subfieldName)
  1026. }
  1027. }
  1028. }
  1029. } else if field == "annotation" {
  1030. if costDatum.Annotations != nil {
  1031. for _, sf := range subfields {
  1032. if subfieldName, ok := costDatum.Annotations[sf]; ok {
  1033. return fmt.Sprintf("%s=%s", sf, subfieldName)
  1034. }
  1035. }
  1036. }
  1037. } else if field == "pod" {
  1038. return costDatum.Namespace + "/" + costDatum.PodName
  1039. } else if field == "container" {
  1040. return costDatum.Namespace + "/" + costDatum.PodName + "/" + costDatum.Name
  1041. }
  1042. return ""
  1043. }
  1044. if filters["podprefix"] != "" {
  1045. pps := []string{}
  1046. for _, fp := range strings.Split(filters["podprefix"], ",") {
  1047. if fp != "" {
  1048. cleanedFilter := strings.TrimSpace(fp)
  1049. pps = append(pps, cleanedFilter)
  1050. }
  1051. }
  1052. filterFuncs = append(filterFuncs, func(cd *CostData) (bool, string) {
  1053. aggEnv := aggregateEnvironment(cd)
  1054. for _, pp := range pps {
  1055. cleanedFilter := strings.TrimSpace(pp)
  1056. if strings.HasPrefix(cd.PodName, cleanedFilter) {
  1057. return true, aggEnv
  1058. }
  1059. }
  1060. return false, aggEnv
  1061. })
  1062. }
  1063. if filters["namespace"] != "" {
  1064. // namespaces may be comma-separated, e.g. kubecost,default
  1065. // multiple namespaces are evaluated as an OR relationship
  1066. nss := strings.Split(filters["namespace"], ",")
  1067. filterFuncs = append(filterFuncs, func(cd *CostData) (bool, string) {
  1068. aggEnv := aggregateEnvironment(cd)
  1069. for _, ns := range nss {
  1070. nsTrim := strings.TrimSpace(ns)
  1071. if cd.Namespace == nsTrim {
  1072. return true, aggEnv
  1073. } else if strings.HasSuffix(nsTrim, "*") { // trigger wildcard prefix filtering
  1074. nsTrimAsterisk := strings.TrimSuffix(nsTrim, "*")
  1075. if strings.HasPrefix(cd.Namespace, nsTrimAsterisk) {
  1076. return true, aggEnv
  1077. }
  1078. }
  1079. }
  1080. return false, aggEnv
  1081. })
  1082. }
  1083. if filters["node"] != "" {
  1084. // nodes may be comma-separated, e.g. aws-node-1,aws-node-2
  1085. // multiple nodes are evaluated as an OR relationship
  1086. nodes := strings.Split(filters["node"], ",")
  1087. filterFuncs = append(filterFuncs, func(cd *CostData) (bool, string) {
  1088. aggEnv := aggregateEnvironment(cd)
  1089. for _, node := range nodes {
  1090. nodeTrim := strings.TrimSpace(node)
  1091. if cd.NodeName == nodeTrim {
  1092. return true, aggEnv
  1093. } else if strings.HasSuffix(nodeTrim, "*") { // trigger wildcard prefix filtering
  1094. nodeTrimAsterisk := strings.TrimSuffix(nodeTrim, "*")
  1095. if strings.HasPrefix(cd.NodeName, nodeTrimAsterisk) {
  1096. return true, aggEnv
  1097. }
  1098. }
  1099. }
  1100. return false, aggEnv
  1101. })
  1102. }
  1103. if filters["cluster"] != "" {
  1104. // clusters may be comma-separated, e.g. cluster-one,cluster-two
  1105. // multiple clusters are evaluated as an OR relationship
  1106. cs := strings.Split(filters["cluster"], ",")
  1107. filterFuncs = append(filterFuncs, func(cd *CostData) (bool, string) {
  1108. aggEnv := aggregateEnvironment(cd)
  1109. for _, c := range cs {
  1110. cTrim := strings.TrimSpace(c)
  1111. id, name := cd.ClusterID, cd.ClusterName
  1112. if id == cTrim || name == cTrim {
  1113. return true, aggEnv
  1114. } else if strings.HasSuffix(cTrim, "*") { // trigger wildcard prefix filtering
  1115. cTrimAsterisk := strings.TrimSuffix(cTrim, "*")
  1116. if strings.HasPrefix(id, cTrimAsterisk) || strings.HasPrefix(name, cTrimAsterisk) {
  1117. return true, aggEnv
  1118. }
  1119. }
  1120. }
  1121. return false, aggEnv
  1122. })
  1123. }
  1124. if filters["labels"] != "" {
  1125. // labels are expected to be comma-separated and to take the form key=value
  1126. // e.g. app=cost-analyzer,app.kubernetes.io/instance=kubecost
  1127. // each different label will be applied as an AND
  1128. // multiple values for a single label will be evaluated as an OR
  1129. labelValues := map[string][]string{}
  1130. ls := strings.Split(filters["labels"], ",")
  1131. for _, l := range ls {
  1132. lTrim := strings.TrimSpace(l)
  1133. label := strings.Split(lTrim, "=")
  1134. if len(label) == 2 {
  1135. ln := promutil.SanitizeLabelName(strings.TrimSpace(label[0]))
  1136. lv := strings.TrimSpace(label[1])
  1137. labelValues[ln] = append(labelValues[ln], lv)
  1138. } else {
  1139. // label is not of the form name=value, so log it and move on
  1140. log.Warnf("ComputeAggregateCostModel: skipping illegal label filter: %s", l)
  1141. }
  1142. }
  1143. // Generate FilterFunc for each set of label filters by invoking a function instead of accessing
  1144. // values by closure to prevent reference-type looping bug.
  1145. // (see https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable)
  1146. for label, values := range labelValues {
  1147. ff := (func(l string, vs []string) FilterFunc {
  1148. return func(cd *CostData) (bool, string) {
  1149. ae := aggregateEnvironment(cd)
  1150. for _, v := range vs {
  1151. if v == "__unallocated__" { // Special case. __unallocated__ means return all pods without the attached label
  1152. if _, ok := cd.Labels[l]; !ok {
  1153. return true, ae
  1154. }
  1155. }
  1156. if cd.Labels[l] == v {
  1157. return true, ae
  1158. } else if strings.HasSuffix(v, "*") { // trigger wildcard prefix filtering
  1159. vTrim := strings.TrimSuffix(v, "*")
  1160. if strings.HasPrefix(cd.Labels[l], vTrim) {
  1161. return true, ae
  1162. }
  1163. }
  1164. }
  1165. return false, ae
  1166. }
  1167. })(label, values)
  1168. filterFuncs = append(filterFuncs, ff)
  1169. }
  1170. }
  1171. if filters["annotations"] != "" {
  1172. // annotations are expected to be comma-separated and to take the form key=value
  1173. // e.g. app=cost-analyzer,app.kubernetes.io/instance=kubecost
  1174. // each different annotation will be applied as an AND
  1175. // multiple values for a single annotation will be evaluated as an OR
  1176. annotationValues := map[string][]string{}
  1177. as := strings.Split(filters["annotations"], ",")
  1178. for _, annot := range as {
  1179. aTrim := strings.TrimSpace(annot)
  1180. annotation := strings.Split(aTrim, "=")
  1181. if len(annotation) == 2 {
  1182. an := promutil.SanitizeLabelName(strings.TrimSpace(annotation[0]))
  1183. av := strings.TrimSpace(annotation[1])
  1184. annotationValues[an] = append(annotationValues[an], av)
  1185. } else {
  1186. // annotation is not of the form name=value, so log it and move on
  1187. log.Warnf("ComputeAggregateCostModel: skipping illegal annotation filter: %s", annot)
  1188. }
  1189. }
  1190. // Generate FilterFunc for each set of annotation filters by invoking a function instead of accessing
  1191. // values by closure to prevent reference-type looping bug.
  1192. // (see https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable)
  1193. for annotation, values := range annotationValues {
  1194. ff := (func(l string, vs []string) FilterFunc {
  1195. return func(cd *CostData) (bool, string) {
  1196. ae := aggregateEnvironment(cd)
  1197. for _, v := range vs {
  1198. if v == "__unallocated__" { // Special case. __unallocated__ means return all pods without the attached label
  1199. if _, ok := cd.Annotations[l]; !ok {
  1200. return true, ae
  1201. }
  1202. }
  1203. if cd.Annotations[l] == v {
  1204. return true, ae
  1205. } else if strings.HasSuffix(v, "*") { // trigger wildcard prefix filtering
  1206. vTrim := strings.TrimSuffix(v, "*")
  1207. if strings.HasPrefix(cd.Annotations[l], vTrim) {
  1208. return true, ae
  1209. }
  1210. }
  1211. }
  1212. return false, ae
  1213. }
  1214. })(annotation, values)
  1215. filterFuncs = append(filterFuncs, ff)
  1216. }
  1217. }
  1218. // clear cache prior to checking the cache so that a clearCache=true
  1219. // request always returns a freshly computed value
  1220. if clearCache {
  1221. a.AggregateCache.Flush()
  1222. a.CostDataCache.Flush()
  1223. }
  1224. cacheExpiry := a.GetCacheExpiration(window.Duration())
  1225. if noExpireCache {
  1226. cacheExpiry = cache.NoExpiration
  1227. }
  1228. // parametrize cache key by all request parameters
  1229. aggKey := GenerateAggKey(window, field, subfields, opts)
  1230. thanosOffset := time.Now().Add(-thanos.OffsetDuration())
  1231. if a.ThanosClient != nil && window.End().After(thanosOffset) {
  1232. log.Infof("ComputeAggregateCostModel: setting end time backwards to first present data")
  1233. // Apply offsets to both end and start times to maintain correct time range
  1234. deltaDuration := window.End().Sub(thanosOffset)
  1235. s := window.Start().Add(-1 * deltaDuration)
  1236. e := time.Now().Add(-thanos.OffsetDuration())
  1237. window.Set(&s, &e)
  1238. }
  1239. dur, off := window.DurationOffsetStrings()
  1240. key := fmt.Sprintf(`%s:%s:%fh:%t`, dur, off, resolution.Hours(), remoteEnabled)
  1241. // report message about which of the two caches hit. by default report a miss
  1242. cacheMessage := fmt.Sprintf("ComputeAggregateCostModel: L1 cache miss: %s L2 cache miss: %s", aggKey, key)
  1243. // check the cache for aggregated response; if cache is hit and not disabled, return response
  1244. if value, found := a.AggregateCache.Get(aggKey); found && !disableAggregateCostModelCache && !noCache {
  1245. result, ok := value.(map[string]*Aggregation)
  1246. if !ok {
  1247. // disable cache and recompute if type cast fails
  1248. log.Errorf("ComputeAggregateCostModel: caching error: failed to cast aggregate data to struct: %s", aggKey)
  1249. return a.ComputeAggregateCostModel(promClient, window, field, subfields, opts)
  1250. }
  1251. return result, fmt.Sprintf("aggregate cache hit: %s", aggKey), nil
  1252. }
  1253. if window.Hours() >= 1.0 {
  1254. // exclude the last window of the time frame to match Prometheus definitions of range, offset, and resolution
  1255. start := window.Start().Add(resolution)
  1256. window.Set(&start, window.End())
  1257. } else {
  1258. // don't cache requests for durations of less than one hour
  1259. disableAggregateCostModelCache = true
  1260. }
  1261. // attempt to retrieve cost data from cache
  1262. var costData map[string]*CostData
  1263. var err error
  1264. cacheData, found := a.CostDataCache.Get(key)
  1265. if found && !disableAggregateCostModelCache && !noCache {
  1266. ok := false
  1267. costData, ok = cacheData.(map[string]*CostData)
  1268. cacheMessage = fmt.Sprintf("ComputeAggregateCostModel: L1 cache miss: %s, L2 cost data cache hit: %s", aggKey, key)
  1269. if !ok {
  1270. log.Errorf("ComputeAggregateCostModel: caching error: failed to cast cost data to struct: %s", key)
  1271. }
  1272. } else {
  1273. log.Infof("ComputeAggregateCostModel: missed cache: %s (found %t, disableAggregateCostModelCache %t, noCache %t)", key, found, disableAggregateCostModelCache, noCache)
  1274. costData, err = a.Model.ComputeCostDataRange(promClient, a.CloudProvider, window, resolution, "", "", remoteEnabled)
  1275. if err != nil {
  1276. if prom.IsErrorCollection(err) {
  1277. return nil, "", err
  1278. }
  1279. if pce, ok := err.(prom.CommError); ok {
  1280. return nil, "", pce
  1281. }
  1282. if strings.Contains(err.Error(), "data is empty") {
  1283. return nil, "", &EmptyDataError{err: err, window: window}
  1284. }
  1285. return nil, "", err
  1286. }
  1287. // compute length of the time series in the cost data and only compute
  1288. // aggregates and cache if the length is sufficiently high
  1289. costDataLen := costDataTimeSeriesLength(costData)
  1290. if costDataLen == 0 {
  1291. return nil, "", &EmptyDataError{window: window}
  1292. }
  1293. if costDataLen >= minCostDataLength && !noCache {
  1294. log.Infof("ComputeAggregateCostModel: setting L2 cache: %s", key)
  1295. a.CostDataCache.Set(key, costData, cacheExpiry)
  1296. }
  1297. }
  1298. c, err := a.CloudProvider.GetConfig()
  1299. if err != nil {
  1300. return nil, "", err
  1301. }
  1302. discount, err := ParsePercentString(c.Discount)
  1303. if err != nil {
  1304. return nil, "", err
  1305. }
  1306. customDiscount, err := ParsePercentString(c.NegotiatedDiscount)
  1307. if err != nil {
  1308. return nil, "", err
  1309. }
  1310. sc := make(map[string]*SharedCostInfo)
  1311. if !disableSharedOverhead {
  1312. costPerMonth := c.GetSharedOverheadCostPerMonth()
  1313. durationCoefficient := window.Hours() / timeutil.HoursPerMonth
  1314. sc["total"] = &SharedCostInfo{
  1315. Name: "total",
  1316. Cost: costPerMonth * durationCoefficient,
  1317. }
  1318. }
  1319. idleCoefficients := make(map[string]float64)
  1320. if allocateIdle {
  1321. dur, off, err := window.DurationOffset()
  1322. if err != nil {
  1323. log.Errorf("ComputeAggregateCostModel: error computing idle coefficient: illegal window: %s (%s)", window, err)
  1324. return nil, "", err
  1325. }
  1326. if a.ThanosClient != nil && off < thanos.OffsetDuration() {
  1327. // Determine difference between the Thanos offset and the requested
  1328. // offset; e.g. off=1h, thanosOffsetDuration=3h => diff=2h
  1329. diff := thanos.OffsetDuration() - off
  1330. // Reduce duration by difference and increase offset by difference
  1331. // e.g. 24h offset 0h => 21h offset 3h
  1332. dur = dur - diff
  1333. off = thanos.OffsetDuration()
  1334. log.Infof("ComputeAggregateCostModel: setting duration, offset to %s, %s due to Thanos", dur, off)
  1335. // Idle computation cannot be fulfilled for some windows, specifically
  1336. // those with sum(duration, offset) < Thanos offset, because there is
  1337. // no data within that window.
  1338. if dur <= 0 {
  1339. return nil, "", fmt.Errorf("requested idle coefficients from Thanos for illegal duration, offset: %s, %s (original window %s)", dur, off, window)
  1340. }
  1341. }
  1342. idleCoefficients, err = a.ComputeIdleCoefficient(costData, promClient, a.CloudProvider, discount, customDiscount, dur, off)
  1343. if err != nil {
  1344. durStr, offStr := timeutil.DurationOffsetStrings(dur, off)
  1345. log.Errorf("ComputeAggregateCostModel: error computing idle coefficient: duration=%s, offset=%s, err=%s", durStr, offStr, err)
  1346. return nil, "", err
  1347. }
  1348. }
  1349. totalContainerCost := 0.0
  1350. if shared == SplitTypeWeighted {
  1351. totalContainerCost = GetTotalContainerCost(costData, rate, a.CloudProvider, discount, customDiscount, idleCoefficients)
  1352. }
  1353. // filter cost data by namespace and cluster after caching for maximal cache hits
  1354. costData, filteredContainerCount, filteredEnvironments := FilterCostData(costData, retainFuncs, filterFuncs)
  1355. // aggregate cost model data by given fields and cache the result for the default expiration
  1356. aggOpts := &AggregationOptions{
  1357. Discount: discount,
  1358. CustomDiscount: customDiscount,
  1359. IdleCoefficients: idleCoefficients,
  1360. IncludeEfficiency: includeEfficiency,
  1361. IncludeTimeSeries: includeTimeSeries,
  1362. Rate: rate,
  1363. ResolutionHours: resolution.Hours(),
  1364. SharedResourceInfo: sri,
  1365. SharedCosts: sc,
  1366. FilteredContainerCount: filteredContainerCount,
  1367. FilteredEnvironments: filteredEnvironments,
  1368. TotalContainerCost: totalContainerCost,
  1369. SharedSplit: shared,
  1370. }
  1371. result := AggregateCostData(costData, field, subfields, a.CloudProvider, aggOpts)
  1372. // If sending time series data back, switch scale back to hourly data. At this point,
  1373. // resolutionHours may have converted our hourly data to more- or less-than hourly data.
  1374. if includeTimeSeries {
  1375. for _, aggs := range result {
  1376. ScaleAggregationTimeSeries(aggs, resolution.Hours())
  1377. }
  1378. }
  1379. // compute length of the time series in the cost data and only cache
  1380. // aggregation results if the length is sufficiently high
  1381. costDataLen := costDataTimeSeriesLength(costData)
  1382. if costDataLen >= minCostDataLength && window.Hours() > 1.0 && !noCache {
  1383. // Set the result map (rather than a pointer to it) because map is a reference type
  1384. log.Infof("ComputeAggregateCostModel: setting aggregate cache: %s", aggKey)
  1385. a.AggregateCache.Set(aggKey, result, cacheExpiry)
  1386. } else {
  1387. log.Infof("ComputeAggregateCostModel: not setting aggregate cache: %s (not enough data: %t; duration less than 1h: %t; noCache: %t)", key, costDataLen < minCostDataLength, window.Hours() < 1, noCache)
  1388. }
  1389. return result, cacheMessage, nil
  1390. }
  1391. // ScaleAggregationTimeSeries reverses the scaling done by ScaleHourlyCostData, returning
  1392. // the aggregation's time series to hourly data.
  1393. func ScaleAggregationTimeSeries(aggregation *Aggregation, resolutionHours float64) {
  1394. for _, v := range aggregation.CPUCostVector {
  1395. v.Value /= resolutionHours
  1396. }
  1397. for _, v := range aggregation.GPUCostVector {
  1398. v.Value /= resolutionHours
  1399. }
  1400. for _, v := range aggregation.RAMCostVector {
  1401. v.Value /= resolutionHours
  1402. }
  1403. for _, v := range aggregation.PVCostVector {
  1404. v.Value /= resolutionHours
  1405. }
  1406. for _, v := range aggregation.NetworkCostVector {
  1407. v.Value /= resolutionHours
  1408. }
  1409. for _, v := range aggregation.TotalCostVector {
  1410. v.Value /= resolutionHours
  1411. }
  1412. return
  1413. }
  1414. // String returns a string representation of the encapsulated shared resources, which
  1415. // can be used to uniquely identify a set of shared resources. Sorting sets of shared
  1416. // resources ensures that strings representing permutations of the same combination match.
  1417. func (s *SharedResourceInfo) String() string {
  1418. if s == nil {
  1419. return ""
  1420. }
  1421. nss := []string{}
  1422. for ns := range s.SharedNamespace {
  1423. nss = append(nss, ns)
  1424. }
  1425. sort.Strings(nss)
  1426. nsStr := strings.Join(nss, ",")
  1427. labels := []string{}
  1428. for lbl, vals := range s.LabelSelectors {
  1429. for val := range vals {
  1430. if lbl != "" && val != "" {
  1431. labels = append(labels, fmt.Sprintf("%s=%s", lbl, val))
  1432. }
  1433. }
  1434. }
  1435. sort.Strings(labels)
  1436. labelStr := strings.Join(labels, ",")
  1437. return fmt.Sprintf("%s:%s", nsStr, labelStr)
  1438. }
// aggKeyParams collects the request parameters that distinguish one aggregate
// cost model query from another, for use in building parameter-unique cache
// keys (see GenerateAggKey for how equivalent fields are combined).
type aggKeyParams struct {
	duration   string            // query window duration string, e.g. "7d"
	offset     string            // query window offset string, e.g. "1h"
	filters    map[string]string // cost data filters keyed by class: namespace, cluster, labels, annotations, podprefix
	field      string            // primary aggregation field, e.g. "namespace"
	subfields  []string          // aggregation subfields, e.g. label names when field is "label"
	rate       string            // rate option: "", "hourly", "daily", or "monthly"
	sri        *SharedResourceInfo // shared resource (namespace/label selector) configuration
	shareType  string            // how shared costs are split across allocations
	idle       bool              // whether idle costs are allocated
	timeSeries bool              // whether the time series dimension is retained in the result
	efficiency bool              // whether efficiency data is included
}
  1452. // GenerateAggKey generates a parameter-unique key for caching the aggregate cost model
  1453. func GenerateAggKey(window opencost.Window, field string, subfields []string, opts *AggregateQueryOpts) string {
  1454. if opts == nil {
  1455. opts = DefaultAggregateQueryOpts()
  1456. }
  1457. // Covert to duration, offset so that cache hits occur, even when timestamps have
  1458. // shifted slightly.
  1459. duration, offset := window.DurationOffsetStrings()
  1460. // parse, trim, and sort podprefix filters
  1461. podPrefixFilters := []string{}
  1462. if ppfs, ok := opts.Filters["podprefix"]; ok && ppfs != "" {
  1463. for _, psf := range strings.Split(ppfs, ",") {
  1464. podPrefixFilters = append(podPrefixFilters, strings.TrimSpace(psf))
  1465. }
  1466. }
  1467. sort.Strings(podPrefixFilters)
  1468. podPrefixFiltersStr := strings.Join(podPrefixFilters, ",")
  1469. // parse, trim, and sort namespace filters
  1470. nsFilters := []string{}
  1471. if nsfs, ok := opts.Filters["namespace"]; ok && nsfs != "" {
  1472. for _, nsf := range strings.Split(nsfs, ",") {
  1473. nsFilters = append(nsFilters, strings.TrimSpace(nsf))
  1474. }
  1475. }
  1476. sort.Strings(nsFilters)
  1477. nsFilterStr := strings.Join(nsFilters, ",")
  1478. // parse, trim, and sort node filters
  1479. nodeFilters := []string{}
  1480. if nodefs, ok := opts.Filters["node"]; ok && nodefs != "" {
  1481. for _, nodef := range strings.Split(nodefs, ",") {
  1482. nodeFilters = append(nodeFilters, strings.TrimSpace(nodef))
  1483. }
  1484. }
  1485. sort.Strings(nodeFilters)
  1486. nodeFilterStr := strings.Join(nodeFilters, ",")
  1487. // parse, trim, and sort cluster filters
  1488. cFilters := []string{}
  1489. if cfs, ok := opts.Filters["cluster"]; ok && cfs != "" {
  1490. for _, cf := range strings.Split(cfs, ",") {
  1491. cFilters = append(cFilters, strings.TrimSpace(cf))
  1492. }
  1493. }
  1494. sort.Strings(cFilters)
  1495. cFilterStr := strings.Join(cFilters, ",")
  1496. // parse, trim, and sort label filters
  1497. lFilters := []string{}
  1498. if lfs, ok := opts.Filters["labels"]; ok && lfs != "" {
  1499. for _, lf := range strings.Split(lfs, ",") {
  1500. // trim whitespace from the label name and the label value
  1501. // of each label name/value pair, then reconstruct
  1502. // e.g. "tier = frontend, app = kubecost" == "app=kubecost,tier=frontend"
  1503. lfa := strings.Split(lf, "=")
  1504. if len(lfa) == 2 {
  1505. lfn := strings.TrimSpace(lfa[0])
  1506. lfv := strings.TrimSpace(lfa[1])
  1507. lFilters = append(lFilters, fmt.Sprintf("%s=%s", lfn, lfv))
  1508. } else {
  1509. // label is not of the form name=value, so log it and move on
  1510. log.Warnf("GenerateAggKey: skipping illegal label filter: %s", lf)
  1511. }
  1512. }
  1513. }
  1514. sort.Strings(lFilters)
  1515. lFilterStr := strings.Join(lFilters, ",")
  1516. // parse, trim, and sort annotation filters
  1517. aFilters := []string{}
  1518. if afs, ok := opts.Filters["annotations"]; ok && afs != "" {
  1519. for _, af := range strings.Split(afs, ",") {
  1520. // trim whitespace from the annotation name and the annotation value
  1521. // of each annotation name/value pair, then reconstruct
  1522. // e.g. "tier = frontend, app = kubecost" == "app=kubecost,tier=frontend"
  1523. afa := strings.Split(af, "=")
  1524. if len(afa) == 2 {
  1525. afn := strings.TrimSpace(afa[0])
  1526. afv := strings.TrimSpace(afa[1])
  1527. aFilters = append(aFilters, fmt.Sprintf("%s=%s", afn, afv))
  1528. } else {
  1529. // annotation is not of the form name=value, so log it and move on
  1530. log.Warnf("GenerateAggKey: skipping illegal annotation filter: %s", af)
  1531. }
  1532. }
  1533. }
  1534. sort.Strings(aFilters)
  1535. aFilterStr := strings.Join(aFilters, ",")
  1536. filterStr := fmt.Sprintf("%s:%s:%s:%s:%s:%s", nsFilterStr, nodeFilterStr, cFilterStr, lFilterStr, aFilterStr, podPrefixFiltersStr)
  1537. sort.Strings(subfields)
  1538. fieldStr := fmt.Sprintf("%s:%s", field, strings.Join(subfields, ","))
  1539. if offset == "1m" {
  1540. offset = ""
  1541. }
  1542. return fmt.Sprintf("%s:%s:%s:%s:%s:%s:%s:%t:%t:%t", duration, offset, filterStr, fieldStr, opts.Rate,
  1543. opts.SharedResources, opts.ShareSplit, opts.AllocateIdle, opts.IncludeTimeSeries,
  1544. opts.IncludeEfficiency)
  1545. }
// Aggregator is capable of computing the aggregated cost model. This is
// a brutal interface, which should be cleaned up, but it's necessary for
// being able to swap in an ETL-backed implementation.
type Aggregator interface {
	// ComputeAggregateCostModel computes cost aggregations over the given
	// window, aggregated by the given field and subfields, returning the
	// aggregations keyed by aggregate name, a diagnostic message (e.g.
	// cache hit/miss information), and any error encountered.
	ComputeAggregateCostModel(promClient prometheusClient.Client, window opencost.Window, field string, subfields []string, opts *AggregateQueryOpts) (map[string]*Aggregation, string, error)
}
// warmAggregateCostModelCache starts background goroutines that periodically
// recompute and cache the aggregate cost model (and cluster costs) for default
// parameter sets over standard windows (1 day always; 2, 7, and 30 days when
// ETL is disabled), so that common requests are served from a warm cache.
func (a *Accesses) warmAggregateCostModelCache() {
	// Only allow one concurrent cache-warming operation
	sem := util.NewSemaphore(1)

	// Set default values, pulling them from application settings where applicable, and warm the cache
	// for the given duration. Cache is intentionally set to expire (i.e. noExpireCache=false) so that
	// if the default parameters change, the old cached defaults with eventually expire. Thus, the
	// timing of the cache expiry/refresh is the only mechanism ensuring 100% cache warmth.
	//
	// warmFunc returns (aggregation error, cluster-costs error).
	warmFunc := func(duration, offset time.Duration, cacheEfficiencyData bool) (error, error) {
		if a.ThanosClient != nil {
			// NOTE(review): this overwrites duration (not offset) with the
			// Thanos offset, while the log message says "Setting Offset" —
			// confirm this is intentional.
			duration = thanos.OffsetDuration()
			log.Infof("Setting Offset to %s", duration)
		}
		fmtDuration, fmtOffset := timeutil.DurationOffsetStrings(duration, offset)
		// NOTE(review): the err returned here is never checked; it is
		// overwritten by the ParseWindowUTC assignment below.
		durationHrs, err := timeutil.FormatDurationStringDaysToHours(fmtDuration)

		promClient := a.GetPrometheusClient(true)

		windowStr := fmt.Sprintf("%s offset %s", fmtDuration, fmtOffset)
		window, err := opencost.ParseWindowUTC(windowStr)
		if err != nil {
			return nil, fmt.Errorf("invalid window from window string: %s", windowStr)
		}

		// default aggregation: by namespace, no filters, weighted shared split
		field := "namespace"
		subfields := []string{}

		aggOpts := DefaultAggregateQueryOpts()
		aggOpts.Rate = ""
		aggOpts.Filters = map[string]string{}
		aggOpts.IncludeTimeSeries = false
		aggOpts.IncludeEfficiency = true
		// force recomputation so the warm entry is actually refreshed
		aggOpts.DisableAggregateCostModelCache = true
		aggOpts.ClearCache = false
		aggOpts.NoCache = false
		aggOpts.NoExpireCache = false
		aggOpts.ShareSplit = SplitTypeWeighted
		aggOpts.RemoteEnabled = env.IsRemoteEnabled()
		aggOpts.AllocateIdle = provider.AllocateIdleByDefault(a.CloudProvider)

		// apply configured shared namespaces/labels, if any
		sharedNamespaces := provider.SharedNamespaces(a.CloudProvider)
		sharedLabelNames, sharedLabelValues := provider.SharedLabels(a.CloudProvider)

		if len(sharedNamespaces) > 0 || len(sharedLabelNames) > 0 {
			aggOpts.SharedResources = NewSharedResourceInfo(true, sharedNamespaces, sharedLabelNames, sharedLabelValues)
		}

		aggKey := GenerateAggKey(window, field, subfields, aggOpts)
		log.Infof("aggregation: cache warming defaults: %s", aggKey)
		// key identifies the cluster-costs cache entry for this window
		key := fmt.Sprintf("%s:%s", durationHrs, fmtOffset)

		_, _, aggErr := a.ComputeAggregateCostModel(promClient, window, field, subfields, aggOpts)
		if aggErr != nil {
			log.Infof("Error building cache %s: %s", window, aggErr)
		}

		// also warm the cluster-costs cache for the same window
		totals, err := a.ComputeClusterCosts(promClient, a.CloudProvider, duration, offset, cacheEfficiencyData)
		if err != nil {
			log.Infof("Error building cluster costs cache %s", key)
		}
		// only cache cluster costs when at least one cluster has more than
		// clusterCostsCacheMinutes of data
		maxMinutesWithData := 0.0
		for _, cluster := range totals {
			if cluster.DataMinutes > maxMinutesWithData {
				maxMinutesWithData = cluster.DataMinutes
			}
		}
		if len(totals) > 0 && maxMinutesWithData > clusterCostsCacheMinutes {
			a.ClusterCostsCache.Set(key, totals, a.GetCacheExpiration(window.Duration()))
			log.Infof("caching %s cluster costs for %s", fmtDuration, a.GetCacheExpiration(window.Duration()))
		} else {
			log.Warnf("not caching %s cluster costs: no data or less than %f minutes data ", fmtDuration, clusterCostsCacheMinutes)
		}
		return aggErr, err
	}

	// 1 day
	go func(sem *util.Semaphore) {
		defer errors.HandlePanic()

		offset := time.Minute
		duration := 24 * time.Hour

		for {
			sem.Acquire()
			// errors are ignored here; the 1-day loop refreshes on a fixed cadence
			warmFunc(duration, offset, true)
			sem.Return()

			log.Infof("aggregation: warm cache: %s", timeutil.DurationString(duration))
			time.Sleep(a.GetCacheRefresh(duration))
		}
	}(sem)

	if !env.IsETLEnabled() {
		// 2 day
		go func(sem *util.Semaphore) {
			defer errors.HandlePanic()

			offset := time.Minute
			duration := 2 * 24 * time.Hour

			for {
				sem.Acquire()
				warmFunc(duration, offset, false)
				sem.Return()

				log.Infof("aggregation: warm cache: %s", timeutil.DurationString(duration))
				time.Sleep(a.GetCacheRefresh(duration))
			}
		}(sem)

		// 7 day
		go func(sem *util.Semaphore) {
			defer errors.HandlePanic()

			offset := time.Minute
			duration := 7 * 24 * time.Hour

			for {
				sem.Acquire()
				aggErr, err := warmFunc(duration, offset, false)
				sem.Return()

				log.Infof("aggregation: warm cache: %s", timeutil.DurationString(duration))
				// on failure, retry sooner than the normal refresh interval
				if aggErr == nil && err == nil {
					time.Sleep(a.GetCacheRefresh(duration))
				} else {
					time.Sleep(5 * time.Minute)
				}
			}
		}(sem)

		// 30 day
		go func(sem *util.Semaphore) {
			defer errors.HandlePanic()

			for {
				offset := time.Minute
				duration := 30 * 24 * time.Hour

				sem.Acquire()
				aggErr, err := warmFunc(duration, offset, false)
				sem.Return()
				// on failure, retry sooner than the normal refresh interval
				if aggErr == nil && err == nil {
					time.Sleep(a.GetCacheRefresh(duration))
				} else {
					time.Sleep(5 * time.Minute)
				}
			}
		}(sem)
	}
}
var (
	// Convert UTC-RFC3339 pairs to configured UTC offset
	// e.g. with UTC offset of -0600, 2020-07-01T00:00:00Z becomes
	// 2020-07-01T06:00:00Z == 2020-07-01T00:00:00-0600
	// TODO niko/etl fix the frontend because this is confusing if you're
	// actually asking for UTC time (...Z) and we swap that "Z" out for the
	// configured UTC offset without asking
	rfc3339      = `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ`
	rfc3339Regex = regexp.MustCompile(fmt.Sprintf(`(%s),(%s)`, rfc3339, rfc3339))
	// durRegex matches simple duration strings like "30m", "24h", "7d", or "60s",
	// capturing the number and the unit
	durRegex = regexp.MustCompile(`^(\d+)(m|h|d|s)$`)
	// percentRegex matches a percentage such as "87.5%", capturing the number
	percentRegex = regexp.MustCompile(`(\d+\.*\d*)%`)
)
  1690. // AggregateCostModelHandler handles requests to the aggregated cost model API. See
  1691. // ComputeAggregateCostModel for details.
  1692. func (a *Accesses) AggregateCostModelHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
  1693. w.Header().Set("Content-Type", "application/json")
  1694. windowStr := r.URL.Query().Get("window")
  1695. match := rfc3339Regex.FindStringSubmatch(windowStr)
  1696. if match != nil {
  1697. start, _ := time.Parse(time.RFC3339, match[1])
  1698. start = start.Add(-env.GetParsedUTCOffset()).In(time.UTC)
  1699. end, _ := time.Parse(time.RFC3339, match[2])
  1700. end = end.Add(-env.GetParsedUTCOffset()).In(time.UTC)
  1701. windowStr = fmt.Sprintf("%sZ,%sZ", start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05Z"))
  1702. }
  1703. // determine duration and offset from query parameters
  1704. window, err := opencost.ParseWindowWithOffset(windowStr, env.GetParsedUTCOffset())
  1705. if err != nil || window.Start() == nil {
  1706. WriteError(w, BadRequest(fmt.Sprintf("invalid window: %s", err)))
  1707. return
  1708. }
  1709. isDurationStr := durRegex.MatchString(windowStr)
  1710. // legacy offset option should override window offset
  1711. if r.URL.Query().Get("offset") != "" {
  1712. offset := r.URL.Query().Get("offset")
  1713. // Shift window by offset, but only when manually set with separate
  1714. // parameter and window was provided as a duration string. Otherwise,
  1715. // do not alter the (duration, offset) from ParseWindowWithOffset.
  1716. if offset != "1m" && isDurationStr {
  1717. match := durRegex.FindStringSubmatch(offset)
  1718. if match != nil && len(match) == 3 {
  1719. dur := time.Minute
  1720. if match[2] == "h" {
  1721. dur = time.Hour
  1722. }
  1723. if match[2] == "d" {
  1724. dur = 24 * time.Hour
  1725. }
  1726. if match[2] == "s" {
  1727. dur = time.Second
  1728. }
  1729. num, _ := strconv.ParseInt(match[1], 10, 64)
  1730. window = window.Shift(-time.Duration(num) * dur)
  1731. }
  1732. }
  1733. }
  1734. opts := DefaultAggregateQueryOpts()
  1735. // parse remaining query parameters
  1736. namespace := r.URL.Query().Get("namespace")
  1737. cluster := r.URL.Query().Get("cluster")
  1738. labels := r.URL.Query().Get("labels")
  1739. annotations := r.URL.Query().Get("annotations")
  1740. podprefix := r.URL.Query().Get("podprefix")
  1741. field := r.URL.Query().Get("aggregation")
  1742. sharedNamespaces := r.URL.Query().Get("sharedNamespaces")
  1743. sharedLabelNames := r.URL.Query().Get("sharedLabelNames")
  1744. sharedLabelValues := r.URL.Query().Get("sharedLabelValues")
  1745. remote := r.URL.Query().Get("remote") != "false"
  1746. subfieldStr := r.URL.Query().Get("aggregationSubfield")
  1747. subfields := []string{}
  1748. if len(subfieldStr) > 0 {
  1749. s := strings.Split(r.URL.Query().Get("aggregationSubfield"), ",")
  1750. for _, rawLabel := range s {
  1751. subfields = append(subfields, promutil.SanitizeLabelName(rawLabel))
  1752. }
  1753. }
  1754. idleFlag := r.URL.Query().Get("allocateIdle")
  1755. if idleFlag == "default" {
  1756. c, _ := a.CloudProvider.GetConfig()
  1757. opts.AllocateIdle = (c.DefaultIdle == "true")
  1758. } else {
  1759. opts.AllocateIdle = (idleFlag == "true")
  1760. }
  1761. opts.Rate = r.URL.Query().Get("rate")
  1762. opts.ShareSplit = r.URL.Query().Get("sharedSplit")
  1763. // timeSeries == true maintains the time series dimension of the data,
  1764. // which by default gets summed over the entire interval
  1765. opts.IncludeTimeSeries = r.URL.Query().Get("timeSeries") == "true"
  1766. // efficiency has been deprecated in favor of a default to always send efficiency
  1767. opts.IncludeEfficiency = true
  1768. // TODO niko/caching rename "recomputeCache"
  1769. // disableCache, if set to "true", tells this function to recompute and
  1770. // cache the requested data
  1771. opts.DisableAggregateCostModelCache = r.URL.Query().Get("disableCache") == "true"
  1772. // clearCache, if set to "true", tells this function to flush the cache,
  1773. // then recompute and cache the requested data
  1774. opts.ClearCache = r.URL.Query().Get("clearCache") == "true"
  1775. // noCache avoids the cache altogether, both reading from and writing to
  1776. opts.NoCache = r.URL.Query().Get("noCache") == "true"
  1777. // noExpireCache should only be used by cache warming to set non-expiring caches
  1778. opts.NoExpireCache = false
  1779. // etl triggers ETL adapter
  1780. opts.UseETLAdapter = r.URL.Query().Get("etl") == "true"
  1781. // aggregation field is required
  1782. if field == "" {
  1783. WriteError(w, BadRequest("Missing aggregation field parameter"))
  1784. return
  1785. }
  1786. // aggregation subfield is required when aggregation field is "label"
  1787. if (field == "label" || field == "annotation") && len(subfields) == 0 {
  1788. WriteError(w, BadRequest("Missing aggregation subfield parameter"))
  1789. return
  1790. }
  1791. // enforce one of the available rate options
  1792. if opts.Rate != "" && opts.Rate != "hourly" && opts.Rate != "daily" && opts.Rate != "monthly" {
  1793. WriteError(w, BadRequest("Rate parameter only supports: hourly, daily, monthly or empty"))
  1794. return
  1795. }
  1796. // parse cost data filters
  1797. // namespace and cluster are exact-string-matches
  1798. // labels are expected to be comma-separated and to take the form key=value
  1799. // e.g. app=cost-analyzer,app.kubernetes.io/instance=kubecost
  1800. opts.Filters = map[string]string{
  1801. "namespace": namespace,
  1802. "cluster": cluster,
  1803. "labels": labels,
  1804. "annotations": annotations,
  1805. "podprefix": podprefix,
  1806. }
  1807. // parse shared resources
  1808. sn := []string{}
  1809. sln := []string{}
  1810. slv := []string{}
  1811. if sharedNamespaces != "" {
  1812. sn = strings.Split(sharedNamespaces, ",")
  1813. }
  1814. if sharedLabelNames != "" {
  1815. sln = strings.Split(sharedLabelNames, ",")
  1816. slv = strings.Split(sharedLabelValues, ",")
  1817. if len(sln) != len(slv) || slv[0] == "" {
  1818. WriteError(w, BadRequest("Supply exactly one shared label value per shared label name"))
  1819. return
  1820. }
  1821. }
  1822. if len(sn) > 0 || len(sln) > 0 {
  1823. opts.SharedResources = NewSharedResourceInfo(true, sn, sln, slv)
  1824. }
  1825. // enable remote if it is available and not disabled
  1826. opts.RemoteEnabled = remote && env.IsRemoteEnabled()
  1827. promClient := a.GetPrometheusClient(remote)
  1828. var data map[string]*Aggregation
  1829. var message string
  1830. data, message, err = a.AggAPI.ComputeAggregateCostModel(promClient, window, field, subfields, opts)
  1831. // Find any warnings in http request context
  1832. warning, _ := httputil.GetWarning(r)
  1833. if err != nil {
  1834. if emptyErr, ok := err.(*EmptyDataError); ok {
  1835. if warning == "" {
  1836. w.Write(WrapData(map[string]interface{}{}, emptyErr))
  1837. } else {
  1838. w.Write(WrapDataWithWarning(map[string]interface{}{}, emptyErr, warning))
  1839. }
  1840. return
  1841. }
  1842. if boundaryErr, ok := err.(*opencost.BoundaryError); ok {
  1843. if window.Start() != nil && window.Start().After(time.Now().Add(-90*24*time.Hour)) {
  1844. // Asking for data within a 90 day period: it will be available
  1845. // after the pipeline builds
  1846. msg := "Data will be available after ETL is built"
  1847. match := percentRegex.FindStringSubmatch(boundaryErr.Message)
  1848. if len(match) > 1 {
  1849. completionPct, err := strconv.ParseFloat(match[1], 64)
  1850. if err == nil {
  1851. msg = fmt.Sprintf("%s (%.1f%% complete)", msg, completionPct)
  1852. }
  1853. }
  1854. WriteError(w, InternalServerError(msg))
  1855. } else {
  1856. // Boundary error outside of 90 day period; may not be available
  1857. WriteError(w, InternalServerError(boundaryErr.Error()))
  1858. }
  1859. return
  1860. }
  1861. errStr := fmt.Sprintf("error computing aggregate cost model: %s", err)
  1862. WriteError(w, InternalServerError(errStr))
  1863. return
  1864. }
  1865. if warning == "" {
  1866. w.Write(WrapDataWithMessage(data, nil, message))
  1867. } else {
  1868. w.Write(WrapDataWithMessageAndWarning(data, nil, message, warning))
  1869. }
  1870. }
  1871. // ParseAggregationProperties attempts to parse and return aggregation properties
  1872. // encoded under the given key. If none exist, or if parsing fails, an error
  1873. // is returned with empty AllocationProperties.
  1874. func ParseAggregationProperties(aggregations []string) ([]string, error) {
  1875. aggregateBy := []string{}
  1876. // In case of no aggregation option, aggregate to the container, with a key Cluster/Node/Namespace/Pod/Container
  1877. if len(aggregations) == 0 {
  1878. aggregateBy = []string{
  1879. opencost.AllocationClusterProp,
  1880. opencost.AllocationNodeProp,
  1881. opencost.AllocationNamespaceProp,
  1882. opencost.AllocationPodProp,
  1883. opencost.AllocationContainerProp,
  1884. }
  1885. } else if len(aggregations) == 1 && aggregations[0] == "all" {
  1886. aggregateBy = []string{}
  1887. } else {
  1888. for _, agg := range aggregations {
  1889. aggregate := strings.TrimSpace(agg)
  1890. if aggregate != "" {
  1891. if prop, err := opencost.ParseProperty(aggregate); err == nil {
  1892. aggregateBy = append(aggregateBy, string(prop))
  1893. } else if strings.HasPrefix(aggregate, "label:") {
  1894. aggregateBy = append(aggregateBy, aggregate)
  1895. } else if strings.HasPrefix(aggregate, "annotation:") {
  1896. aggregateBy = append(aggregateBy, aggregate)
  1897. }
  1898. }
  1899. }
  1900. }
  1901. return aggregateBy, nil
  1902. }
  1903. func (a *Accesses) ComputeAllocationHandlerSummary(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
  1904. w.Header().Set("Content-Type", "application/json")
  1905. qp := httputil.NewQueryParams(r.URL.Query())
  1906. // Window is a required field describing the window of time over which to
  1907. // compute allocation data.
  1908. window, err := opencost.ParseWindowWithOffset(qp.Get("window", ""), env.GetParsedUTCOffset())
  1909. if err != nil {
  1910. http.Error(w, fmt.Sprintf("Invalid 'window' parameter: %s", err), http.StatusBadRequest)
  1911. }
  1912. // Step is an optional parameter that defines the duration per-set, i.e.
  1913. // the window for an AllocationSet, of the AllocationSetRange to be
  1914. // computed. Defaults to the window size, making one set.
  1915. step := qp.GetDuration("step", window.Duration())
  1916. // Resolution is an optional parameter, defaulting to the configured ETL
  1917. // resolution.
  1918. resolution := qp.GetDuration("resolution", env.GetETLResolution())
  1919. // Aggregation is a required comma-separated list of fields by which to
  1920. // aggregate results. Some fields allow a sub-field, which is distinguished
  1921. // with a colon; e.g. "label:app".
  1922. // Examples: "namespace", "namespace,label:app"
  1923. aggregations := qp.GetList("aggregate", ",")
  1924. aggregateBy, err := ParseAggregationProperties(aggregations)
  1925. if err != nil {
  1926. http.Error(w, fmt.Sprintf("Invalid 'aggregate' parameter: %s", err), http.StatusBadRequest)
  1927. }
  1928. // Accumulate is an optional parameter, defaulting to false, which if true
  1929. // sums each Set in the Range, producing one Set.
  1930. accumulate := qp.GetBool("accumulate", false)
  1931. // Query for AllocationSets in increments of the given step duration,
  1932. // appending each to the AllocationSetRange.
  1933. asr := opencost.NewAllocationSetRange()
  1934. stepStart := *window.Start()
  1935. for window.End().After(stepStart) {
  1936. stepEnd := stepStart.Add(step)
  1937. stepWindow := opencost.NewWindow(&stepStart, &stepEnd)
  1938. as, err := a.Model.ComputeAllocation(*stepWindow.Start(), *stepWindow.End(), resolution)
  1939. if err != nil {
  1940. WriteError(w, InternalServerError(err.Error()))
  1941. return
  1942. }
  1943. asr.Append(as)
  1944. stepStart = stepEnd
  1945. }
  1946. // Aggregate, if requested
  1947. if len(aggregateBy) > 0 {
  1948. err = asr.AggregateBy(aggregateBy, nil)
  1949. if err != nil {
  1950. WriteError(w, InternalServerError(err.Error()))
  1951. return
  1952. }
  1953. }
  1954. // Accumulate, if requested
  1955. if accumulate {
  1956. asr, err = asr.Accumulate(opencost.AccumulateOptionAll)
  1957. if err != nil {
  1958. WriteError(w, InternalServerError(err.Error()))
  1959. return
  1960. }
  1961. }
  1962. sasl := []*opencost.SummaryAllocationSet{}
  1963. for _, as := range asr.Slice() {
  1964. sas := opencost.NewSummaryAllocationSet(as, nil, nil, false, false)
  1965. sasl = append(sasl, sas)
  1966. }
  1967. sasr := opencost.NewSummaryAllocationSetRange(sasl...)
  1968. w.Write(WrapData(sasr, nil))
  1969. }
  1970. // ComputeAllocationHandler computes an AllocationSetRange from the CostModel.
  1971. func (a *Accesses) ComputeAllocationHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
  1972. w.Header().Set("Content-Type", "application/json")
  1973. qp := httputil.NewQueryParams(r.URL.Query())
  1974. // Window is a required field describing the window of time over which to
  1975. // compute allocation data.
  1976. window, err := opencost.ParseWindowWithOffset(qp.Get("window", ""), env.GetParsedUTCOffset())
  1977. if err != nil {
  1978. http.Error(w, fmt.Sprintf("Invalid 'window' parameter: %s", err), http.StatusBadRequest)
  1979. }
  1980. // Resolution is an optional parameter, defaulting to the configured ETL
  1981. // resolution.
  1982. resolution := qp.GetDuration("resolution", env.GetETLResolution())
  1983. // Step is an optional parameter that defines the duration per-set, i.e.
  1984. // the window for an AllocationSet, of the AllocationSetRange to be
  1985. // computed. Defaults to the window size, making one set.
  1986. step := qp.GetDuration("step", window.Duration())
  1987. // Aggregation is an optional comma-separated list of fields by which to
  1988. // aggregate results. Some fields allow a sub-field, which is distinguished
  1989. // with a colon; e.g. "label:app".
  1990. // Examples: "namespace", "namespace,label:app"
  1991. aggregations := qp.GetList("aggregate", ",")
  1992. aggregateBy, err := ParseAggregationProperties(aggregations)
  1993. if err != nil {
  1994. http.Error(w, fmt.Sprintf("Invalid 'aggregate' parameter: %s", err), http.StatusBadRequest)
  1995. }
  1996. // IncludeIdle, if true, uses Asset data to incorporate Idle Allocation
  1997. includeIdle := qp.GetBool("includeIdle", false)
  1998. // Accumulate is an optional parameter, defaulting to false, which if true
  1999. // sums each Set in the Range, producing one Set.
  2000. accumulate := qp.GetBool("accumulate", false)
  2001. // Accumulate is an optional parameter that accumulates an AllocationSetRange
  2002. // by the resolution of the given time duration.
  2003. // Defaults to 0. If a value is not passed then the parameter is not used.
  2004. accumulateBy := opencost.AccumulateOption(qp.Get("accumulateBy", ""))
  2005. // if accumulateBy is not explicitly set, and accumulate is true, ensure result is accumulated
  2006. if accumulateBy == opencost.AccumulateOptionNone && accumulate {
  2007. accumulateBy = opencost.AccumulateOptionAll
  2008. }
  2009. // IdleByNode, if true, computes idle allocations at the node level.
  2010. // Otherwise it is computed at the cluster level. (Not relevant if idle
  2011. // is not included.)
  2012. idleByNode := qp.GetBool("idleByNode", false)
  2013. sharedLoadBalancer := qp.GetBool("sharelb", false)
  2014. // IncludeProportionalAssetResourceCosts, if true,
  2015. includeProportionalAssetResourceCosts := qp.GetBool("includeProportionalAssetResourceCosts", false)
  2016. // include aggregated labels/annotations if true
  2017. includeAggregatedMetadata := qp.GetBool("includeAggregatedMetadata", false)
  2018. asr, err := a.Model.QueryAllocation(window, resolution, step, aggregateBy, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer, accumulateBy)
  2019. if err != nil {
  2020. if strings.Contains(strings.ToLower(err.Error()), "bad request") {
  2021. WriteError(w, BadRequest(err.Error()))
  2022. } else {
  2023. WriteError(w, InternalServerError(err.Error()))
  2024. }
  2025. return
  2026. }
  2027. w.Write(WrapData(asr, nil))
  2028. }
  2029. // The below was transferred from a different package in order to maintain
  2030. // previous behavior. Ultimately, we should clean this up at some point.
  2031. // TODO move to util and/or standardize everything
// Error pairs an HTTP status code with a response body message. It is the
// value consumed by WriteError and produced by the helper constructors
// below (BadRequest, InternalServerError, NotFound).
type Error struct {
// StatusCode is the HTTP status to write; WriteError treats 0 as 500.
StatusCode int
// Body is the human-readable error message included in the response.
Body string
}
  2036. func WriteError(w http.ResponseWriter, err Error) {
  2037. status := err.StatusCode
  2038. if status == 0 {
  2039. status = http.StatusInternalServerError
  2040. }
  2041. w.WriteHeader(status)
  2042. resp, _ := json.Marshal(&Response{
  2043. Code: status,
  2044. Message: fmt.Sprintf("Error: %s", err.Body),
  2045. })
  2046. w.Write(resp)
  2047. }
  2048. func BadRequest(message string) Error {
  2049. return Error{
  2050. StatusCode: http.StatusBadRequest,
  2051. Body: message,
  2052. }
  2053. }
  2054. func InternalServerError(message string) Error {
  2055. if message == "" {
  2056. message = "Internal Server Error"
  2057. }
  2058. return Error{
  2059. StatusCode: http.StatusInternalServerError,
  2060. Body: message,
  2061. }
  2062. }
  2063. func NotFound() Error {
  2064. return Error{
  2065. StatusCode: http.StatusNotFound,
  2066. Body: "Not Found",
  2067. }
  2068. }