|
@@ -3,11 +3,12 @@ package costmodel
|
|
|
import (
|
|
import (
|
|
|
"fmt"
|
|
"fmt"
|
|
|
"os"
|
|
"os"
|
|
|
|
|
+ "sync"
|
|
|
"time"
|
|
"time"
|
|
|
|
|
|
|
|
- costAnalyzerCloud "github.com/kubecost/cost-model/cloud"
|
|
|
|
|
- prometheusClient "github.com/prometheus/client_golang/api"
|
|
|
|
|
-
|
|
|
|
|
|
|
+ "github.com/kubecost/cost-model/cloud"
|
|
|
|
|
+ "github.com/kubecost/cost-model/util"
|
|
|
|
|
+ prometheus "github.com/prometheus/client_golang/api"
|
|
|
"k8s.io/klog"
|
|
"k8s.io/klog"
|
|
|
)
|
|
)
|
|
|
|
|
|
|
@@ -33,6 +34,257 @@ const (
|
|
|
) by (cluster_id) %s`
|
|
) by (cluster_id) %s`
|
|
|
)
|
|
)
|
|
|
|
|
|
|
|
|
|
// TODO move this to a package-accessible helper
// PromQueryContext bundles the dependencies shared by concurrently issued
// Prometheus queries: the client to query against, an error collector that
// aggregates failures reported from multiple goroutines, and an optional
// WaitGroup used to signal query completion.
type PromQueryContext struct {
	client prometheus.Client    // Prometheus API client queries are run against
	ec     *util.ErrorCollector // collects query/parse errors across goroutines
	wg     *sync.WaitGroup      // optional; AsyncPromQuery calls Done() once per query when non-nil
}
|
|
|
|
|
+
|
|
|
|
|
+// TODO move this to a package-accessible helper function once dependencies are able to
|
|
|
|
|
+// be extricated from costmodel package (PromQueryResult -> Vector). Otherwise, circular deps.
|
|
|
|
|
+func AsyncPromQuery(query string, resultCh chan []*PromQueryResult, ctx PromQueryContext) {
|
|
|
|
|
+ if ctx.wg != nil {
|
|
|
|
|
+ defer ctx.wg.Done()
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ raw, promErr := Query(ctx.client, query)
|
|
|
|
|
+ ctx.ec.Report(promErr)
|
|
|
|
|
+
|
|
|
|
|
+ results, parseErr := NewQueryResults(raw)
|
|
|
|
|
+ ctx.ec.Report(parseErr)
|
|
|
|
|
+
|
|
|
|
|
+ resultCh <- results
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// ClusterCosts represents cumulative and monthly cluster costs over a given duration. Costs
// are broken down by cores, memory, and storage.
type ClusterCosts struct {
	Start             *time.Time `json:"startTime"`             // beginning of the costed window
	End               *time.Time `json:"endTime"`               // end of the costed window
	CPUCumulative     float64    `json:"cpuCumulativeCost"`     // total CPU cost over [Start, End]
	CPUMonthly        float64    `json:"cpuMonthlyCost"`        // CPU cost extrapolated to a monthly rate
	GPUCumulative     float64    `json:"gpuCumulativeCost"`     // total GPU cost over [Start, End]
	GPUMonthly        float64    `json:"gpuMonthlyCost"`        // GPU cost extrapolated to a monthly rate
	RAMCumulative     float64    `json:"ramCumulativeCost"`     // total memory cost over [Start, End]
	RAMMonthly        float64    `json:"ramMonthlyCost"`        // memory cost extrapolated to a monthly rate
	StorageCumulative float64    `json:"storageCumulativeCost"` // total storage cost over [Start, End]
	StorageMonthly    float64    `json:"storageMonthlyCost"`    // storage cost extrapolated to a monthly rate
	TotalCumulative   float64    `json:"totalCumulativeCost"`   // sum of the cumulative components
	TotalMonthly      float64    `json:"totalMonthlyCost"`      // sum of the monthly components
}
|
|
|
|
|
+
|
|
|
|
|
+// NewClusterCostsFromCumulative takes cumulative cost data over a given time range, computes
|
|
|
|
|
+// the associated monthly rate data, and returns the Costs.
|
|
|
|
|
+func NewClusterCostsFromCumulative(cpu, gpu, ram, storage float64, window, offset string, dataHours float64) (*ClusterCosts, error) {
|
|
|
|
|
+ start, end, err := util.ParseTimeRange(window, offset)
|
|
|
|
|
+ if err != nil {
|
|
|
|
|
+ return nil, err
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ // If the number of hours is not given (i.e. is zero) compute one from the window and offset
|
|
|
|
|
+ if dataHours == 0 {
|
|
|
|
|
+ dataHours = end.Sub(*start).Hours()
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ // Do not allow zero-length windows to prevent divide-by-zero issues
|
|
|
|
|
+ if dataHours == 0 {
|
|
|
|
|
+ return nil, fmt.Errorf("illegal time range: window %s, offset %s", window, offset)
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ cc := &ClusterCosts{
|
|
|
|
|
+ Start: start,
|
|
|
|
|
+ End: end,
|
|
|
|
|
+ CPUCumulative: cpu,
|
|
|
|
|
+ GPUCumulative: gpu,
|
|
|
|
|
+ RAMCumulative: ram,
|
|
|
|
|
+ StorageCumulative: storage,
|
|
|
|
|
+ TotalCumulative: cpu + gpu + ram + storage,
|
|
|
|
|
+ CPUMonthly: cpu / dataHours * (util.HoursPerMonth),
|
|
|
|
|
+ GPUMonthly: gpu / dataHours * (util.HoursPerMonth),
|
|
|
|
|
+ RAMMonthly: ram / dataHours * (util.HoursPerMonth),
|
|
|
|
|
+ StorageMonthly: storage / dataHours * (util.HoursPerMonth),
|
|
|
|
|
+ }
|
|
|
|
|
+ cc.TotalMonthly = cc.CPUMonthly + cc.GPUMonthly + cc.RAMMonthly + cc.StorageMonthly
|
|
|
|
|
+
|
|
|
|
|
+ return cc, nil
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// NewClusterCostsFromMonthly takes monthly-rate cost data over a given time range, computes
|
|
|
|
|
+// the associated cumulative cost data, and returns the Costs.
|
|
|
|
|
+func NewClusterCostsFromMonthly(cpuMonthly, gpuMonthly, ramMonthly, storageMonthly float64, window, offset string, dataHours float64) (*ClusterCosts, error) {
|
|
|
|
|
+ start, end, err := util.ParseTimeRange(window, offset)
|
|
|
|
|
+ if err != nil {
|
|
|
|
|
+ return nil, err
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ // If the number of hours is not given (i.e. is zero) compute one from the window and offset
|
|
|
|
|
+ if dataHours == 0 {
|
|
|
|
|
+ dataHours = end.Sub(*start).Hours()
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ // Do not allow zero-length windows to prevent divide-by-zero issues
|
|
|
|
|
+ if dataHours == 0 {
|
|
|
|
|
+ return nil, fmt.Errorf("illegal time range: window %s, offset %s", window, offset)
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ cc := &ClusterCosts{
|
|
|
|
|
+ Start: start,
|
|
|
|
|
+ End: end,
|
|
|
|
|
+ CPUMonthly: cpuMonthly,
|
|
|
|
|
+ GPUMonthly: gpuMonthly,
|
|
|
|
|
+ RAMMonthly: ramMonthly,
|
|
|
|
|
+ StorageMonthly: storageMonthly,
|
|
|
|
|
+ TotalMonthly: cpuMonthly + gpuMonthly + ramMonthly + storageMonthly,
|
|
|
|
|
+ CPUCumulative: cpuMonthly / util.HoursPerMonth * dataHours,
|
|
|
|
|
+ GPUCumulative: gpuMonthly / util.HoursPerMonth * dataHours,
|
|
|
|
|
+ RAMCumulative: ramMonthly / util.HoursPerMonth * dataHours,
|
|
|
|
|
+ StorageCumulative: storageMonthly / util.HoursPerMonth * dataHours,
|
|
|
|
|
+ }
|
|
|
|
|
+ cc.TotalCumulative = cc.CPUCumulative + cc.GPUCumulative + cc.RAMCumulative + cc.StorageCumulative
|
|
|
|
|
+
|
|
|
|
|
+ return cc, nil
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// ComputeClusterCosts gives the cumulative and monthly-rate cluster costs over a window of time for all clusters.
func ComputeClusterCosts(client prometheus.Client, provider cloud.Provider, window, offset string) (map[string]*ClusterCosts, error) {
	// Compute number of minutes in the full interval, for use interpolating missed scrapes or scaling missing data
	start, end, err := util.ParseTimeRange(window, offset)
	if err != nil {
		return nil, err
	}
	mins := end.Sub(*start).Minutes()

	// Counts 1m-resolution samples present in the window; used below as the
	// number of minutes of data actually available (may be fewer than mins).
	const fmtQueryDataCount = `max(count_over_time(kube_node_status_capacity_cpu_cores[%s:1m]%s))`

	// Per-node GPU cost: hourly cost sampled each minute, divided by 60 to
	// convert each sample to a per-minute cost.
	const fmtQueryTotalGPU = `sum(
		sum_over_time(node_gpu_hourly_cost[%s:1m]%s) / 60
	) by (node, cluster_id)`

	// NOTE(review): unlike the GPU query, the outer sum() here has no
	// `by (cluster_id)` clause, so the cluster_id label may be collapsed and
	// results fall back to the default cluster ID below — confirm intended.
	const fmtQueryTotalCPU = `sum(
		sum(sum_over_time(kube_node_status_capacity_cpu_cores[%s:1m]%s)) by (node, cluster_id) *
		avg(avg_over_time(node_cpu_hourly_cost[%s:1m]%s)) by (node, cluster_id) / 60
	)`

	// RAM capacity converted bytes -> GiB, times the per-node hourly RAM cost;
	// same cluster_id caveat as the CPU query above.
	const fmtQueryTotalRAM = `sum(
		sum(sum_over_time(kube_node_status_capacity_memory_bytes[%s:1m]%s) / 1024 / 1024 / 1024) by (node, cluster_id) *
		avg(avg_over_time(node_ram_hourly_cost[%s:1m]%s)) by (node, cluster_id) / 60
	)`

	// PV capacity (bytes -> GiB) times per-PV hourly cost; the trailing %s is
	// the optional "+ <local storage query>" suffix built below.
	const fmtQueryTotalStorage = `sum(
		sum(sum_over_time(kube_persistentvolume_capacity_bytes[%s:1m]%s)) by (persistentvolume, cluster_id) / 1024 / 1024 / 1024 *
		avg(avg_over_time(pv_hourly_cost[%s:1m]%s)) by (persistentvolume, cluster_id) / 60
	)%s`

	// Providers may contribute a local-storage term; when present it is
	// appended additively to the storage query.
	queryTotalLocalStorage := provider.GetLocalStorageQuery(window, offset, false)
	if queryTotalLocalStorage != "" {
		queryTotalLocalStorage = fmt.Sprintf(" + %s", queryTotalLocalStorage)
	}

	// Turn an offset like "1h" into the PromQL modifier "offset 1h" (or empty).
	fmtOffset := ""
	if offset != "" {
		fmtOffset = fmt.Sprintf("offset %s", offset)
	}

	queryDataCount := fmt.Sprintf(fmtQueryDataCount, window, fmtOffset)
	queryTotalGPU := fmt.Sprintf(fmtQueryTotalGPU, window, fmtOffset)
	queryTotalCPU := fmt.Sprintf(fmtQueryTotalCPU, window, fmtOffset, window, fmtOffset)
	queryTotalRAM := fmt.Sprintf(fmtQueryTotalRAM, window, fmtOffset, window, fmtOffset)
	queryTotalStorage := fmt.Sprintf(fmtQueryTotalStorage, window, fmtOffset, window, fmtOffset, queryTotalLocalStorage)
	// Must match the number of AsyncPromQuery goroutines launched below.
	numQueries := 5

	klog.V(4).Infof("[Debug] queryDataCount: %s", queryDataCount)
	klog.V(4).Infof("[Debug] queryTotalGPU: %s", queryTotalGPU)
	klog.V(4).Infof("[Debug] queryTotalCPU: %s", queryTotalCPU)
	klog.V(4).Infof("[Debug] queryTotalRAM: %s", queryTotalRAM)
	klog.V(4).Infof("[Debug] queryTotalStorage: %s", queryTotalStorage)

	// Submit queries to Prometheus asynchronously
	var ec util.ErrorCollector
	var wg sync.WaitGroup
	ctx := PromQueryContext{client, &ec, &wg}
	ctx.wg.Add(numQueries)

	// Each channel is buffered (capacity 1) so the goroutine's send cannot
	// block, allowing wg.Wait() below to complete before the receives.
	chDataCount := make(chan []*PromQueryResult, 1)
	go AsyncPromQuery(queryDataCount, chDataCount, ctx)

	chTotalGPU := make(chan []*PromQueryResult, 1)
	go AsyncPromQuery(queryTotalGPU, chTotalGPU, ctx)

	chTotalCPU := make(chan []*PromQueryResult, 1)
	go AsyncPromQuery(queryTotalCPU, chTotalCPU, ctx)

	chTotalRAM := make(chan []*PromQueryResult, 1)
	go AsyncPromQuery(queryTotalRAM, chTotalRAM, ctx)

	chTotalStorage := make(chan []*PromQueryResult, 1)
	go AsyncPromQuery(queryTotalStorage, chTotalStorage, ctx)

	// After queries complete, retrieve results
	wg.Wait()

	resultsDataCount := <-chDataCount
	close(chDataCount)

	resultsTotalGPU := <-chTotalGPU
	close(chTotalGPU)

	resultsTotalCPU := <-chTotalCPU
	close(chTotalCPU)

	resultsTotalRAM := <-chTotalRAM
	close(chTotalRAM)

	resultsTotalStorage := <-chTotalStorage
	close(chTotalStorage)

	// Prefer the measured sample count as the data duration; fall back to the
	// full window length when the count query returned nothing.
	dataMins := mins
	if len(resultsDataCount) > 0 && len(resultsDataCount[0].Values) > 0 {
		dataMins = resultsDataCount[0].Values[0].Value
	} else {
		klog.V(3).Infof("[Warning] cluster cost data count returned no results")
	}

	// Intermediate structure storing mapping of [clusterID][type ∈ {cpu, ram, storage, total}]=cost
	costData := make(map[string]map[string]float64)
	defaultClusterID := os.Getenv(clusterIDKey)

	// Helper function to iterate over Prom query results, parsing the raw values into
	// the intermediate costData structure.
	setCostsFromResults := func(costData map[string]map[string]float64, results []*PromQueryResult, name string) {
		for _, result := range results {
			clusterID, _ := result.GetString("cluster_id")
			if clusterID == "" {
				// Results without a cluster_id label (see query note above)
				// are attributed to the configured default cluster.
				clusterID = defaultClusterID
			}
			if _, ok := costData[clusterID]; !ok {
				costData[clusterID] = map[string]float64{}
			}
			if len(result.Values) > 0 {
				// Accumulate (+=) so per-node/per-PV series roll up per cluster.
				costData[clusterID][name] += result.Values[0].Value
				costData[clusterID]["total"] += result.Values[0].Value
			}
		}
	}
	setCostsFromResults(costData, resultsTotalGPU, "gpu")
	setCostsFromResults(costData, resultsTotalCPU, "cpu")
	setCostsFromResults(costData, resultsTotalRAM, "ram")
	setCostsFromResults(costData, resultsTotalStorage, "storage")

	// Convert intermediate structure to Costs instances
	costsByCluster := map[string]*ClusterCosts{}
	for id, cd := range costData {
		costs, err := NewClusterCostsFromCumulative(cd["cpu"], cd["gpu"], cd["ram"], cd["storage"], window, offset, dataMins/util.MinsPerHour)
		if err != nil {
			klog.V(3).Infof("[Warning] Failed to parse cluster costs on %s (%s) from cumulative data: %+v", window, offset, cd)
			return nil, err
		}
		costsByCluster[id] = costs
	}

	return costsByCluster, nil
}
|
|
|
|
|
+
|
|
|
type Totals struct {
|
|
type Totals struct {
|
|
|
TotalCost [][]string `json:"totalcost"`
|
|
TotalCost [][]string `json:"totalcost"`
|
|
|
CPUCost [][]string `json:"cpucost"`
|
|
CPUCost [][]string `json:"cpucost"`
|
|
@@ -103,34 +355,33 @@ func resultToTotal(qr interface{}) (map[string][][]string, error) {
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
// ClusterCostsForAllClusters gives the cluster costs averaged over a window of time for all clusters.
|
|
// ClusterCostsForAllClusters gives the cluster costs averaged over a window of time for all clusters.
|
|
|
-func ClusterCostsForAllClusters(cli prometheusClient.Client, cloud costAnalyzerCloud.Provider, windowString, offset string) (map[string]*Totals, error) {
|
|
|
|
|
-
|
|
|
|
|
- if offset != "" {
|
|
|
|
|
- offset = fmt.Sprintf("offset %s", offset)
|
|
|
|
|
- }
|
|
|
|
|
-
|
|
|
|
|
- localStorageQuery, err := cloud.GetLocalStorageQuery(offset)
|
|
|
|
|
- if err != nil {
|
|
|
|
|
- return nil, err
|
|
|
|
|
- }
|
|
|
|
|
|
|
+func ClusterCostsForAllClusters(cli prometheus.Client, provider cloud.Provider, window, offset string) (map[string]*Totals, error) {
|
|
|
|
|
+ localStorageQuery := provider.GetLocalStorageQuery(window, offset, true)
|
|
|
if localStorageQuery != "" {
|
|
if localStorageQuery != "" {
|
|
|
localStorageQuery = fmt.Sprintf("+ %s", localStorageQuery)
|
|
localStorageQuery = fmt.Sprintf("+ %s", localStorageQuery)
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
- qCores := fmt.Sprintf(queryClusterCores, windowString, offset, windowString, offset, windowString, offset)
|
|
|
|
|
- qRAM := fmt.Sprintf(queryClusterRAM, windowString, offset, windowString, offset)
|
|
|
|
|
- qStorage := fmt.Sprintf(queryStorage, windowString, offset, windowString, offset, localStorageQuery)
|
|
|
|
|
|
|
+ fmtOffset := ""
|
|
|
|
|
+ if offset != "" {
|
|
|
|
|
+ fmtOffset = fmt.Sprintf("offset %s", offset)
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ qCores := fmt.Sprintf(queryClusterCores, window, fmtOffset, window, fmtOffset, window, fmtOffset)
|
|
|
|
|
+ qRAM := fmt.Sprintf(queryClusterRAM, window, fmtOffset, window, fmtOffset)
|
|
|
|
|
+ qStorage := fmt.Sprintf(queryStorage, window, fmtOffset, window, fmtOffset, localStorageQuery)
|
|
|
|
|
|
|
|
klog.V(4).Infof("Running query %s", qCores)
|
|
klog.V(4).Infof("Running query %s", qCores)
|
|
|
resultClusterCores, err := Query(cli, qCores)
|
|
resultClusterCores, err := Query(cli, qCores)
|
|
|
if err != nil {
|
|
if err != nil {
|
|
|
return nil, fmt.Errorf("Error for query %s: %s", qCores, err.Error())
|
|
return nil, fmt.Errorf("Error for query %s: %s", qCores, err.Error())
|
|
|
}
|
|
}
|
|
|
|
|
+
|
|
|
klog.V(4).Infof("Running query %s", qRAM)
|
|
klog.V(4).Infof("Running query %s", qRAM)
|
|
|
resultClusterRAM, err := Query(cli, qRAM)
|
|
resultClusterRAM, err := Query(cli, qRAM)
|
|
|
if err != nil {
|
|
if err != nil {
|
|
|
return nil, fmt.Errorf("Error for query %s: %s", qRAM, err.Error())
|
|
return nil, fmt.Errorf("Error for query %s: %s", qRAM, err.Error())
|
|
|
}
|
|
}
|
|
|
|
|
+
|
|
|
klog.V(4).Infof("Running query %s", qRAM)
|
|
klog.V(4).Infof("Running query %s", qRAM)
|
|
|
resultStorage, err := Query(cli, qStorage)
|
|
resultStorage, err := Query(cli, qStorage)
|
|
|
if err != nil {
|
|
if err != nil {
|
|
@@ -175,25 +426,23 @@ func ClusterCostsForAllClusters(cli prometheusClient.Client, cloud costAnalyzerC
|
|
|
return toReturn, nil
|
|
return toReturn, nil
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
-// ClusterCosts gives the current full cluster costs averaged over a window of time.
|
|
|
|
|
-func ClusterCosts(cli prometheusClient.Client, cloud costAnalyzerCloud.Provider, windowString, offset string) (*Totals, error) {
|
|
|
|
|
-
|
|
|
|
|
|
|
+// AverageClusterTotals gives the current full cluster costs averaged over a window of time.
|
|
|
|
|
+// Used to be ClusterCosts, but has been deprecated for that use.
|
|
|
|
|
+func AverageClusterTotals(cli prometheus.Client, provider cloud.Provider, windowString, offset string) (*Totals, error) {
|
|
|
// turn offsets of the format "[0-9+]h" into the format "offset [0-9+]h" for use in query templates
|

// turn offsets of the format "[0-9+]h" into the format "offset [0-9+]h" for use in query templates
|
|
|
|
|
+ fmtOffset := ""
|
|
|
if offset != "" {
|
|
if offset != "" {
|
|
|
- offset = fmt.Sprintf("offset %s", offset)
|
|
|
|
|
|
|
+ fmtOffset = fmt.Sprintf("offset %s", offset)
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
- localStorageQuery, err := cloud.GetLocalStorageQuery(offset)
|
|
|
|
|
- if err != nil {
|
|
|
|
|
- return nil, err
|
|
|
|
|
- }
|
|
|
|
|
|
|
+ localStorageQuery := provider.GetLocalStorageQuery(windowString, offset, true)
|
|
|
if localStorageQuery != "" {
|
|
if localStorageQuery != "" {
|
|
|
localStorageQuery = fmt.Sprintf("+ %s", localStorageQuery)
|
|
localStorageQuery = fmt.Sprintf("+ %s", localStorageQuery)
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
- qCores := fmt.Sprintf(queryClusterCores, windowString, offset, windowString, offset, windowString, offset)
|
|
|
|
|
- qRAM := fmt.Sprintf(queryClusterRAM, windowString, offset, windowString, offset)
|
|
|
|
|
- qStorage := fmt.Sprintf(queryStorage, windowString, offset, windowString, offset, localStorageQuery)
|
|
|
|
|
|
|
+ qCores := fmt.Sprintf(queryClusterCores, windowString, fmtOffset, windowString, fmtOffset, windowString, fmtOffset)
|
|
|
|
|
+ qRAM := fmt.Sprintf(queryClusterRAM, windowString, fmtOffset, windowString, fmtOffset)
|
|
|
|
|
+ qStorage := fmt.Sprintf(queryStorage, windowString, fmtOffset, windowString, fmtOffset, localStorageQuery)
|
|
|
qTotal := fmt.Sprintf(queryTotal, localStorageQuery)
|
|
qTotal := fmt.Sprintf(queryTotal, localStorageQuery)
|
|
|
|
|
|
|
|
resultClusterCores, err := Query(cli, qCores)
|
|
resultClusterCores, err := Query(cli, qCores)
|
|
@@ -246,12 +495,8 @@ func ClusterCosts(cli prometheusClient.Client, cloud costAnalyzerCloud.Provider,
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
// ClusterCostsOverTime gives the full cluster costs over time
|
|
// ClusterCostsOverTime gives the full cluster costs over time
|
|
|
-func ClusterCostsOverTime(cli prometheusClient.Client, cloud costAnalyzerCloud.Provider, startString, endString, windowString, offset string) (*Totals, error) {
|
|
|
|
|
-
|
|
|
|
|
- localStorageQuery, err := cloud.GetLocalStorageQuery(offset)
|
|
|
|
|
- if err != nil {
|
|
|
|
|
- return nil, err
|
|
|
|
|
- }
|
|
|
|
|
|
|
+func ClusterCostsOverTime(cli prometheus.Client, provider cloud.Provider, startString, endString, windowString, offset string) (*Totals, error) {
|
|
|
|
|
+ localStorageQuery := provider.GetLocalStorageQuery(windowString, offset, true)
|
|
|
if localStorageQuery != "" {
|
|
if localStorageQuery != "" {
|
|
|
localStorageQuery = fmt.Sprintf("+ %s", localStorageQuery)
|
|
localStorageQuery = fmt.Sprintf("+ %s", localStorageQuery)
|
|
|
}
|
|
}
|