Просмотр исходного кода

Merge branch 'develop' into dwbrown2-patch-9

Signed-off-by: Webb Brown <298359+dwbrown2@users.noreply.github.com>
Webb Brown 3 лет назад
Родитель
Commit
a30a3aa651
100 измененных файлов с 2478 добавлено и 553 удалено
  1. 1 1
      .github/PULL_REQUEST_TEMPLATE.md
  2. 7 7
      CONTRIBUTING.md
  3. 2 2
      Dockerfile
  4. 16 0
      MAINTAINERS.md
  5. 2 1
      README.md
  6. 2 2
      cmd/costmodel/main.go
  7. 1 1
      deploying-as-a-pod.md
  8. 2 2
      go.mod
  9. 2 2
      go.sum
  10. 25 18
      pkg/cloud/awsprovider.go
  11. 17 25
      pkg/cloud/azureprovider.go
  12. 3 5
      pkg/cloud/csvprovider.go
  13. 3 3
      pkg/cloud/customprovider.go
  14. 18 13
      pkg/cloud/gcpprovider.go
  15. 24 19
      pkg/cloud/provider.go
  16. 4 4
      pkg/cloud/providerconfig.go
  17. 2 2
      pkg/clustercache/clustercache.go
  18. 4 4
      pkg/clustercache/clusterexporter.go
  19. 3 3
      pkg/clustercache/clusterimporter.go
  20. 1 1
      pkg/clustercache/watchcontroller.go
  21. 12 12
      pkg/cmd/agent/agent.go
  22. 3 3
      pkg/cmd/commands.go
  23. 5 5
      pkg/cmd/costmodel/costmodel.go
  24. 3 3
      pkg/config/configfile.go
  25. 2 2
      pkg/config/configmanager.go
  26. 28 24
      pkg/costmodel/aggregation.go
  27. 1 1
      pkg/costmodel/aggregation_test.go
  28. 24 22
      pkg/costmodel/allocation.go
  29. 34 12
      pkg/costmodel/cluster.go
  30. 4 4
      pkg/costmodel/cluster_helpers.go
  31. 4 4
      pkg/costmodel/cluster_helpers_test.go
  32. 7 7
      pkg/costmodel/clusterinfo.go
  33. 12 5
      pkg/costmodel/clusters/clustermap.go
  34. 2 2
      pkg/costmodel/containerkeys.go
  35. 15 11
      pkg/costmodel/costmodel.go
  36. 1 1
      pkg/costmodel/intervals.go
  37. 1 1
      pkg/costmodel/intervals_test.go
  38. 2 2
      pkg/costmodel/key.go
  39. 14 14
      pkg/costmodel/metrics.go
  40. 5 5
      pkg/costmodel/networkcosts.go
  41. 6 6
      pkg/costmodel/promparsers.go
  42. 20 20
      pkg/costmodel/router.go
  43. 2 2
      pkg/costmodel/settings.go
  44. 5 5
      pkg/costmodel/sql.go
  45. 5 4
      pkg/env/costmodelenv.go
  46. 1 1
      pkg/env/env.go
  47. 30 28
      pkg/kubecost/allocation.go
  48. 86 37
      pkg/kubecost/allocation_test.go
  49. 12 1
      pkg/kubecost/allocationfilter.go
  50. 165 0
      pkg/kubecost/allocationfilter_test.go
  51. 2 2
      pkg/kubecost/allocationprops.go
  52. 16 6
      pkg/kubecost/asset.go
  53. 56 1
      pkg/kubecost/asset_test.go
  54. 4 1
      pkg/kubecost/asset_unmarshal.go
  55. 1 1
      pkg/kubecost/asset_unmarshal_test.go
  56. 363 0
      pkg/kubecost/audit.go
  57. 15 1
      pkg/kubecost/bingen.go
  58. 2 2
      pkg/kubecost/config.go
  59. 1 1
      pkg/kubecost/config_test.go
  60. 90 0
      pkg/kubecost/etlrange.go
  61. 15 0
      pkg/kubecost/etlset.go
  62. 1 1
      pkg/kubecost/json.go
  63. 1136 65
      pkg/kubecost/kubecost_codecs.go
  64. 1 1
      pkg/kubecost/query.go
  65. 22 23
      pkg/kubecost/summaryallocation.go
  66. 1 1
      pkg/kubecost/summaryallocation_test.go
  67. 4 3
      pkg/kubecost/totals.go
  68. 26 15
      pkg/kubecost/window.go
  69. 10 1
      pkg/kubecost/window_test.go
  70. 2 2
      pkg/metrics/deploymentmetrics.go
  71. 1 1
      pkg/metrics/jobmetrics.go
  72. 2 2
      pkg/metrics/kubemetrics.go
  73. 2 2
      pkg/metrics/metricsconfig.go
  74. 2 2
      pkg/metrics/namespacemetrics.go
  75. 3 3
      pkg/metrics/nodemetrics.go
  76. 2 2
      pkg/metrics/podlabelmetrics.go
  77. 3 3
      pkg/metrics/podmetrics.go
  78. 1 1
      pkg/metrics/pvcmetrics.go
  79. 1 1
      pkg/metrics/pvmetrics.go
  80. 2 2
      pkg/metrics/servicemetrics.go
  81. 2 2
      pkg/metrics/statefulsetmetrics.go
  82. 2 2
      pkg/prom/diagnostics.go
  83. 1 1
      pkg/prom/error.go
  84. 1 1
      pkg/prom/metrics.go
  85. 5 5
      pkg/prom/prom.go
  86. 9 16
      pkg/prom/query.go
  87. 2 2
      pkg/prom/ratelimitedclient_test.go
  88. 2 2
      pkg/prom/result.go
  89. 1 1
      pkg/prom/validate.go
  90. 3 3
      pkg/services/clusters/clustermanager.go
  91. 2 2
      pkg/services/clusters/clustersendpoints.go
  92. 2 2
      pkg/services/clusterservice.go
  93. 1 1
      pkg/services/services.go
  94. 1 1
      pkg/storage/azurestorage.go
  95. 1 1
      pkg/storage/filestorage.go
  96. 1 1
      pkg/storage/gcsstorage.go
  97. 1 1
      pkg/storage/s3storage.go
  98. 1 1
      pkg/storage/storagetypes.go
  99. 1 1
      pkg/storage/storagetypes_test.go
  100. 2 2
      pkg/thanos/thanos.go

+ 1 - 1
.github/PULL_REQUEST_TEMPLATE.md

@@ -16,5 +16,5 @@
 ## Does this PR require changes to documentation?
 * 
 
-## Have you labeled this PR and its corresponding Issue as "next release" if it should be part of the next Kubecost release? If not, why not?
+## Have you labeled this PR and its corresponding Issue as "next release" if it should be part of the next Opencost release? If not, why not?
 * 

+ 7 - 7
CONTRIBUTING.md

@@ -2,7 +2,7 @@
 
 Thanks for your help improving the OpenCost project! There are many ways to contribute to the project, including the following:
 
-* contributing or providing feedback on the [OpenCost Spec](https://github.com/kubecost/opencost/tree/develop/spec)
+* contributing or providing feedback on the [OpenCost Spec](https://github.com/opencost/opencost/tree/develop/spec)
 * contributing documentation 
 * joining the discussion on Slack or in [OpenCost community discussions](https://drive.google.com/drive/folders/1hXlcyFPePB7t3z6lyVzdxmdfrbzeT1Jz)
 * committing software via the workflow below
@@ -25,8 +25,8 @@ This repository's contribution workflow follows a typical open-source model:
 Follow these steps to build from source and deploy:
 
 1. `docker build --rm -f "Dockerfile" -t <repo>/kubecost-cost-model:<tag> .`
-2. Edit the [pulled image](https://github.com/kubecost/opencost/blob/master/kubernetes/deployment.yaml#L25) in the deployment.yaml to <repo>/kubecost-cost-model:<tag>
-3. Set [this environment variable](https://github.com/kubecost/opencost/blob/master/kubernetes/deployment.yaml#L33) to the address of your prometheus server
+2. Edit the [pulled image](https://github.com/opencost/opencost/blob/master/kubernetes/deployment.yaml#L25) in the deployment.yaml to <repo>/kubecost-cost-model:<tag>
+3. Set [this environment variable](https://github.com/opencost/opencost/blob/master/kubernetes/deployment.yaml#L33) to the address of your prometheus server
 4. `kubectl create namespace cost-model`
 5. `kubectl apply -f kubernetes/ --namespace cost-model`
 6. `kubectl port-forward --namespace cost-model service/cost-model 9003`
@@ -62,7 +62,7 @@ Example:
 export KUBECONFIG_PATH=~/.kube/config
 ```
 
-There are two more environement variabes recommended to run locally. These should be set as the default file location used is `/var/` which usually requires more permissions than kubecost actually needs to run. They do not need to match but keeping everything together can help cleanup when no longer needed.
+There are two more environment variables recommended to run locally. These should be set as the default file location used is `/var/` which usually requires more permissions than kubecost actually needs to run. They do not need to match but keeping everything together can help cleanup when no longer needed.
 
 ```bash
 ETL_PATH_PREFIX="/my/cool/path/kubecost/var/config"
@@ -80,7 +80,7 @@ ETL_PATH_PREFIX="/my/cool/path/kubecost/var/config" CONFIG_PATH="/my/cool/path/k
 To run these tests:
 
 - Make sure you have a kubeconfig that can point to your cluster, and have permissions to create/modify a namespace called "test"
-- Connect to your the prometheus kubecost emits to on localhost:9003:
+- Connect to your the Prometheus kubecost emits to on localhost:9003:
   `kubectl port-forward --namespace kubecost service/kubecost-prometheus-server 9003:80`
 - Temporary workaround: Copy the default.json file in this project at cloud/default.json to /models/default.json on the machine your test is running on. TODO: fix this and inject the cloud/default.json path into provider.go.
 - Navigate to cost-model/test
@@ -88,11 +88,11 @@ To run these tests:
 
 ## Certification of Origin
 
-By contributing to this project you certify that your contribution was created in whole or in part by you and that you have the right to submit it under the open source license indicated in the project. In other words, please confirm that you, as a contributor, have the legal right to make the contribution.
+By contributing to this project, you certify that your contribution was created in whole or in part by you and that you have the right to submit it under the open source license indicated in the project. In other words, please confirm that you, as a contributor, have the legal right to make the contribution.
 
 ## Committing
 
-Please write a commit message with Fixes Issue # if there is an outstanding issue that is fixed. It’s okay to submit a PR without a corresponding issue, just please try be detailed in the description about the problem you’re addressing.
+Please write a commit message with Fixes Issue # if there is an outstanding issue that is fixed. It’s okay to submit a PR without a corresponding issue; just please try to be detailed in the description of the problem you’re addressing.
 
 Please run go fmt on the project directory. Lint can be okay (for example, comments on exported functions are nice but not required on the server).
 

+ 2 - 2
Dockerfile

@@ -26,8 +26,8 @@ RUN set -e ;\
     GOOS=linux \
     go build -a -installsuffix cgo \
     -ldflags \
-    "-X github.com/kubecost/opencost/pkg/version.Version=${version} \
-     -X github.com/kubecost/opencost/pkg/version.GitCommit=${commit}" \
+    "-X github.com/opencost/opencost/pkg/version.Version=${version} \
+    -X github.com/opencost/opencost/pkg/version.GitCommit=${commit}" \
     -o /go/bin/app
 
 FROM alpine:latest

+ 16 - 0
MAINTAINERS.md

@@ -0,0 +1,16 @@
+# OpenCost Maintainers
+
+Official list of OpenCost Maintainers.
+
+Please keep the below list sorted in ascending order.
+
+## Maintainers
+
+| Maintainer | GitHub ID | Affiliation | Email |
+| --------------- | --------- | ----------- | ----------- |
+| Ajay Tripathy | @AjayTripathy | Kubecost | <Ajay@kubecost.com> |
+| Matt Bolt | @​mbolt35 | Kubecost | <matt@kubecost.com> |
+| Michael Dresser | @michaelmdresser | Kubecost | <michael@kubecost.com> |
+| Niko Kovacevic | @nikovacevic | Kubecost | <niko@kubecost.com> |
+| Sean Holcomb | @Sean-Holcomb | Kubecost | <Sean@kubecost.com> |
+| Thomas Evans | @teevans | Kubecost | <thomas@kubecost.com> |

+ 2 - 1
README.md

@@ -4,6 +4,7 @@
 
 OpenCost models give teams visibility into current and historical Kubernetes spend and resource allocation. These models provide cost transparency in Kubernetes environments that support multiple applications, teams, departments, etc.
 
+
 OpenCost was originally developed and open sourced by [Kubecost](https://kubecost.com). This project combines a [specification](/spec/) as well as a Golang implementation of these detailed requirements.
 
 ![OpenCost allocation UI](/allocation-drilldown.gif)
@@ -42,4 +43,4 @@ If you need any support or have any questions on contributing to the project, yo
 
 ## FAQ
 
-You can view [OpenCost documentation](https://www.opencost.io/docs/FAQ) for a list of commonly asked questions.  
+You can view [OpenCost documentation](https://www.opencost.io/docs/FAQ) for a list of commonly asked questions.

+ 2 - 2
cmd/costmodel/main.go

@@ -1,13 +1,13 @@
 package main
 
 import (
-	"github.com/kubecost/opencost/pkg/cmd"
+	"github.com/opencost/opencost/pkg/cmd"
 	"github.com/rs/zerolog/log"
 )
 
 func main() {
 	// runs the appropriate application mode using the default cost-model command
-	// see: github.com/kubecost/opencost/pkg/cmd package for details
+	// see: github.com/opencost/opencost/pkg/cmd package for details
 	if err := cmd.Execute(nil); err != nil {
 		log.Fatal().Err(err)
 	}

+ 1 - 1
deploying-as-a-pod.md

@@ -4,7 +4,7 @@ See this page for all [Kubecost install options](http://docs.kubecost.com/instal
 
 If you would like to deploy the cost model (w/o dashboards) directly a pod on your cluster, complete the steps listed below.
 
-1. Set [this environment variable](https://github.com/kubecost/opencost/blob/c211fbc1244a9da9667c7180a9e4c7f988d7978a/kubernetes/deployment.yaml#L33) to the address of your prometheus server
+1. Set [this environment variable](https://github.com/opencost/opencost/blob/c211fbc1244a9da9667c7180a9e4c7f988d7978a/kubernetes/deployment.yaml#L33) to the address of your prometheus server
 2. `kubectl create namespace cost-model`
 3. `kubectl apply -f kubernetes/ --namespace cost-model`
 4. `kubectl port-forward --namespace cost-model service/cost-model 9003`

+ 2 - 2
go.mod

@@ -1,4 +1,4 @@
-module github.com/kubecost/opencost
+module github.com/opencost/opencost
 
 replace github.com/golang/lint => golang.org/x/lint v0.0.0-20180702182130-06c8688daad7
 
@@ -29,7 +29,7 @@ require (
 	github.com/json-iterator/go v1.1.12
 	github.com/jszwec/csvutil v1.2.1
 	github.com/julienschmidt/httprouter v1.3.0
-	github.com/kubecost/events v0.0.4
+	github.com/kubecost/events v0.0.6
 	github.com/lib/pq v1.2.0
 	github.com/microcosm-cc/bluemonday v1.0.16
 	github.com/minio/minio-go/v7 v7.0.15

+ 2 - 2
go.sum

@@ -405,8 +405,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kubecost/events v0.0.4 h1:iQJyG8q+4OjzGZTbLD1DOhT4NyO1/bzQn8HcasKOUzQ=
-github.com/kubecost/events v0.0.4/go.mod h1:i3DyCVatehxq6tAbvBrARuafjkX2DECPk9OWxiaRIhY=
+github.com/kubecost/events v0.0.6 h1:ql1ZUnLfheD2hHm/otsHZ8BOYt87rY5e9sPFHges4ec=
+github.com/kubecost/events v0.0.6/go.mod h1:i3DyCVatehxq6tAbvBrARuafjkX2DECPk9OWxiaRIhY=
 github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
 github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
 github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=

+ 25 - 18
pkg/cloud/awsprovider.go

@@ -6,7 +6,6 @@ import (
 	"context"
 	"encoding/csv"
 	"fmt"
-	"github.com/kubecost/opencost/pkg/kubecost"
 	"io"
 	"io/ioutil"
 	"net/http"
@@ -16,13 +15,15 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/errors"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util"
-	"github.com/kubecost/opencost/pkg/util/fileutil"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/kubecost"
+
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/errors"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
 
 	awsSDK "github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/config"
@@ -40,14 +41,23 @@ import (
 	v1 "k8s.io/api/core/v1"
 )
 
-const supportedSpotFeedVersion = "1"
-const SpotInfoUpdateType = "spotinfo"
-const AthenaInfoUpdateType = "athenainfo"
-const PreemptibleType = "preemptible"
+const (
+	supportedSpotFeedVersion = "1"
+	SpotInfoUpdateType       = "spotinfo"
+	AthenaInfoUpdateType     = "athenainfo"
+	PreemptibleType          = "preemptible"
+
+	APIPricingSource              = "Public API"
+	SpotPricingSource             = "Spot Data Feed"
+	ReservedInstancePricingSource = "Savings Plan, Reserved Instance, and Out-Of-Cluster"
+)
 
-const APIPricingSource = "Public API"
-const SpotPricingSource = "Spot Data Feed"
-const ReservedInstancePricingSource = "Savings Plan, Reserved Instance, and Out-Of-Cluster"
+var (
+	// It's of the form aws:///us-east-2a/i-0fea4fd46592d050b and we want i-0fea4fd46592d050b, if it exists
+	provIdRx      = regexp.MustCompile("aws:///([^/]+)/([^/]+)")
+	usageTypeRegx = regexp.MustCompile(".*(-|^)(EBS.+)")
+	versionRx     = regexp.MustCompile("^#Version: (\\d+)\\.\\d+$")
+)
 
 func (aws *AWS) PricingSourceStatus() map[string]*PricingSource {
 
@@ -605,7 +615,6 @@ func (k *awsKey) GPUType() string {
 }
 
 func (k *awsKey) ID() string {
-	provIdRx := regexp.MustCompile("aws:///([^/]+)/([^/]+)") // It's of the form aws:///us-east-2a/i-0fea4fd46592d050b and we want i-0fea4fd46592d050b, if it exists
 	for matchNum, group := range provIdRx.FindStringSubmatch(k.ProviderID) {
 		if matchNum == 2 {
 			return group
@@ -932,7 +941,6 @@ func (aws *AWS) DownloadPricingData() error {
 				} else if strings.Contains(product.Attributes.UsageType, "EBS:Volume") {
 					// UsageTypes may be prefixed with a region code - we're removing this when using
 					// volTypes to keep lookups generic
-					usageTypeRegx := regexp.MustCompile(".*(-|^)(EBS.+)")
 					usageTypeMatch := usageTypeRegx.FindStringSubmatch(product.Attributes.UsageType)
 					usageTypeNoRegion := usageTypeMatch[len(usageTypeMatch)-1]
 					key := locationToRegion[product.Attributes.Location] + "," + usageTypeNoRegion
@@ -1992,7 +2000,6 @@ func (aws *AWS) parseSpotData(bucket string, prefix string, projectID string, re
 		keys = append(keys, obj.Key)
 	}
 
-	versionRx := regexp.MustCompile("^#Version: (\\d+)\\.\\d+$")
 	header, err := csvutil.Header(spotInfo{}, "csv")
 	if err != nil {
 		return nil, err

+ 17 - 25
pkg/cloud/azureprovider.go

@@ -13,12 +13,12 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util"
-	"github.com/kubecost/opencost/pkg/util/fileutil"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
 
 	"github.com/Azure/azure-sdk-for-go/services/preview/commerce/mgmt/2015-06-01-preview/commerce"
 	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions"
@@ -66,6 +66,10 @@ var (
 	mtStandardL, _ = regexp.Compile(`^Standard_L\d+[_v\d]*[_Promo]*$`)
 	mtStandardM, _ = regexp.Compile(`^Standard_M\d+[m|t|l]*s[_v\d]*[_Promo]*$`)
 	mtStandardN, _ = regexp.Compile(`^Standard_N[C|D|V]\d+r?[_v\d]*[_Promo]*$`)
+
+	// azure:///subscriptions/0badafdf-1234-abcd-wxyz-123456789/...
+	//  => 0badafdf-1234-abcd-wxyz-123456789
+	azureSubRegex = regexp.MustCompile("azure:///subscriptions/([^/]*)/*")
 )
 
 // List obtained by installing the Azure CLI tool "az", described here:
@@ -906,7 +910,7 @@ func (az *Azure) DownloadPricingData() error {
 
 	// There is no easy way of supporting Standard Azure-File, because it's billed per used GB
 	// this will set the price to "0" as a workaround to not spam with `Persistent Volume pricing not found for` error
-	// check https://github.com/kubecost/opencost/issues/159 for more information (same problem on AWS)
+	// check https://github.com/opencost/opencost/issues/159 for more information (same problem on AWS)
 	zeroPrice := "0.0"
 	for region := range regions {
 		key := region + "," + AzureFileStandardStorageClass
@@ -1062,22 +1066,13 @@ func (az *Azure) NetworkPricing() (*Network, error) {
 	}, nil
 }
 
+// LoadBalancerPricing on Azure, LoadBalancer services correspond to public IPs. For now the pricing of LoadBalancer
+// services will be that of a standard static public IP https://azure.microsoft.com/en-us/pricing/details/ip-addresses/.
+// Azure still has load balancers which follow the standard pricing scheme based on rules
+// https://azure.microsoft.com/en-us/pricing/details/load-balancer/, they are created on a per-cluster basis.
 func (azr *Azure) LoadBalancerPricing() (*LoadBalancer, error) {
-	fffrc := 0.025
-	afrc := 0.010
-	lbidc := 0.008
-
-	numForwardingRules := 1.0
-	dataIngressGB := 0.0
-
-	var totalCost float64
-	if numForwardingRules < 5 {
-		totalCost = fffrc*numForwardingRules + lbidc*dataIngressGB
-	} else {
-		totalCost = fffrc*5 + afrc*(numForwardingRules-5) + lbidc*dataIngressGB
-	}
 	return &LoadBalancer{
-		Cost: totalCost,
+		Cost: 0.005,
 	}, nil
 }
 
@@ -1297,10 +1292,7 @@ func (az *Azure) Regions() []string {
 }
 
 func parseAzureSubscriptionID(id string) string {
-	// azure:///subscriptions/0badafdf-1234-abcd-wxyz-123456789/...
-	//  => 0badafdf-1234-abcd-wxyz-123456789
-	rx := regexp.MustCompile("azure:///subscriptions/([^/]*)/*")
-	match := rx.FindStringSubmatch(id)
+	match := azureSubRegex.FindStringSubmatch(id)
 	if len(match) >= 2 {
 		return match[1]
 	}

+ 3 - 5
pkg/cloud/csvprovider.go

@@ -5,19 +5,18 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"regexp"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/util"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 	v1 "k8s.io/api/core/v1"
 
 	"github.com/jszwec/csvutil"
@@ -240,7 +239,6 @@ func NodeValueFromMapField(m string, n *v1.Node, useRegion bool) string {
 		}
 	}
 	if len(mf) == 2 && mf[0] == "spec" && mf[1] == "providerID" {
-		provIdRx := regexp.MustCompile("aws:///([^/]+)/([^/]+)") // It's of the form aws:///us-east-2a/i-0fea4fd46592d050b and we want i-0fea4fd46592d050b, if it exists
 		for matchNum, group := range provIdRx.FindStringSubmatch(n.Spec.ProviderID) {
 			if matchNum == 2 {
 				return toReturn + group

+ 3 - 3
pkg/cloud/customprovider.go

@@ -8,9 +8,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/util/json"
 
 	v1 "k8s.io/api/core/v1"
 )

+ 18 - 13
pkg/cloud/gcpprovider.go

@@ -13,15 +13,15 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/kubecost"
-
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util"
-	"github.com/kubecost/opencost/pkg/util/fileutil"
-	"github.com/kubecost/opencost/pkg/util/json"
-	"github.com/kubecost/opencost/pkg/util/timeutil"
+	"github.com/opencost/opencost/pkg/kubecost"
+
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/timeutil"
 	"github.com/rs/zerolog"
 
 	"cloud.google.com/go/bigquery"
@@ -69,6 +69,13 @@ var gcpRegions = []string{
 	"us-west4",
 }
 
+var (
+	nvidiaGPURegex = regexp.MustCompile("(Nvidia Tesla [^ ]+) ")
+	// gce://guestbook-12345/...
+	//  => guestbook-12345
+	gceRegex = regexp.MustCompile("gce://([^/]*)/*")
+)
+
 type userAgentTransport struct {
 	userAgent string
 	base      http.RoundTripper
@@ -597,8 +604,7 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]Key, pvKeys map[stri
 					}
 				*/
 				var gpuType string
-				provIdRx := regexp.MustCompile("(Nvidia Tesla [^ ]+) ")
-				for matchnum, group := range provIdRx.FindStringSubmatch(product.Description) {
+				for matchnum, group := range nvidiaGPURegex.FindStringSubmatch(product.Description) {
 					if matchnum == 1 {
 						gpuType = strings.ToLower(strings.Join(strings.Split(group, " "), "-"))
 						log.Debug("GPU type found: " + gpuType)
@@ -1391,8 +1397,7 @@ func sustainedUseDiscount(class string, defaultDiscount float64, isPreemptible b
 func parseGCPProjectID(id string) string {
 	// gce://guestbook-12345/...
 	//  => guestbook-12345
-	rx := regexp.MustCompile("gce://([^/]*)/*")
-	match := rx.FindStringSubmatch(id)
+	match := gceRegex.FindStringSubmatch(id)
 	if len(match) >= 2 {
 		return match[1]
 	}

+ 24 - 19
pkg/cloud/provider.go

@@ -4,7 +4,6 @@ import (
 	"database/sql"
 	"errors"
 	"fmt"
-	"github.com/kubecost/opencost/pkg/kubecost"
 	"io"
 	"regexp"
 	"strconv"
@@ -12,15 +11,17 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/kubecost"
+
+	"github.com/opencost/opencost/pkg/util"
 
 	"cloud.google.com/go/compute/metadata"
 
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/config"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/watcher"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/watcher"
 
 	v1 "k8s.io/api/core/v1"
 )
@@ -607,20 +608,27 @@ func GetOrCreateClusterMeta(cluster_id, cluster_name string) (string, string, er
 	return id, name, nil
 }
 
+var (
+	// It's of the form aws:///us-east-2a/i-0fea4fd46592d050b and we want i-0fea4fd46592d050b, if it exists
+	providerAWSRegex = regexp.MustCompile("aws://[^/]*/[^/]*/([^/]+)")
+	// gce://guestbook-227502/us-central1-a/gke-niko-n1-standard-2-wljla-8df8e58a-hfy7
+	//  => gke-niko-n1-standard-2-wljla-8df8e58a-hfy7
+	providerGCERegex = regexp.MustCompile("gce://[^/]*/[^/]*/([^/]+)")
+	// Capture "vol-0fc54c5e83b8d2b76" from "aws://us-east-2a/vol-0fc54c5e83b8d2b76"
+	persistentVolumeAWSRegex = regexp.MustCompile("aws:/[^/]*/[^/]*/([^/]+)")
+	// Capture "ad9d88195b52a47c89b5055120f28c58" from "ad9d88195b52a47c89b5055120f28c58-1037804914.us-east-2.elb.amazonaws.com"
+	loadBalancerAWSRegex = regexp.MustCompile("^([^-]+)-.+amazonaws\\.com$")
+)
+
 // ParseID attempts to parse a ProviderId from a string based on formats from the various providers and
 // returns the string as is if it cannot find a match
 func ParseID(id string) string {
-	// It's of the form aws:///us-east-2a/i-0fea4fd46592d050b and we want i-0fea4fd46592d050b, if it exists
-	rx := regexp.MustCompile("aws://[^/]*/[^/]*/([^/]+)")
-	match := rx.FindStringSubmatch(id)
+	match := providerAWSRegex.FindStringSubmatch(id)
 	if len(match) >= 2 {
 		return match[1]
 	}
 
-	// gce://guestbook-227502/us-central1-a/gke-niko-n1-standard-2-wljla-8df8e58a-hfy7
-	//  => gke-niko-n1-standard-2-wljla-8df8e58a-hfy7
-	rx = regexp.MustCompile("gce://[^/]*/[^/]*/([^/]+)")
-	match = rx.FindStringSubmatch(id)
+	match = providerGCERegex.FindStringSubmatch(id)
 	if len(match) >= 2 {
 		return match[1]
 	}
@@ -632,9 +640,7 @@ func ParseID(id string) string {
 // ParsePVID attempts to parse a PV ProviderId from a string based on formats from the various providers and
 // returns the string as is if it cannot find a match
 func ParsePVID(id string) string {
-	// Capture "vol-0fc54c5e83b8d2b76" from "aws://us-east-2a/vol-0fc54c5e83b8d2b76"
-	rx := regexp.MustCompile("aws:/[^/]*/[^/]*/([^/]+)")
-	match := rx.FindStringSubmatch(id)
+	match := persistentVolumeAWSRegex.FindStringSubmatch(id)
 	if len(match) >= 2 {
 		return match[1]
 	}
@@ -646,8 +652,7 @@ func ParsePVID(id string) string {
 // ParseLBID attempts to parse a LB ProviderId from a string based on formats from the various providers and
 // returns the string as is if it cannot find a match
 func ParseLBID(id string) string {
-	rx := regexp.MustCompile("^([^-]+)-.+amazonaws\\.com$") // Capture "ad9d88195b52a47c89b5055120f28c58" from "ad9d88195b52a47c89b5055120f28c58-1037804914.us-east-2.elb.amazonaws.com"
-	match := rx.FindStringSubmatch(id)
+	match := loadBalancerAWSRegex.FindStringSubmatch(id)
 	if len(match) >= 2 {
 		return match[1]
 	}

+ 4 - 4
pkg/cloud/providerconfig.go

@@ -8,11 +8,11 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/kubecost/opencost/pkg/config"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/json"
 	"github.com/microcosm-cc/bluemonday"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 var sanitizePolicy = bluemonday.UGCPolicy()

+ 2 - 2
pkg/clustercache/clustercache.go

@@ -3,8 +3,8 @@ package clustercache
 import (
 	"sync"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
 
 	appsv1 "k8s.io/api/apps/v1"
 	autoscaling "k8s.io/api/autoscaling/v2beta1"

+ 4 - 4
pkg/clustercache/clusterexporter.go

@@ -3,10 +3,10 @@ package clustercache
 import (
 	"time"
 
-	"github.com/kubecost/opencost/pkg/config"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/atomic"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/atomic"
+	"github.com/opencost/opencost/pkg/util/json"
 
 	appsv1 "k8s.io/api/apps/v1"
 	autoscaling "k8s.io/api/autoscaling/v2beta1"

+ 3 - 3
pkg/clustercache/clusterimporter.go

@@ -3,9 +3,9 @@ package clustercache
 import (
 	"sync"
 
-	"github.com/kubecost/opencost/pkg/config"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
 	appsv1 "k8s.io/api/apps/v1"
 	autoscaling "k8s.io/api/autoscaling/v2beta1"
 	batchv1 "k8s.io/api/batch/v1"

+ 1 - 1
pkg/clustercache/watchcontroller.go

@@ -5,7 +5,7 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 
 	"k8s.io/apimachinery/pkg/fields"
 	rt "k8s.io/apimachinery/pkg/runtime"

+ 12 - 12
pkg/cmd/agent/agent.go

@@ -7,18 +7,18 @@ import (
 	"path"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/config"
-	"github.com/kubecost/opencost/pkg/costmodel"
-	"github.com/kubecost/opencost/pkg/costmodel/clusters"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/kubeconfig"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/metrics"
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/util/watcher"
-	"github.com/kubecost/opencost/pkg/version"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/costmodel"
+	"github.com/opencost/opencost/pkg/costmodel/clusters"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/kubeconfig"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/metrics"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util/watcher"
+	"github.com/opencost/opencost/pkg/version"
 
 	prometheus "github.com/prometheus/client_golang/api"
 	prometheusAPI "github.com/prometheus/client_golang/api/prometheus/v1"

+ 3 - 3
pkg/cmd/commands.go

@@ -5,9 +5,9 @@ import (
 	"os"
 	"strings"
 
-	"github.com/kubecost/opencost/pkg/cmd/agent"
-	"github.com/kubecost/opencost/pkg/cmd/costmodel"
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/cmd/agent"
+	"github.com/opencost/opencost/pkg/cmd/costmodel"
+	"github.com/opencost/opencost/pkg/log"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 )

+ 5 - 5
pkg/cmd/costmodel/costmodel.go

@@ -4,11 +4,11 @@ import (
 	"net/http"
 
 	"github.com/julienschmidt/httprouter"
-	"github.com/kubecost/opencost/pkg/costmodel"
-	"github.com/kubecost/opencost/pkg/errors"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/metrics"
-	"github.com/kubecost/opencost/pkg/version"
+	"github.com/opencost/opencost/pkg/costmodel"
+	"github.com/opencost/opencost/pkg/errors"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/metrics"
+	"github.com/opencost/opencost/pkg/version"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/rs/cors"
 )

+ 3 - 3
pkg/config/configfile.go

@@ -8,9 +8,9 @@ import (
 	"time"
 
 	"github.com/google/uuid"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/storage"
-	"github.com/kubecost/opencost/pkg/util/atomic"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/storage"
+	"github.com/opencost/opencost/pkg/util/atomic"
 )
 
 // HandlerID is a unique identifier assigned to a provided ConfigChangedHandler. This is used to remove a handler

+ 2 - 2
pkg/config/configmanager.go

@@ -4,8 +4,8 @@ import (
 	"io/ioutil"
 	"sync"
 
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/storage"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/storage"
 )
 
 //--------------------------------------------------------------------------

+ 28 - 24
pkg/costmodel/aggregation.go

@@ -10,19 +10,19 @@ import (
 	"strings"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util/httputil"
-	"github.com/kubecost/opencost/pkg/util/timeutil"
+	"github.com/opencost/opencost/pkg/util/httputil"
+	"github.com/opencost/opencost/pkg/util/timeutil"
 
 	"github.com/julienschmidt/httprouter"
-	"github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/errors"
-	"github.com/kubecost/opencost/pkg/kubecost"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/thanos"
-	"github.com/kubecost/opencost/pkg/util"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/errors"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/thanos"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/json"
 	"github.com/patrickmn/go-cache"
 	prometheusClient "github.com/prometheus/client_golang/api"
 )
@@ -1881,6 +1881,20 @@ func (a *Accesses) warmAggregateCostModelCache() {
 	}
 }
 
+var (
+	// Convert UTC-RFC3339 pairs to configured UTC offset
+	// e.g. with UTC offset of -0600, 2020-07-01T00:00:00Z becomes
+	// 2020-07-01T06:00:00Z == 2020-07-01T00:00:00-0600
+	// TODO niko/etl fix the frontend because this is confusing if you're
+	// actually asking for UTC time (...Z) and we swap that "Z" out for the
+	// configured UTC offset without asking
+	rfc3339      = `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ`
+	rfc3339Regex = regexp.MustCompile(fmt.Sprintf(`(%s),(%s)`, rfc3339, rfc3339))
+
+	durRegex     = regexp.MustCompile(`^(\d+)(m|h|d|s)$`)
+	percentRegex = regexp.MustCompile(`(\d+\.*\d*)%`)
+)
+
 // AggregateCostModelHandler handles requests to the aggregated cost model API. See
 // ComputeAggregateCostModel for details.
 func (a *Accesses) AggregateCostModelHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
@@ -1888,15 +1902,7 @@ func (a *Accesses) AggregateCostModelHandler(w http.ResponseWriter, r *http.Requ
 
 	windowStr := r.URL.Query().Get("window")
 
-	// Convert UTC-RFC3339 pairs to configured UTC offset
-	// e.g. with UTC offset of -0600, 2020-07-01T00:00:00Z becomes
-	// 2020-07-01T06:00:00Z == 2020-07-01T00:00:00-0600
-	// TODO niko/etl fix the frontend because this is confusing if you're
-	// actually asking for UTC time (...Z) and we swap that "Z" out for the
-	// configured UTC offset without asking
-	rfc3339 := `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ`
-	regex := regexp.MustCompile(fmt.Sprintf(`(%s),(%s)`, rfc3339, rfc3339))
-	match := regex.FindStringSubmatch(windowStr)
+	match := rfc3339Regex.FindStringSubmatch(windowStr)
 	if match != nil {
 		start, _ := time.Parse(time.RFC3339, match[1])
 		start = start.Add(-env.GetParsedUTCOffset()).In(time.UTC)
@@ -1912,7 +1918,6 @@ func (a *Accesses) AggregateCostModelHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	durRegex := regexp.MustCompile(`^(\d+)(m|h|d|s)$`)
 	isDurationStr := durRegex.MatchString(windowStr)
 
 	// legacy offset option should override window offset
@@ -2076,8 +2081,7 @@ func (a *Accesses) AggregateCostModelHandler(w http.ResponseWriter, r *http.Requ
 				// after the pipeline builds
 				msg := "Data will be available after ETL is built"
 
-				rex := regexp.MustCompile(`(\d+\.*\d*)%`)
-				match := rex.FindStringSubmatch(boundaryErr.Message)
+				match := percentRegex.FindStringSubmatch(boundaryErr.Message)
 				if len(match) > 1 {
 					completionPct, err := strconv.ParseFloat(match[1], 64)
 					if err == nil {
@@ -2197,7 +2201,7 @@ func (a *Accesses) ComputeAllocationHandlerSummary(w http.ResponseWriter, r *htt
 
 	sasl := []*kubecost.SummaryAllocationSet{}
 	for _, as := range asr.Slice() {
-		sas := kubecost.NewSummaryAllocationSet(as, []kubecost.AllocationMatchFunc{}, []kubecost.AllocationMatchFunc{}, false, false)
+		sas := kubecost.NewSummaryAllocationSet(as, nil, []kubecost.AllocationMatchFunc{}, false, false)
 		sasl = append(sasl, sas)
 	}
 	sasr := kubecost.NewSummaryAllocationSetRange(sasl...)

+ 1 - 1
pkg/costmodel/aggregation_test.go

@@ -3,7 +3,7 @@ package costmodel
 import (
 	"testing"
 
-	"github.com/kubecost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util"
 )
 
 func TestScaleHourlyCostData(t *testing.T) {

+ 24 - 22
pkg/costmodel/allocation.go

@@ -7,13 +7,13 @@ import (
 	"strings"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util/timeutil"
+	"github.com/opencost/opencost/pkg/util/timeutil"
 
-	"github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/kubecost"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
 	"k8s.io/apimachinery/pkg/labels"
 )
 
@@ -2489,34 +2489,36 @@ type LB struct {
 
 func getLoadBalancerCosts(resLBCost, resLBActiveMins []*prom.QueryResult, resolution time.Duration) map[serviceKey]*LB {
 	lbMap := make(map[serviceKey]*LB)
-	lbHourlyCosts := make(map[serviceKey]float64)
-	for _, res := range resLBCost {
-		serviceKey, err := resultServiceKey(res, env.GetPromClusterLabel(), "namespace", "service_name")
-		if err != nil {
-			continue
-		}
-		lbHourlyCosts[serviceKey] = res.Values[0].Value
-	}
+
 	for _, res := range resLBActiveMins {
 		serviceKey, err := resultServiceKey(res, env.GetPromClusterLabel(), "namespace", "service_name")
 		if err != nil || len(res.Values) == 0 {
 			continue
 		}
-		if _, ok := lbHourlyCosts[serviceKey]; !ok {
-			log.Warnf("CostModel: failed to find hourly cost for Load Balancer: %v", serviceKey)
-			continue
-		}
 
 		s := time.Unix(int64(res.Values[0].Timestamp), 0)
 		// subtract resolution from start time to cover full time period
 		s = s.Add(-resolution)
 		e := time.Unix(int64(res.Values[len(res.Values)-1].Timestamp), 0)
-		hours := e.Sub(s).Hours()
 
 		lbMap[serviceKey] = &LB{
-			TotalCost: lbHourlyCosts[serviceKey] * hours,
-			Start:     s,
-			End:       e,
+			Start: s,
+			End:   e,
+		}
+	}
+
+	for _, res := range resLBCost {
+		serviceKey, err := resultServiceKey(res, env.GetPromClusterLabel(), "namespace", "service_name")
+		if err != nil {
+			continue
+		}
+		// Apply cost as price-per-hour * hours
+		if lb, ok := lbMap[serviceKey]; ok {
+			lbPricePerHr := res.Values[0].Value
+			hours := lb.End.Sub(lb.Start).Hours()
+			lb.TotalCost += lbPricePerHr * hours
+		} else {
+			log.DedupedWarningf(20, "CostModel: found minutes for key that does not exist: %s", serviceKey)
 		}
 	}
 	return lbMap

+ 34 - 12
pkg/costmodel/cluster.go

@@ -5,13 +5,13 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/kubecost"
-	"github.com/kubecost/opencost/pkg/util/timeutil"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/util/timeutil"
 
-	"github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
 
 	prometheus "github.com/prometheus/client_golang/api"
 )
@@ -136,8 +136,13 @@ func ClusterDisks(client prometheus.Client, provider cloud.Provider, start, end
 	// minsPerResolution determines accuracy and resource use for the following
 	// queries. Smaller values (higher resolution) result in better accuracy,
 	// but more expensive queries, and vice-a-versa.
-	minsPerResolution := 1
-	resolution := time.Duration(minsPerResolution) * time.Minute
+	resolution := env.GetETLResolution()
+	//Ensuring if ETL_RESOLUTION_SECONDS is less than 60s default it to 1m
+	var minsPerResolution int
+	if minsPerResolution = int(resolution.Minutes()); int(resolution.Minutes()) == 0 {
+		minsPerResolution = 1
+		log.DedupedWarningf(3, "ClusterDisks(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
+	}
 
 	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
 	// value, converts it to a cumulative value; i.e.
@@ -388,8 +393,13 @@ func ClusterNodes(cp cloud.Provider, client prometheus.Client, start, end time.T
 	// minsPerResolution determines accuracy and resource use for the following
 	// queries. Smaller values (higher resolution) result in better accuracy,
 	// but more expensive queries, and vice-a-versa.
-	minsPerResolution := 1
-	resolution := time.Duration(minsPerResolution) * time.Minute
+	resolution := env.GetETLResolution()
+	//Ensuring if ETL_RESOLUTION_SECONDS is less than 60s default it to 1m
+	var minsPerResolution int
+	if minsPerResolution = int(resolution.Minutes()); int(resolution.Minutes()) == 0 {
+		minsPerResolution = 1
+		log.DedupedWarningf(3, "ClusterNodes(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
+	}
 
 	requiredCtx := prom.NewNamedContext(client, prom.ClusterContextName)
 	optionalCtx := prom.NewNamedContext(client, prom.ClusterOptionalContextName)
@@ -544,7 +554,13 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 	// minsPerResolution determines accuracy and resource use for the following
 	// queries. Smaller values (higher resolution) result in better accuracy,
 	// but more expensive queries, and vice-a-versa.
-	minsPerResolution := 1
+	resolution := env.GetETLResolution()
+	//Ensuring if ETL_RESOLUTION_SECONDS is less than 60s default it to 1m
+	var minsPerResolution int
+	if minsPerResolution = int(resolution.Minutes()); int(resolution.Minutes()) == 0 {
+		minsPerResolution = 1
+		log.DedupedWarningf(3, "ClusterLoadBalancers(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
+	}
 
 	ctx := prom.NewNamedContext(client, prom.ClusterContextName)
 
@@ -670,7 +686,13 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider cloud.
 	// minsPerResolution determines accuracy and resource use for the following
 	// queries. Smaller values (higher resolution) result in better accuracy,
 	// but more expensive queries, and vice-a-versa.
-	minsPerResolution := 5
+	resolution := env.GetETLResolution()
+	//Ensuring if ETL_RESOLUTION_SECONDS is less than 60s default it to 1m
+	var minsPerResolution int
+	if minsPerResolution = int(resolution.Minutes()); int(resolution.Minutes()) < 1 {
+		minsPerResolution = 1
+		log.DedupedWarningf(3, "ComputeClusterCosts(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
+	}
 
 	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
 	// value, converts it to a cumulative value; i.e.

+ 4 - 4
pkg/costmodel/cluster_helpers.go

@@ -4,11 +4,11 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
 )
 
 // mergeTypeMaps takes two maps of (cluster name, node name) -> node type

+ 4 - 4
pkg/costmodel/cluster_helpers_test.go

@@ -5,10 +5,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/config"
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util"
 
 	"github.com/davecgh/go-spew/spew"
 )

+ 7 - 7
pkg/costmodel/clusterinfo.go

@@ -3,13 +3,13 @@ package costmodel
 import (
 	"fmt"
 
-	cloudProvider "github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/config"
-	"github.com/kubecost/opencost/pkg/costmodel/clusters"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/thanos"
-	"github.com/kubecost/opencost/pkg/util/json"
+	cloudProvider "github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/costmodel/clusters"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/thanos"
+	"github.com/opencost/opencost/pkg/util/json"
 
 	"k8s.io/client-go/kubernetes"
 )

+ 12 - 5
pkg/costmodel/clusters/clustermap.go

@@ -7,10 +7,12 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/thanos"
-	"github.com/kubecost/opencost/pkg/util/retry"
+	"github.com/opencost/opencost/pkg/env"
+
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/thanos"
+	"github.com/opencost/opencost/pkg/util/retry"
 
 	prometheus "github.com/prometheus/client_golang/api"
 )
@@ -20,6 +22,10 @@ const (
 	LoadRetryDelay time.Duration = 10 * time.Second
 )
 
+// prometheus query offset to apply to each non-range query
+// package scope to prevent calling duration parse each use
+var promQueryOffset = env.GetPrometheusQueryOffset()
+
 // ClusterInfo holds attributes of Cluster from metrics pulled from Prometheus
 type ClusterInfo struct {
 	ID          string `json:"id"`
@@ -138,7 +144,8 @@ func (pcm *PrometheusClusterMap) loadClusters() (map[string]*ClusterInfo, error)
 	// Execute Query
 	tryQuery := func() (interface{}, error) {
 		ctx := prom.NewNamedContext(pcm.client, prom.ClusterMapContextName)
-		r, _, e := ctx.QuerySync(clusterInfoQuery(offset))
+		resCh := ctx.QueryAtTime(clusterInfoQuery(offset), time.Now().Add(-promQueryOffset))
+		r, e := resCh.Await()
 		return r, e
 	}
 

+ 2 - 2
pkg/costmodel/containerkeys.go

@@ -4,8 +4,8 @@ import (
 	"errors"
 	"strings"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
 	v1 "k8s.io/api/core/v1"
 )
 

+ 15 - 11
pkg/costmodel/costmodel.go

@@ -8,14 +8,14 @@ import (
 	"strings"
 	"time"
 
-	costAnalyzerCloud "github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/costmodel/clusters"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/kubecost"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/util"
+	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/costmodel/clusters"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util"
 	prometheus "github.com/prometheus/client_golang/api"
 	prometheusClient "github.com/prometheus/client_golang/api"
 	v1 "k8s.io/api/core/v1"
@@ -1225,7 +1225,7 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 }
 
 // TODO: drop some logs
-func (cm *CostModel) GetLBCost(cp costAnalyzerCloud.Provider) (map[string]*costAnalyzerCloud.LoadBalancer, error) {
+func (cm *CostModel) GetLBCost(cp costAnalyzerCloud.Provider) (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
 	// for fetching prices from cloud provider
 	// cfg, err := cp.GetConfig()
 	// if err != nil {
@@ -1233,12 +1233,16 @@ func (cm *CostModel) GetLBCost(cp costAnalyzerCloud.Provider) (map[string]*costA
 	// }
 
 	servicesList := cm.Cache.GetAllServices()
-	loadBalancerMap := make(map[string]*costAnalyzerCloud.LoadBalancer)
+	loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
 
 	for _, service := range servicesList {
 		namespace := service.GetObjectMeta().GetNamespace()
 		name := service.GetObjectMeta().GetName()
-		key := namespace + "," + name // + "," + clusterID?
+		key := serviceKey{
+			Cluster:   env.GetClusterID(),
+			Namespace: namespace,
+			Service:   name,
+		}
 
 		if service.Spec.Type == "LoadBalancer" {
 			loadBalancer, err := cp.LoadBalancerPricing()

+ 1 - 1
pkg/costmodel/intervals.go

@@ -4,7 +4,7 @@ import (
 	"sort"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/kubecost"
 )
 
 // IntervalPoint describes a start or end of a window of time

+ 1 - 1
pkg/costmodel/intervals_test.go

@@ -5,7 +5,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/kubecost"
 )
 
 func TestGetIntervalPointsFromWindows(t *testing.T) {

+ 2 - 2
pkg/costmodel/key.go

@@ -3,8 +3,8 @@ package costmodel
 import (
 	"fmt"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/prom"
 )
 
 type containerKey struct {

+ 14 - 14
pkg/costmodel/metrics.go

@@ -7,16 +7,16 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/costmodel/clusters"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/errors"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/metrics"
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/util"
-	"github.com/kubecost/opencost/pkg/util/atomic"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/costmodel/clusters"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/errors"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/metrics"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/atomic"
 
 	promclient "github.com/prometheus/client_golang/api"
 	"github.com/prometheus/client_golang/prometheus"
@@ -534,7 +534,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 				const outlierFactor float64 = 30
 				// don't record cpuCost, ramCost, or gpuCost in the case of wild outliers
 				// k8s api sometimes causes cost spikes as described here:
-				// https://github.com/kubecost/opencost/issues/927
+				// https://github.com/opencost/opencost/issues/927
 				if cpuCost < outlierFactor*avgCosts.CpuCostAverage {
 					cmme.CPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(cpuCost)
 					avgCosts.CpuCostAverage = (avgCosts.CpuCostAverage*avgCosts.NumCpuDataPoints + cpuCost) / (avgCosts.NumCpuDataPoints + 1)
@@ -572,9 +572,9 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 			}
 			for lbKey, lb := range loadBalancers {
 				// TODO: parse (if necessary) and calculate cost associated with loadBalancer based on dynamic cloud prices fetched into each lb struct on GetLBCost() call
-				keyParts := getLabelStringsFromKey(lbKey)
-				namespace := keyParts[0]
-				serviceName := keyParts[1]
+
+				namespace := lbKey.Namespace
+				serviceName := lbKey.Service
 				ingressIP := ""
 				if len(lb.IngressIPAddresses) > 0 {
 					ingressIP = lb.IngressIPAddresses[0] // assumes one ingress IP per load balancer

+ 5 - 5
pkg/costmodel/networkcosts.go

@@ -1,11 +1,11 @@
 package costmodel
 
 import (
-	costAnalyzerCloud "github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/util"
+	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util"
 )
 
 // NetworkUsageVNetworkUsageDataector contains the network usage values for egress network traffic

+ 6 - 6
pkg/costmodel/promparsers.go

@@ -5,12 +5,12 @@ import (
 	"fmt"
 	"time"
 
-	costAnalyzerCloud "github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/util"
+	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util"
 )
 
 func GetPVInfoLocal(cache clustercache.ClusterCache, defaultClusterID string) (map[string]*PersistentVolumeClaimData, error) {

+ 20 - 20
pkg/costmodel/router.go

@@ -14,15 +14,15 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/config"
-	"github.com/kubecost/opencost/pkg/kubeconfig"
-	"github.com/kubecost/opencost/pkg/metrics"
-	"github.com/kubecost/opencost/pkg/services"
-	"github.com/kubecost/opencost/pkg/util/httputil"
-	"github.com/kubecost/opencost/pkg/util/timeutil"
-	"github.com/kubecost/opencost/pkg/util/watcher"
-	"github.com/kubecost/opencost/pkg/version"
 	"github.com/microcosm-cc/bluemonday"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/kubeconfig"
+	"github.com/opencost/opencost/pkg/metrics"
+	"github.com/opencost/opencost/pkg/services"
+	"github.com/opencost/opencost/pkg/util/httputil"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+	"github.com/opencost/opencost/pkg/util/watcher"
+	"github.com/opencost/opencost/pkg/version"
 	"github.com/spf13/viper"
 
 	v1 "k8s.io/api/core/v1"
@@ -31,16 +31,16 @@ import (
 
 	sentry "github.com/getsentry/sentry-go"
 
-	"github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/costmodel/clusters"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/errors"
-	"github.com/kubecost/opencost/pkg/kubecost"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/thanos"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/costmodel/clusters"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/errors"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/thanos"
+	"github.com/opencost/opencost/pkg/util/json"
 	prometheus "github.com/prometheus/client_golang/api"
 	prometheusAPI "github.com/prometheus/client_golang/api/prometheus/v1"
 	appsv1 "k8s.io/api/apps/v1"
@@ -510,7 +510,7 @@ func (a *Accesses) CostDataModelRange(w http.ResponseWriter, r *http.Request, ps
 	}
 
 	window := kubecost.NewWindow(&start, &end)
-	if window.IsOpen() || window.IsEmpty() || window.IsNegative() {
+	if window.IsOpen() || !window.HasDuration() || window.IsNegative() {
 		w.Write(WrapDataWithMessage(nil, fmt.Errorf("invalid date range: %s", window), fmt.Sprintf("invalid date range: %s", window)))
 		return
 	}
@@ -1600,7 +1600,7 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	// ClusterInfo Provider to provide the cluster map with local and remote cluster data
 	var clusterInfoProvider clusters.ClusterInfoProvider
 	if env.IsClusterInfoFileEnabled() {
-		clusterInfoFile := confManager.ConfigFileAt(path.Join(configPrefix, " cluster-info.json"))
+		clusterInfoFile := confManager.ConfigFileAt(path.Join(configPrefix, "cluster-info.json"))
 		clusterInfoProvider = NewConfiguredClusterInfoProvider(clusterInfoFile)
 	} else {
 		clusterInfoProvider = NewLocalClusterInfoProvider(kubeClientset, cloudProvider)

+ 2 - 2
pkg/costmodel/settings.go

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/log"
 	"github.com/patrickmn/go-cache"
 )
 

+ 5 - 5
pkg/costmodel/sql.go

@@ -5,11 +5,11 @@ import (
 	"fmt"
 	"time"
 
-	costAnalyzerCloud "github.com/kubecost/opencost/pkg/cloud"
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util"
-	"github.com/kubecost/opencost/pkg/util/json"
+	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/json"
 
 	_ "github.com/lib/pq"
 )

+ 5 - 4
pkg/env/costmodelenv.go

@@ -5,8 +5,8 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/timeutil"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 const (
@@ -88,6 +88,8 @@ const (
 	ETLReadOnlyMode = "ETL_READ_ONLY"
 )
 
+var offsetRegex = regexp.MustCompile(`^(\+|-)(\d\d):(\d\d)$`)
+
 func IsETLReadOnlyMode() bool {
 	return GetBool(ETLReadOnlyMode, false)
 }
@@ -400,8 +402,7 @@ func GetParsedUTCOffset() time.Duration {
 	offset := time.Duration(0)
 
 	if offsetStr := GetUTCOffset(); offsetStr != "" {
-		regex := regexp.MustCompile(`^(\+|-)(\d\d):(\d\d)$`)
-		match := regex.FindStringSubmatch(offsetStr)
+		match := offsetRegex.FindStringSubmatch(offsetStr)
 		if match == nil {
 			log.Warnf("Illegal UTC offset: %s", offsetStr)
 			return offset

+ 1 - 1
pkg/env/env.go

@@ -4,7 +4,7 @@ import (
 	"os"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util/mapper"
+	"github.com/opencost/opencost/pkg/util/mapper"
 )
 
 //--------------------------------------------------------------------------

+ 30 - 28
pkg/kubecost/allocation.go

@@ -8,9 +8,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 // TODO Clean-up use of IsEmpty; nil checks should be separated for safety.
@@ -830,7 +830,7 @@ func NewAllocationSet(start, end time.Time, allocs ...*Allocation) *AllocationSe
 // simple flag for sharing idle resources.
 type AllocationAggregationOptions struct {
 	AllocationTotalsStore AllocationTotalsStore
-	FilterFuncs           []AllocationMatchFunc
+	Filter                AllocationFilter
 	IdleByNode            bool
 	LabelConfig           *LabelConfig
 	MergeUnallocated      bool
@@ -906,15 +906,21 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		options.ShareIdle = ShareNone
 	}
 
+	// Pre-flatten the filter so we can just check == nil to see if there are
+	// filters.
+	if options.Filter != nil {
+		options.Filter = options.Filter.Flattened()
+	}
+
 	var allocatedTotalsMap map[string]map[string]float64
 
 	// If aggregateBy is nil, we don't aggregate anything. On the other hand,
 	// an empty slice implies that we should aggregate everything. See
 	// generateKey for why that makes sense.
 	shouldAggregate := aggregateBy != nil
-	shouldFilter := len(options.FilterFuncs) > 0
+	shouldFilter := options.Filter != nil
 	shouldShare := len(options.SharedHourlyCosts) > 0 || len(options.ShareFuncs) > 0
-	if !shouldAggregate && !shouldFilter && !shouldShare {
+	if !shouldAggregate && !shouldFilter && !shouldShare && options.ShareIdle == ShareNone {
 		// There is nothing for AggregateBy to do, so simply return nil
 		return nil
 	}
@@ -1063,7 +1069,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// Note that this can happen for any field, not just cluster, so we again
 	// need to track this on a per-cluster or per-node, per-allocation, per-resource basis.
 	var idleFiltrationCoefficients map[string]map[string]map[string]float64
-	if len(options.FilterFuncs) > 0 && options.ShareIdle == ShareNone {
+	if shouldFilter && options.ShareIdle == ShareNone {
 		idleFiltrationCoefficients, _, err = computeIdleCoeffs(options, as, shareSet)
 		if err != nil {
 			return fmt.Errorf("error computing idle filtration coefficients: %s", err)
@@ -1115,12 +1121,10 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 
 		skip := false
 
-		// (3) If any of the filter funcs fail, immediately skip the allocation.
-		for _, ff := range options.FilterFuncs {
-			if !ff(alloc) {
-				skip = true
-				break
-			}
+		// (3) If the allocation does not match the filter, immediately skip the
+		// allocation.
+		if options.Filter != nil {
+			skip = !options.Filter.Matches(alloc)
 		}
 		if skip {
 			// If we are tracking idle filtration coefficients, delete the
@@ -1305,11 +1309,8 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// aggregate if an exact match is found.
 	for _, alloc := range externalSet.allocations {
 		skip := false
-		for _, ff := range options.FilterFuncs {
-			if !ff(alloc) {
-				skip = true
-				break
-			}
+		if options.Filter != nil {
+			skip = !options.Filter.Matches(alloc)
 		}
 		if !skip {
 			key := alloc.generateKey(aggregateBy, options.LabelConfig)
@@ -1342,11 +1343,8 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		for _, idleAlloc := range idleSet.allocations {
 			// if the idle does not apply to the non-filtered values, skip it
 			skip := false
-			for _, ff := range options.FilterFuncs {
-				if !ff(idleAlloc) {
-					skip = true
-					break
-				}
+			if options.Filter != nil {
+				skip = !options.Filter.Matches(idleAlloc)
 			}
 			if skip {
 				continue
@@ -1481,11 +1479,8 @@ func computeShareCoeffs(aggregateBy []string, options *AllocationAggregationOpti
 		// is removed. (Otherwise, all the shared cost will get redistributed
 		// over the unfiltered results, inflating their shared costs.)
 		filtered := false
-		for _, ff := range options.FilterFuncs {
-			if !ff(alloc) {
-				filtered = true
-				break
-			}
+		if options.Filter != nil {
+			filtered = !options.Filter.Matches(alloc)
 		}
 		if filtered {
 			name = "__filtered__"
@@ -1889,6 +1884,9 @@ func (as *AllocationSet) Map() map[string]*Allocation {
 
 // MarshalJSON JSON-encodes the AllocationSet
 func (as *AllocationSet) MarshalJSON() ([]byte, error) {
+	if as == nil {
+		return json.Marshal(map[string]*Allocation{})
+	}
 	as.RLock()
 	defer as.RUnlock()
 	return json.Marshal(as.allocations)
@@ -2224,6 +2222,10 @@ func (asr *AllocationSetRange) Length() int {
 
 // MarshalJSON JSON-encodes the range
 func (asr *AllocationSetRange) MarshalJSON() ([]byte, error) {
+	if asr == nil {
+		return json.Marshal([]*AllocationSet{})
+	}
+
 	asr.RLock()
 	defer asr.RUnlock()
 	return json.Marshal(asr.allocations)

+ 86 - 37
pkg/kubecost/allocation_test.go

@@ -6,8 +6,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 func TestAllocation_Add(t *testing.T) {
@@ -750,13 +750,6 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 	}
 
 	// Filters
-	isCluster := func(matchCluster string) func(*Allocation) bool {
-		return func(a *Allocation) bool {
-			cluster := a.Properties.Cluster
-			return cluster == matchCluster
-		}
-	}
-
 	isNamespace := func(matchNamespace string) func(*Allocation) bool {
 		return func(a *Allocation) bool {
 			namespace := a.Properties.Namespace
@@ -1190,8 +1183,12 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationClusterProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isCluster("cluster1")},
-				ShareIdle:   ShareNone,
+				Filter: AllocationFilterCondition{
+					Field: FilterClusterID,
+					Op:    FilterEquals,
+					Value: "cluster1",
+				},
+				ShareIdle: ShareNone,
 			},
 			numResults: 1 + numIdle,
 			totalCost:  66.0,
@@ -1208,8 +1205,8 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationClusterProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isCluster("cluster1")},
-				ShareIdle:   ShareWeighted,
+				Filter:    AllocationFilterCondition{Field: FilterClusterID, Op: FilterEquals, Value: "cluster1"},
+				ShareIdle: ShareWeighted,
 			},
 			numResults: 1,
 			totalCost:  66.0,
@@ -1225,8 +1222,8 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isCluster("cluster1")},
-				ShareIdle:   ShareNone,
+				Filter:    AllocationFilterCondition{Field: FilterClusterID, Op: FilterEquals, Value: "cluster1"},
+				ShareIdle: ShareNone,
 			},
 			numResults: 2 + numIdle,
 			totalCost:  66.0,
@@ -1244,8 +1241,8 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationClusterProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
-				ShareIdle:   ShareNone,
+				Filter:    AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				ShareIdle: ShareNone,
 			},
 			numResults: numClusters + numIdle,
 			totalCost:  46.31,
@@ -1287,8 +1284,8 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
-				ShareIdle:   ShareWeighted,
+				Filter:    AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				ShareIdle: ShareWeighted,
 			},
 			numResults: 1,
 			totalCost:  46.31,
@@ -1312,7 +1309,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs:       []AllocationMatchFunc{isNamespace("namespace2")},
+				Filter:            AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
 				SharedHourlyCosts: map[string]float64{"total": sharedOverheadHourlyCost},
 				ShareSplit:        ShareWeighted,
 			},
@@ -1331,9 +1328,9 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
-				ShareFuncs:  []AllocationMatchFunc{isNamespace("namespace1")},
-				ShareSplit:  ShareWeighted,
+				Filter:     AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
+				ShareSplit: ShareWeighted,
 			},
 			numResults: 1 + numIdle,
 			totalCost:  79.6667, // should be 74.7708, but I'm punting -- too difficult (NK)
@@ -1350,10 +1347,10 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
-				ShareFuncs:  []AllocationMatchFunc{isNamespace("namespace1")},
-				ShareSplit:  ShareWeighted,
-				ShareIdle:   ShareWeighted,
+				Filter:     AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
+				ShareSplit: ShareWeighted,
+				ShareIdle:  ShareWeighted,
 			},
 			numResults: 1,
 			totalCost:  74.77083,
@@ -1456,10 +1453,10 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
-				ShareFuncs:  []AllocationMatchFunc{isNamespace("namespace1")},
-				ShareSplit:  ShareWeighted,
-				ShareIdle:   ShareWeighted,
+				Filter:     AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
+				ShareSplit: ShareWeighted,
+				ShareIdle:  ShareWeighted,
 			},
 			numResults: 1,
 			totalCost:  74.77,
@@ -1502,7 +1499,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs:       []AllocationMatchFunc{isNamespace("namespace2")},
+				Filter:            AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
 				ShareSplit:        ShareWeighted,
 				ShareIdle:         ShareWeighted,
 				SharedHourlyCosts: map[string]float64{"total": sharedOverheadHourlyCost},
@@ -1568,9 +1565,9 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
-				ShareIdle:   ShareWeighted,
-				IdleByNode:  true,
+				Filter:     AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				ShareIdle:  ShareWeighted,
+				IdleByNode: true,
 			},
 			numResults: 1,
 			totalCost:  46.31,
@@ -2368,8 +2365,60 @@ func TestAllocationSetRange_InsertRange(t *testing.T) {
 // TODO niko/etl
 // func TestAllocationSetRange_Length(t *testing.T) {}
 
-// TODO niko/etl
-// func TestAllocationSetRange_MarshalJSON(t *testing.T) {}
+func TestAllocationSetRange_MarshalJSON(t *testing.T) {
+
+	tests := []struct {
+		name     string
+		arg      *AllocationSetRange
+		expected *AllocationSetRange
+	}{
+		{
+			name: "Nil ASR",
+			arg:  nil,
+		},
+		{
+			name: "Nil AS in ASR",
+			arg:  NewAllocationSetRange(nil),
+		},
+		{
+			name: "Normal ASR",
+			arg: &AllocationSetRange{
+				allocations: []*AllocationSet{
+					{
+						allocations: map[string]*Allocation{
+							"a": {
+								Start: time.Now().UTC().Truncate(day),
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+
+		bytes, err := json.Marshal(test.arg)
+		if err != nil {
+			t.Fatalf("ASR Marshal: test %s, unexpected error: %s", test.name, err)
+		}
+
+		var testASR []*AllocationSet
+		marshaled := &testASR
+
+		err = json.Unmarshal(bytes, marshaled)
+
+		if err != nil {
+			t.Fatalf("ASR Unmarshal: test %s: unexpected error: %s", test.name, err)
+		}
+
+		if test.arg.Length() != len(testASR) {
+			t.Fatalf("ASR Unmarshal: test %s: length mutated in encoding: expected %d but got %d", test.name, test.arg.Length(), len(testASR))
+		}
+
+		// Allocations don't unmarshal back from json
+	}
+}
 
 // TODO niko/etl
 // func TestAllocationSetRange_Slice(t *testing.T) {}

+ 12 - 1
pkg/kubecost/allocationfilter.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 )
 
 // FilterField is an enum that represents Allocation-specific fields that can be
@@ -417,3 +417,14 @@ func (or AllocationFilterOr) Matches(a *Allocation) bool {
 
 	return false
 }
+
+// AllocationFilterNone is a filter that matches no allocations. This is useful
+// for applications like authorization, where a user/group/role may be disallowed
+// from viewing Allocation data entirely.
+type AllocationFilterNone struct{}
+
+func (afn AllocationFilterNone) String() string { return "(none)" }
+
+func (afn AllocationFilterNone) Flattened() AllocationFilter { return afn }
+
+func (afn AllocationFilterNone) Matches(a *Allocation) bool { return false }

+ 165 - 0
pkg/kubecost/allocationfilter_test.go

@@ -58,6 +58,36 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 
 			expected: false,
 		},
+		{
+			name: "ClusterID empty StartsWith '' -> true",
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Cluster: "",
+				},
+			},
+			filter: AllocationFilterCondition{
+				Field: FilterClusterID,
+				Op:    FilterStartsWith,
+				Value: "",
+			},
+
+			expected: true,
+		},
+		{
+			name: "ClusterID nonempty StartsWith '' -> true",
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Cluster: "abc",
+				},
+			},
+			filter: AllocationFilterCondition{
+				Field: FilterClusterID,
+				Op:    FilterStartsWith,
+				Value: "",
+			},
+
+			expected: true,
+		},
 		{
 			name: "Node Equals -> true",
 			a: &Allocation{
@@ -585,6 +615,121 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 	}
 }
 
+func Test_AllocationFilterNone_Matches(t *testing.T) {
+	cases := []struct {
+		name string
+		a    *Allocation
+	}{
+		{
+			name: "nil",
+			a:    nil,
+		},
+		{
+			name: "nil properties",
+			a: &Allocation{
+				Properties: nil,
+			},
+		},
+		{
+			name: "empty properties",
+			a: &Allocation{
+				Properties: &AllocationProperties{},
+			},
+		},
+		{
+			name: "ClusterID",
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Cluster: "cluster-one",
+				},
+			},
+		},
+		{
+			name: "Node",
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Node: "node123",
+				},
+			},
+		},
+		{
+			name: "Namespace",
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Namespace: "kube-system",
+				},
+			},
+		},
+		{
+			name: "ControllerKind",
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					ControllerKind: "deployment", // We generally store controller kinds as all lowercase
+				},
+			},
+		},
+		{
+			name: "ControllerName",
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Controller: "kc-cost-analyzer",
+				},
+			},
+		},
+		{
+			name: "Pod",
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Pod: "pod-123 UID-ABC",
+				},
+			},
+		},
+		{
+			name: "Container",
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Container: "cost-model",
+				},
+			},
+		},
+		{
+			name: `label`,
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Labels: map[string]string{
+						"app": "foo",
+					},
+				},
+			},
+		},
+		{
+			name: `annotation`,
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Annotations: map[string]string{
+						"prom_modified_name": "testing123",
+					},
+				},
+			},
+		},
+		{
+			name: `services`,
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Services: []string{"serv1", "serv2"},
+				},
+			},
+		},
+	}
+
+	for _, c := range cases {
+		result := AllocationFilterNone{}.Matches(c.a)
+
+		if result {
+			t.Errorf("%s: should have been rejected", c.name)
+		}
+	}
+}
 func Test_AllocationFilterAnd_Matches(t *testing.T) {
 	cases := []struct {
 		name   string
@@ -693,6 +838,21 @@ func Test_AllocationFilterAnd_Matches(t *testing.T) {
 			}},
 			expected: false,
 		},
+		{
+			name: `(and none) matches nothing`,
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Namespace: "kube-system",
+					Labels: map[string]string{
+						"app": "bar",
+					},
+				},
+			},
+			filter: AllocationFilterAnd{[]AllocationFilter{
+				AllocationFilterNone{},
+			}},
+			expected: false,
+		},
 	}
 
 	for _, c := range cases {
@@ -953,6 +1113,11 @@ func Test_AllocationFilter_Flattened(t *testing.T) {
 				},
 			}},
 		},
+		{
+			name:     "AllocationFilterNone",
+			input:    AllocationFilterNone{},
+			expected: AllocationFilterNone{},
+		},
 	}
 
 	for _, c := range cases {

+ 2 - 2
pkg/kubecost/allocationprops.go

@@ -5,8 +5,8 @@ import (
 	"sort"
 	"strings"
 
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
 )
 
 const (

+ 16 - 6
pkg/kubecost/asset.go

@@ -7,8 +7,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 // UndefinedKey is used in composing Asset group keys if the group does not have that property defined.
@@ -856,6 +856,7 @@ type ClusterManagement struct {
 	labels     AssetLabels
 	properties *AssetProperties
 	window     Window
+	adjustment float64
 	Cost       float64
 }
 
@@ -902,17 +903,17 @@ func (cm *ClusterManagement) SetLabels(props AssetLabels) {
 
 // Adjustment does not apply to ClusterManagement
 func (cm *ClusterManagement) Adjustment() float64 {
-	return 0.0
+	return cm.adjustment
 }
 
 // SetAdjustment does not apply to ClusterManagement
-func (cm *ClusterManagement) SetAdjustment(float64) {
-	return
+func (cm *ClusterManagement) SetAdjustment(adj float64) {
+	cm.adjustment = adj
 }
 
 // TotalCost returns the Asset's total cost
 func (cm *ClusterManagement) TotalCost() float64 {
-	return cm.Cost
+	return cm.Cost + cm.adjustment
 }
 
 // Start returns the Asset's precise start time within the window
@@ -991,6 +992,7 @@ func (cm *ClusterManagement) add(that *ClusterManagement) {
 	cm.window = window
 	cm.SetProperties(props)
 	cm.SetLabels(labels)
+	cm.adjustment += that.adjustment
 	cm.Cost += that.Cost
 }
 
@@ -1000,6 +1002,7 @@ func (cm *ClusterManagement) Clone() Asset {
 		labels:     cm.labels.Clone(),
 		properties: cm.properties.Clone(),
 		window:     cm.window.Clone(),
+		adjustment: cm.adjustment,
 		Cost:       cm.Cost,
 	}
 }
@@ -1022,6 +1025,10 @@ func (cm *ClusterManagement) Equal(a Asset) bool {
 		return false
 	}
 
+	if cm.adjustment != that.adjustment {
+		return false
+	}
+
 	if cm.Cost != that.Cost {
 		return false
 	}
@@ -3090,6 +3097,9 @@ func (asr *AssetSetRange) IsEmpty() bool {
 }
 
 func (asr *AssetSetRange) MarshalJSON() ([]byte, error) {
+	if asr == nil {
+		return json.Marshal([]*AssetSet{})
+	}
 	asr.RLock()
 	defer asr.RUnlock()
 	return json.Marshal(asr.assets)

+ 56 - 1
pkg/kubecost/asset_test.go

@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util"
 )
 
 var start1 = time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
@@ -1424,3 +1424,58 @@ func TestAssetSetRange_Minutes(t *testing.T) {
 		}
 	}
 }
+
+func TestAssetSetRange_MarshalJSON(t *testing.T) {
+
+	tests := []struct {
+		name     string
+		arg      *AssetSetRange
+		expected *AssetSetRange
+	}{
+		{
+			name: "Nil ASR",
+			arg:  nil,
+		},
+		{
+			name: "Nil AS in ASR",
+			arg:  NewAssetSetRange(nil),
+		},
+		{
+			name: "Normal ASR",
+			arg: &AssetSetRange{
+				assets: []*AssetSet{
+					{
+						assets: map[string]Asset{
+							"a": &Any{
+								start: time.Now().UTC().Truncate(day),
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+
+		bytes, err := json.Marshal(test.arg)
+		if err != nil {
+			t.Fatalf("ASR Marshal: test %s, unexpected error: %s", test.name, err)
+		}
+
+		var testASR []*AssetSet
+		marshaled := &testASR
+
+		err = json.Unmarshal(bytes, marshaled)
+
+		if err != nil {
+			t.Fatalf("ASR Unmarshal: test %s: unexpected error: %s", test.name, err)
+		}
+
+		if test.arg.Length() != len(testASR) {
+			t.Fatalf("ASR Unmarshal: test %s: length mutated in encoding: expected %d but got %d", test.name, test.arg.Length(), len(testASR))
+		}
+
+	// Assets don't unmarshal back from JSON
+	}
+}

+ 4 - 1
pkg/kubecost/asset_unmarshal.go

@@ -6,7 +6,7 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 // Encoding and decoding logic for Asset types
@@ -709,6 +709,9 @@ func (sa *SharedAsset) InterfaceToSharedAsset(itf interface{}) error {
 
 // MarshalJSON JSON-encodes the AssetSet
 func (as *AssetSet) MarshalJSON() ([]byte, error) {
+	if as == nil {
+		return json.Marshal(map[string]Asset{})
+	}
 	as.RLock()
 	defer as.RUnlock()
 	return json.Marshal(as.assets)

+ 1 - 1
pkg/kubecost/asset_unmarshal_test.go

@@ -1,7 +1,7 @@
 package kubecost
 
 import (
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/json"
 
 	"testing"
 	"time"

+ 363 - 0
pkg/kubecost/audit.go

@@ -0,0 +1,363 @@
+package kubecost
+
+import (
+	"golang.org/x/exp/slices"
+	"sync"
+	"time"
+)
+
+// AuditType enumerates the types of Audits, each of which should be contained in an AuditSet
+type AuditType string
+
+const (
+	AuditAllocationReconciliation AuditType = "AuditAllocationReconciliation"
+	AuditAllocationTotalStore     AuditType = "AuditAllocationTotalStore"
+	AuditAllocationAggStore       AuditType = "AuditAllocationAggStore"
+	AuditAssetReconciliation      AuditType = "AuditAssetReconciliation"
+	AuditAssetTotalStore          AuditType = "AuditAssetTotalStore"
+	AuditAssetAggStore            AuditType = "AuditAssetAggStore"
+	AuditClusterEquality          AuditType = "AuditClusterEquality"
+
+	AuditAll         AuditType = ""
+	AuditInvalidType AuditType = "InvalidType"
+)
+
+// ToAuditType converts a string to an Audit type
+func ToAuditType(check string) AuditType {
+	switch check {
+	case string(AuditAllocationReconciliation):
+		return AuditAllocationReconciliation
+	case string(AuditAllocationTotalStore):
+		return AuditAllocationTotalStore
+	case string(AuditAllocationAggStore):
+		return AuditAllocationAggStore
+	case string(AuditAssetReconciliation):
+		return AuditAssetReconciliation
+	case string(AuditAssetTotalStore):
+		return AuditAssetTotalStore
+	case string(AuditAssetAggStore):
+		return AuditAssetAggStore
+	//case string(AuditClusterEquality):
+	//	return AuditClusterEquality
+	case string(AuditAll):
+		return AuditAll
+	default:
+		return AuditInvalidType
+	}
+}
+
+// AuditStatus are possible outcomes of an audit
+type AuditStatus string
+
+const (
+	FailedStatus  AuditStatus = "Failed"
+	WarningStatus             = "Warning"
+	PassedStatus              = "Passed"
+)
+
+// AuditMissingValue records when a value that should be present in a store or in the audit-generated results is missing
+type AuditMissingValue struct {
+	Description string
+	Key         string
+}
+
+// AuditFloatResult structure for holding the results of a failed audit on a float value, Expected should be the value
+// calculated by the Audit func while Actual is what is contained in the relevant store.
+type AuditFloatResult struct {
+	Expected float64
+	Actual   float64
+}
+
+// Clone returns a deep copy of the caller
+func (afr *AuditFloatResult) Clone() *AuditFloatResult {
+	return &AuditFloatResult{
+		Expected: afr.Expected,
+		Actual:   afr.Actual,
+	}
+}
+
+// AllocationReconciliationAudit records the differences of between compute resources (cpu, ram, gpu) costs between
+// allocations by nodes and node assets keyed on node name and compute resource
+type AllocationReconciliationAudit struct {
+	Status        AuditStatus
+	Description   string
+	LastRun       time.Time
+	Resources     map[string]map[string]*AuditFloatResult
+	MissingValues []*AuditMissingValue
+}
+
+// Clone returns a deep copy of the caller
+func (ara *AllocationReconciliationAudit) Clone() *AllocationReconciliationAudit {
+	if ara == nil {
+		return nil
+	}
+
+	resources := make(map[string]map[string]*AuditFloatResult, len(ara.Resources))
+	for node, resourceMap := range ara.Resources {
+		copyResourceMap := make(map[string]*AuditFloatResult, len(resourceMap))
+		for resourceName, val := range resourceMap {
+			copyResourceMap[resourceName] = val.Clone()
+		}
+		resources[node] = copyResourceMap
+	}
+	return &AllocationReconciliationAudit{
+		Status:        ara.Status,
+		Description:   ara.Description,
+		LastRun:       ara.LastRun,
+		Resources:     resources,
+		MissingValues: slices.Clone(ara.MissingValues),
+	}
+}
+
+// TotalAudit records the differences between a total store and the totaled results of the store that it is based on
+// keyed by cluster and node names
+type TotalAudit struct {
+	Status         AuditStatus
+	Description    string
+	LastRun        time.Time
+	TotalByNode    map[string]*AuditFloatResult
+	TotalByCluster map[string]*AuditFloatResult
+	MissingValues  []*AuditMissingValue
+}
+
+// Clone returns a deep copy of the caller
+func (ta *TotalAudit) Clone() *TotalAudit {
+	if ta == nil {
+		return nil
+	}
+
+	tbn := make(map[string]*AuditFloatResult, len(ta.TotalByNode))
+	for k, v := range ta.TotalByNode {
+		tbn[k] = v
+	}
+	tbc := make(map[string]*AuditFloatResult, len(ta.TotalByNode))
+	for k, v := range ta.TotalByCluster {
+		tbc[k] = v
+	}
+
+	return &TotalAudit{
+		Status:         ta.Status,
+		Description:    ta.Description,
+		LastRun:        ta.LastRun,
+		TotalByNode:    tbn,
+		TotalByCluster: tbc,
+		MissingValues:  slices.Clone(ta.MissingValues),
+	}
+}
+
+// AggAudit contains the results of an Audit on an AggStore keyed on aggregation prop and Allocation key
+type AggAudit struct {
+	Status        AuditStatus
+	Description   string
+	LastRun       time.Time
+	Results       map[string]map[string]*AuditFloatResult
+	MissingValues []*AuditMissingValue
+}
+
+// Clone returns a deep copy of the caller
+func (aa *AggAudit) Clone() *AggAudit {
+	if aa == nil {
+		return nil
+	}
+	res := make(map[string]map[string]*AuditFloatResult, len(aa.Results))
+	for aggType, aggResults := range aa.Results {
+		copyAggResult := make(map[string]*AuditFloatResult, len(aggResults))
+		for aggName, auditFloatResult := range aggResults {
+			copyAggResult[aggName] = auditFloatResult
+		}
+		res[aggType] = copyAggResult
+	}
+
+	return &AggAudit{
+		Status:        aa.Status,
+		Description:   aa.Description,
+		LastRun:       aa.LastRun,
+		Results:       res,
+		MissingValues: slices.Clone(aa.MissingValues),
+	}
+}
+
+// AssetReconciliationAudit records differences in assets and the Cloud
+type AssetReconciliationAudit struct {
+	Status        AuditStatus
+	Description   string
+	LastRun       time.Time
+	Results       map[string]map[string]*AuditFloatResult
+	MissingValues []*AuditMissingValue
+}
+
+// Clone returns a deep copy of the caller
+func (ara *AssetReconciliationAudit) Clone() *AssetReconciliationAudit {
+	res := make(map[string]map[string]*AuditFloatResult, len(ara.Results))
+	for aggType, aggResults := range ara.Results {
+		copyAggResult := make(map[string]*AuditFloatResult, len(aggResults))
+		for aggName, auditFloatResult := range aggResults {
+			copyAggResult[aggName] = auditFloatResult
+		}
+		res[aggType] = copyAggResult
+	}
+
+	return &AssetReconciliationAudit{
+		Status:        ara.Status,
+		Description:   ara.Description,
+		LastRun:       ara.LastRun,
+		Results:       res,
+		MissingValues: slices.Clone(ara.MissingValues),
+	}
+}
+
+// EqualityAudit records the difference in cost between Allocations and Assets aggregated by cluster and keyed on cluster
+type EqualityAudit struct {
+	Status        AuditStatus
+	Description   string
+	LastRun       time.Time
+	Clusters      map[string]*AuditFloatResult
+	MissingValues []*AuditMissingValue
+}
+
+// Clone returns a deep copy of the caller
+func (ea *EqualityAudit) Clone() *EqualityAudit {
+	if ea == nil {
+		return nil
+	}
+	clusters := make(map[string]*AuditFloatResult, len(ea.Clusters))
+	for k, v := range ea.Clusters {
+		clusters[k] = v
+	}
+	return &EqualityAudit{
+		Status:        ea.Status,
+		Description:   ea.Description,
+		LastRun:       ea.LastRun,
+		Clusters:      clusters,
+		MissingValues: slices.Clone(ea.MissingValues),
+	}
+}
+
+// AuditCoverage tracks coverage of each audit type
+type AuditCoverage struct {
+	sync.RWMutex
+	AllocationReconciliation Window `json:"allocationReconciliation"`
+	AllocationAgg            Window `json:"allocationAgg"`
+	AllocationTotal          Window `json:"allocationTotal"`
+	AssetTotal               Window `json:"assetTotal"`
+	AssetReconciliation      Window `json:"assetReconciliation"`
+	ClusterEquality          Window `json:"clusterEquality"`
+}
+
+// NewAuditCoverage create default AuditCoverage
+func NewAuditCoverage() *AuditCoverage {
+	return &AuditCoverage{}
+}
+
+// Update expands each Window in the coverage to include the given AuditSet's Window. NOTE(review): the code guards only on AllocationReconciliation being non-nil before expanding all windows — confirm that is intended rather than a per-audit check.
+// Note: This means of determining coverage can lead to holes in the given window
+func (ac *AuditCoverage) Update(as *AuditSet) {
+	if as != nil && as.AllocationReconciliation != nil {
+		ac.AllocationReconciliation.Expand(as.Window)
+		ac.AllocationAgg.Expand(as.Window)
+		ac.AllocationTotal.Expand(as.Window)
+		ac.AssetTotal.Expand(as.Window)
+		ac.AssetReconciliation.Expand(as.Window)
+		ac.ClusterEquality.Expand(as.Window)
+	}
+
+}
+
+// AuditSet is a ETLSet which contains all kind of Audits for a given Window
+type AuditSet struct {
+	sync.RWMutex
+	AllocationReconciliation *AllocationReconciliationAudit `json:"allocationReconciliation"`
+	AllocationAgg            *AggAudit                      `json:"allocationAgg"`
+	AllocationTotal          *TotalAudit                    `json:"allocationTotal"`
+	AssetTotal               *TotalAudit                    `json:"assetTotal"`
+	AssetReconciliation      *AssetReconciliationAudit      `json:"assetReconciliation"`
+	ClusterEquality          *EqualityAudit                 `json:"clusterEquality"`
+	Window                   Window                         `json:"window"`
+}
+
+// NewAuditSet creates an empty AuditSet with the given window
+func NewAuditSet(start, end time.Time) *AuditSet {
+	return &AuditSet{
+		Window: NewWindow(&start, &end),
+	}
+}
+
+// UpdateAuditSet overwrites any audit fields in the caller with those in the given AuditSet which are not nil
+func (as *AuditSet) UpdateAuditSet(that *AuditSet) *AuditSet {
+	if as == nil {
+		return that
+	}
+
+	if that.AllocationReconciliation != nil {
+		as.AllocationReconciliation = that.AllocationReconciliation
+	}
+	if that.AllocationAgg != nil {
+		as.AllocationAgg = that.AllocationAgg
+	}
+	if that.AllocationTotal != nil {
+		as.AllocationTotal = that.AllocationTotal
+	}
+	if that.AssetTotal != nil {
+		as.AssetTotal = that.AssetTotal
+	}
+	if that.AssetReconciliation != nil {
+		as.AssetReconciliation = that.AssetReconciliation
+	}
+
+	if that.ClusterEquality != nil {
+		as.ClusterEquality = that.ClusterEquality
+	}
+
+	return as
+}
+
+// ConstructSet fulfills the ETLSet interface to provide an empty version of itself so that it can be initialized in its
+// generic form.
+func (as *AuditSet) ConstructSet() ETLSet {
+	return &AuditSet{}
+}
+
+// IsEmpty returns true if the AuditSet is nil or all of its audits are nil
+func (as *AuditSet) IsEmpty() bool {
+	return as == nil || (as.AllocationReconciliation == nil &&
+		as.AllocationAgg == nil &&
+		as.AllocationTotal == nil &&
+		as.AssetTotal == nil &&
+		as.AssetReconciliation == nil &&
+		as.ClusterEquality == nil)
+}
+
+// GetWindow returns AuditSet Window
+func (as *AuditSet) GetWindow() Window {
+	return as.Window
+}
+
+// Clone returns a deep copy of the caller
+func (as *AuditSet) Clone() *AuditSet {
+	if as == nil {
+		return nil
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+
+	return &AuditSet{
+		AllocationReconciliation: as.AllocationReconciliation.Clone(),
+		AllocationAgg:            as.AllocationAgg.Clone(),
+		AllocationTotal:          as.AllocationTotal.Clone(),
+		AssetTotal:               as.AssetTotal.Clone(),
+		AssetReconciliation:      as.AssetReconciliation.Clone(),
+		ClusterEquality:          as.ClusterEquality.Clone(),
+		Window:                   as.Window.Clone(),
+	}
+}
+
+// CloneSet returns a deep copy of the caller and returns set
+func (as *AuditSet) CloneSet() ETLSet {
+	return as.Clone()
+}
+
+// AuditSetRange SetRange of AuditSets
+type AuditSetRange struct {
+	SetRange[*AuditSet]
+}

+ 15 - 1
pkg/kubecost/bingen.go

@@ -57,4 +57,18 @@ package kubecost
 // @bingen:generate:PVAllocation
 // @bingen:end
 
-//go:generate bingen -package=kubecost -version=15 -buffer=github.com/kubecost/opencost/pkg/util
+// @bingen:set[name=Audit,version=1]
+// @bingen:generate:AllocationReconciliationAudit
+// @bingen:generate:TotalAudit
+// @bingen:generate:AggAudit
+// @bingen:generate:AuditFloatResult
+// @bingen:generate:AuditMissingValue
+// @bingen:generate:AssetReconciliationAudit
+// @bingen:generate:EqualityAudit
+// @bingen:generate:AuditType
+// @bingen:generate:AuditStatus
+// @bingen:generate[stringtable]:AuditSet
+// @bingen:generate:AuditSetRange
+// @bingen:end
+
+//go:generate bingen -package=kubecost -version=15 -buffer=github.com/opencost/opencost/pkg/util

+ 2 - 2
pkg/kubecost/config.go

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/kubecost/opencost/pkg/prom"
-	"github.com/kubecost/opencost/pkg/util/cloudutil"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util/cloudutil"
 )
 
 // LabelConfig is a port of type AnalyzerConfig. We need to be more thoughtful

+ 1 - 1
pkg/kubecost/config_test.go

@@ -3,7 +3,7 @@ package kubecost
 import (
 	"testing"
 
-	"github.com/kubecost/opencost/pkg/util/cloudutil"
+	"github.com/opencost/opencost/pkg/util/cloudutil"
 )
 
 func TestLabelConfig_Map(t *testing.T) {

+ 90 - 0
pkg/kubecost/etlrange.go

@@ -0,0 +1,90 @@
+package kubecost
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// SetRange is a generic implementation of the SetRanges that act as containers. It covers the basic functionality that
+// is shared by the basic types but is meant to be extended by each implementation.
+type SetRange[T ETLSet] struct {
+	lock sync.RWMutex
+	sets []T
+}
+
+// Append attaches the given ETLSet to the end of the sets slice.
+// currently does not check that the window is correct.
+func (r *SetRange[T]) Append(that T) {
+	if r == nil {
+		return
+	}
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	r.sets = append(r.sets, that)
+}
+
+// Each invokes the given function for each ETLSet in the SetRange
+func (r *SetRange[T]) Each(f func(int, T)) {
+	if r == nil {
+		return
+	}
+
+	for i, set := range r.sets {
+		f(i, set)
+	}
+}
+
+// Get retrieves the given index from the sets slice
+func (r *SetRange[T]) Get(i int) (T, error) {
+	var set T
+	if r == nil {
+		return set, fmt.Errorf("SetRange: Get: is nil")
+	}
+	if i < 0 || i >= len(r.sets) {
+
+		return set, fmt.Errorf("SetRange: Get: index out of range: %d", i)
+	}
+
+	r.lock.RLock()
+	defer r.lock.RUnlock()
+	return r.sets[i], nil
+}
+
+// Length returns the length of the sets slice
+func (r *SetRange[T]) Length() int {
+	if r == nil || r.sets == nil {
+		return 0
+	}
+
+	r.lock.RLock()
+	defer r.lock.RUnlock()
+	return len(r.sets)
+}
+
+// IsEmpty returns false if SetRange contains a single ETLSet that is not empty
+func (r *SetRange[T]) IsEmpty() bool {
+	if r == nil || r.Length() == 0 {
+		return true
+	}
+	r.lock.RLock()
+	defer r.lock.RUnlock()
+
+	for _, set := range r.sets {
+		if !set.IsEmpty() {
+			return false
+		}
+	}
+	return true
+}
+
+// MarshalJSON converts SetRange to JSON
+func (r *SetRange[T]) MarshalJSON() ([]byte, error) {
+	if r == nil {
+		return json.Marshal([]T{})
+	}
+	r.lock.RLock()
+	defer r.lock.RUnlock()
+	return json.Marshal(r.sets)
+}

+ 15 - 0
pkg/kubecost/etlset.go

@@ -0,0 +1,15 @@
+package kubecost
+
+import "encoding"
+
+// ETLSet is an interface which represents the basic data block of an ETL. It is keyed by its Window
+type ETLSet interface {
+	ConstructSet() ETLSet
+	CloneSet() ETLSet
+	IsEmpty() bool
+	GetWindow() Window
+
+	// Representations
+	encoding.BinaryMarshaler
+	encoding.BinaryUnmarshaler
+}

+ 1 - 1
pkg/kubecost/json.go

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"math"
 
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 // TODO move everything below to a separate package

Разница между файлами не показана из-за своего большого размера
+ 1136 - 65
pkg/kubecost/kubecost_codecs.go


+ 1 - 1
pkg/kubecost/query.go

@@ -39,7 +39,7 @@ type AllocationQueryOptions struct {
 	AggregateBy             []string
 	Compute                 bool
 	DisableAggregatedStores bool
-	FilterFuncs             []AllocationMatchFunc
+	Filter                  AllocationFilter
 	IdleByNode              bool
 	IncludeExternal         bool
 	IncludeIdle             bool

+ 22 - 23
pkg/kubecost/summaryallocation.go

@@ -7,7 +7,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 )
 
 // SummaryAllocation summarizes an Allocation, keeping only fields necessary
@@ -301,15 +301,21 @@ type SummaryAllocationSet struct {
 // required for unfortunate reasons to do with performance and legacy order-of-
 // operations details, as well as the fact that reconciliation has been
 // pushed down to the conversion step between Allocation and SummaryAllocation.
-func NewSummaryAllocationSet(as *AllocationSet, ffs, kfs []AllocationMatchFunc, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
+func NewSummaryAllocationSet(as *AllocationSet, filter AllocationFilter, kfs []AllocationMatchFunc, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
 	if as == nil {
 		return nil
 	}
 
+	// Pre-flatten the filter so we can just check == nil to see if there are
+	// filters.
+	if filter != nil {
+		filter = filter.Flattened()
+	}
+
 	// If we can know the exact size of the map, use it. If filters or sharing
 	// functions are present, we can't know the size, so we make a default map.
 	var sasMap map[string]*SummaryAllocation
-	if len(ffs) == 0 && len(kfs) == 0 {
+	if filter == nil && len(kfs) == 0 {
 		// No filters, so make the map of summary allocations exactly the size
 		// of the origin allocation set.
 		sasMap = make(map[string]*SummaryAllocation, len(as.allocations))
@@ -342,16 +348,8 @@ func NewSummaryAllocationSet(as *AllocationSet, ffs, kfs []AllocationMatchFunc,
 
 		// If the allocation does not pass any of the given filter functions,
 		// do not insert it into the set.
-		shouldFilter := false
-		for _, ff := range ffs {
-			if !ff(alloc) {
-				shouldFilter = true
-				break
-			}
-		}
-		if shouldFilter {
+		if filter != nil && !filter.Matches(alloc) {
 			continue
-
 		}
 
 		err := sas.Insert(NewSummaryAllocation(alloc, reconcile, reconcileNetwork))
@@ -475,6 +473,12 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		options.LabelConfig = NewLabelConfig()
 	}
 
+	// Pre-flatten the filter so we can just check == nil to see if there are
+	// filters.
+	if options.Filter != nil {
+		options.Filter = options.Filter.Flattened()
+	}
+
 	// Check if we have any work to do; if not, then early return. If
 	// aggregateBy is nil, we don't aggregate anything. On the other hand,
 	// an empty slice implies that we should aggregate everything. (See
@@ -672,7 +676,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	// recorded by idle-key (cluster or node, depending on the IdleByNode
 	// option). Instantiating this map is a signal to record the totals.
 	var allocTotalsAfterFilters map[string]*AllocationTotals
-	if len(resultSet.idleKeys) > 0 && len(options.FilterFuncs) > 0 {
+	if len(resultSet.idleKeys) > 0 && options.Filter != nil {
 		allocTotalsAfterFilters = make(map[string]*AllocationTotals, len(resultSet.idleKeys))
 	}
 
@@ -961,11 +965,9 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		// be filtered or not.
 		// TODO:CLEANUP do something about external cost, this stinks
 		ea := &Allocation{Properties: sa.Properties}
-		for _, ff := range options.FilterFuncs {
-			if !ff(ea) {
-				skip = true
-				break
-			}
+
+		if options.Filter != nil {
+			skip = !options.Filter.Matches(ea)
 		}
 
 		if !skip {
@@ -987,11 +989,8 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		// be filtered or not.
 		// TODO:CLEANUP do something about external cost, this stinks
 		ia := &Allocation{Properties: isa.Properties}
-		for _, ff := range options.FilterFuncs {
-			if !ff(ia) {
-				skip = true
-				break
-			}
+		if options.Filter != nil {
+			skip = !options.Filter.Matches(ia)
 		}
 		if skip {
 			continue

+ 1 - 1
pkg/kubecost/summaryallocation_test.go

@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util"
 )
 
 func TestSummaryAllocation_Add(t *testing.T) {

+ 4 - 3
pkg/kubecost/totals.go

@@ -6,7 +6,7 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 	"github.com/patrickmn/go-cache"
 )
 
@@ -411,7 +411,7 @@ func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotal
 
 			arts[key].Count++
 			arts[key].LoadBalancerCost += lb.Cost
-			arts[key].LoadBalancerCost += lb.adjustment
+			arts[key].LoadBalancerCostAdjustment += lb.adjustment
 		} else if cm, ok := asset.(*ClusterManagement); ok && prop == AssetClusterProp {
 			// Only record cluster management when prop is Cluster because we
 			// can't break down ClusterManagement by node.
@@ -426,7 +426,8 @@ func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotal
 			}
 
 			arts[key].Count++
-			arts[key].ClusterManagementCost += cm.TotalCost()
+			arts[key].ClusterManagementCost += cm.Cost
+			arts[key].ClusterManagementCostAdjustment += cm.adjustment
 		} else if disk, ok := asset.(*Disk); ok {
 			// Record disks in an intermediate structure, which will be
 			// processed after all assets have been seen.

+ 26 - 15
pkg/kubecost/window.go

@@ -8,10 +8,10 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util/timeutil"
+	"github.com/opencost/opencost/pkg/util/timeutil"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/thanos"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/thanos"
 )
 
 const (
@@ -20,6 +20,15 @@ const (
 	hoursPerDay    = 24
 )
 
+var (
+	durationRegex       = regexp.MustCompile(`^(\d+)(m|h|d)$`)
+	durationOffsetRegex = regexp.MustCompile(`^(\d+)(m|h|d) offset (\d+)(m|h|d)$`)
+	offesetRegex        = regexp.MustCompile(`^(\+|-)(\d\d):(\d\d)$`)
+	rfc3339             = `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ`
+	rfcRegex            = regexp.MustCompile(fmt.Sprintf(`(%s),(%s)`, rfc3339, rfc3339))
+	timestampPairRegex  = regexp.MustCompile(`^(\d+)[,|-](\d+)$`)
+)
+
 // RoundBack rounds the given time back to a multiple of the given resolution
 // in the given time's timezone.
 // e.g. 2020-01-01T12:37:48-0700, 24h = 2020-01-01T00:00:00-0700
@@ -82,8 +91,7 @@ func ParseWindowWithOffsetString(window string, offset string) (Window, error) {
 		return ParseWindowUTC(window)
 	}
 
-	regex := regexp.MustCompile(`^(\+|-)(\d\d):(\d\d)$`)
-	match := regex.FindStringSubmatch(offset)
+	match := offesetRegex.FindStringSubmatch(offset)
 	if match == nil {
 		return Window{}, fmt.Errorf("illegal UTC offset: '%s'; should be of form '-07:00'", offset)
 	}
@@ -215,8 +223,7 @@ func parseWindow(window string, now time.Time) (Window, error) {
 	}
 
 	// Match duration strings; e.g. "45m", "24h", "7d"
-	regex := regexp.MustCompile(`^(\d+)(m|h|d)$`)
-	match := regex.FindStringSubmatch(window)
+	match := durationRegex.FindStringSubmatch(window)
 	if match != nil {
 		dur := time.Minute
 		if match[2] == "h" {
@@ -235,8 +242,7 @@ func parseWindow(window string, now time.Time) (Window, error) {
 	}
 
 	// Match duration strings with offset; e.g. "45m offset 15m", etc.
-	regex = regexp.MustCompile(`^(\d+)(m|h|d) offset (\d+)(m|h|d)$`)
-	match = regex.FindStringSubmatch(window)
+	match = durationOffsetRegex.FindStringSubmatch(window)
 	if match != nil {
 		end := now
 
@@ -268,8 +274,7 @@ func parseWindow(window string, now time.Time) (Window, error) {
 	}
 
 	// Match timestamp pairs, e.g. "1586822400,1586908800" or "1586822400-1586908800"
-	regex = regexp.MustCompile(`^(\d+)[,|-](\d+)$`)
-	match = regex.FindStringSubmatch(window)
+	match = timestampPairRegex.FindStringSubmatch(window)
 	if match != nil {
 		s, _ := strconv.ParseInt(match[1], 10, 64)
 		e, _ := strconv.ParseInt(match[2], 10, 64)
@@ -279,9 +284,7 @@ func parseWindow(window string, now time.Time) (Window, error) {
 	}
 
 	// Match RFC3339 pairs, e.g. "2020-04-01T00:00:00Z,2020-04-03T00:00:00Z"
-	rfc3339 := `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ`
-	regex = regexp.MustCompile(fmt.Sprintf(`(%s),(%s)`, rfc3339, rfc3339))
-	match = regex.FindStringSubmatch(window)
+	match = rfcRegex.FindStringSubmatch(window)
 	if match != nil {
 		start, _ := time.Parse(time.RFC3339, match[1])
 		end, _ := time.Parse(time.RFC3339, match[2])
@@ -455,14 +458,22 @@ func (w Window) Hours() float64 {
 	return w.end.Sub(*w.start).Hours()
 }
 
+//IsEmpty a Window is empty if it does not have a start and an end
 func (w Window) IsEmpty() bool {
-	return !w.IsOpen() && w.end.Equal(*w.Start())
+	return w.start == nil && w.end == nil
+}
+
+//HasDuration a Window has duration if neither start and end are not nil and not equal
+func (w Window) HasDuration() bool {
+	return !w.IsOpen() && !w.end.Equal(*w.Start())
 }
 
+//IsNegative a Window is negative if start and end are not null and end is before start
 func (w Window) IsNegative() bool {
 	return !w.IsOpen() && w.end.Before(*w.Start())
 }
 
+//IsOpen a Window is open if it has a nil start or end
 func (w Window) IsOpen() bool {
 	return w.start == nil || w.end == nil
 }

+ 10 - 1
pkg/kubecost/window_test.go

@@ -6,7 +6,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/env"
 )
 
 func TestRoundBack(t *testing.T) {
@@ -324,6 +324,15 @@ func TestParseWindowUTC(t *testing.T) {
 	}
 }
 
+func BenchmarkParseWindowUTC(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		_, err := ParseWindowUTC("2020-04-08T00:00:00Z,2020-04-12T00:00:00Z")
+		if err != nil {
+			b.Fatalf("error running benchmark: %s", err.Error())
+		}
+	}
+}
+
 func TestParseWindowWithOffsetString(t *testing.T) {
 	// ParseWindowWithOffsetString should equal ParseWindowUTC when location == "UTC"
 	// for all window string formats

+ 2 - 2
pkg/metrics/deploymentmetrics.go

@@ -1,8 +1,8 @@
 package metrics
 
 import (
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/prom"
 
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"

+ 1 - 1
pkg/metrics/jobmetrics.go

@@ -1,7 +1,7 @@
 package metrics
 
 import (
-	"github.com/kubecost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	batchv1 "k8s.io/api/batch/v1"

+ 2 - 2
pkg/metrics/kubemetrics.go

@@ -5,8 +5,8 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/prom"
 
 	"github.com/prometheus/client_golang/prometheus"
 	batchv1 "k8s.io/api/batch/v1"

+ 2 - 2
pkg/metrics/metricsconfig.go

@@ -8,8 +8,8 @@ import (
 	"path"
 	"sync"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/util/watcher"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/util/watcher"
 )
 
 var (

+ 2 - 2
pkg/metrics/namespacemetrics.go

@@ -1,8 +1,8 @@
 package metrics
 
 import (
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/prom"
 
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"

+ 3 - 3
pkg/metrics/nodemetrics.go

@@ -3,9 +3,9 @@ package metrics
 import (
 	"strings"
 
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	v1 "k8s.io/api/core/v1"

+ 2 - 2
pkg/metrics/podlabelmetrics.go

@@ -1,8 +1,8 @@
 package metrics
 
 import (
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/prom"
 	"github.com/prometheus/client_golang/prometheus"
 )
 

+ 3 - 3
pkg/metrics/podmetrics.go

@@ -3,9 +3,9 @@ package metrics
 import (
 	"fmt"
 
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	v1 "k8s.io/api/core/v1"

+ 1 - 1
pkg/metrics/pvcmetrics.go

@@ -1,7 +1,7 @@
 package metrics
 
 import (
-	"github.com/kubecost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	v1 "k8s.io/api/core/v1"

+ 1 - 1
pkg/metrics/pvmetrics.go

@@ -1,7 +1,7 @@
 package metrics
 
 import (
-	"github.com/kubecost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	v1 "k8s.io/api/core/v1"

+ 2 - 2
pkg/metrics/servicemetrics.go

@@ -1,8 +1,8 @@
 package metrics
 
 import (
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/prom"
 
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"

+ 2 - 2
pkg/metrics/statefulsetmetrics.go

@@ -1,8 +1,8 @@
 package metrics
 
 import (
-	"github.com/kubecost/opencost/pkg/clustercache"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/prom"
 
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"

+ 2 - 2
pkg/prom/diagnostics.go

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
 	prometheus "github.com/prometheus/client_golang/api"
 )
 

+ 1 - 1
pkg/prom/error.go

@@ -6,7 +6,7 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 )
 
 // errorType used to check HasError

+ 1 - 1
pkg/prom/metrics.go

@@ -7,7 +7,7 @@ import (
 	"sort"
 	"strings"
 
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)

+ 5 - 5
pkg/prom/prom.go

@@ -11,11 +11,11 @@ import (
 	"strings"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/collections"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/atomic"
-	"github.com/kubecost/opencost/pkg/util/fileutil"
-	"github.com/kubecost/opencost/pkg/util/httputil"
+	"github.com/opencost/opencost/pkg/collections"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/atomic"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/httputil"
 
 	golog "log"
 

+ 9 - 16
pkg/prom/query.go

@@ -8,11 +8,11 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/errors"
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/httputil"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/errors"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/httputil"
+	"github.com/opencost/opencost/pkg/util/json"
 	prometheus "github.com/prometheus/client_golang/api"
 )
 
@@ -189,19 +189,12 @@ func (ctx *Context) RawQuery(query string, t time.Time) ([]byte, error) {
 	q := u.Query()
 	q.Set("query", query)
 
-	if !t.IsZero() {
-		q.Set("time", strconv.FormatInt(t.Unix(), 10))
-	} else {
-		// for non-range queries, we set the timestamp for the query to time-offset
-		// this is a special use case that's typically only used when our primary
-		// prom db has delayed insertion (thanos, cortex, etc...)
-		if promQueryOffset != 0 && ctx.name != AllocationContextName {
-			q.Set("time", time.Now().Add(-promQueryOffset).UTC().Format(time.RFC3339))
-		} else {
-			q.Set("time", time.Now().UTC().Format(time.RFC3339))
-		}
+	if t.IsZero() {
+		t = time.Now()
 	}
 
+	q.Set("time", strconv.FormatInt(t.Unix(), 10))
+
 	u.RawQuery = q.Encode()
 
 	req, err := http.NewRequest(http.MethodPost, u.String(), nil)

+ 2 - 2
pkg/prom/ratelimitedclient_test.go

@@ -12,8 +12,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/util"
-	"github.com/kubecost/opencost/pkg/util/httputil"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/httputil"
 	prometheus "github.com/prometheus/client_golang/api"
 )
 

+ 2 - 2
pkg/prom/result.go

@@ -6,8 +6,8 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
 )
 
 var (

+ 1 - 1
pkg/prom/validate.go

@@ -3,7 +3,7 @@ package prom
 import (
 	"fmt"
 
-	"github.com/kubecost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/env"
 
 	prometheus "github.com/prometheus/client_golang/api"
 )

+ 3 - 3
pkg/services/clusters/clustermanager.go

@@ -8,9 +8,9 @@ import (
 
 	"github.com/google/uuid"
 
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/fileutil"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
 
 	"sigs.k8s.io/yaml"
 )

+ 2 - 2
pkg/services/clusters/clustersendpoints.go

@@ -7,8 +7,8 @@ import (
 
 	"github.com/julienschmidt/httprouter"
 
-	"github.com/kubecost/opencost/pkg/log"
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 // DataEnvelope is a generic wrapper struct for http response data

+ 2 - 2
pkg/services/clusterservice.go

@@ -3,8 +3,8 @@ package services
 import (
 	"path"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/services/clusters"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/services/clusters"
 )
 
 // NewClusterManagerService creates a new HTTPService implementation driving cluster definition management

+ 1 - 1
pkg/services/services.go

@@ -4,7 +4,7 @@ import (
 	"sync"
 
 	"github.com/julienschmidt/httprouter"
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 )
 
 // HTTPService defines an implementation prototype for an object capable of registering

+ 1 - 1
pkg/storage/azurestorage.go

@@ -18,7 +18,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 
 	"github.com/Azure/azure-pipeline-go/pipeline"
 	blob "github.com/Azure/azure-storage-blob-go/azblob"

+ 1 - 1
pkg/storage/filestorage.go

@@ -7,7 +7,7 @@ import (
 	gopath "path"
 	"path/filepath"
 
-	"github.com/kubecost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/fileutil"
 	"github.com/pkg/errors"
 )
 

+ 1 - 1
pkg/storage/gcsstorage.go

@@ -10,7 +10,7 @@ import (
 	"strings"
 
 	gcs "cloud.google.com/go/storage"
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2/google"
 	"google.golang.org/api/iterator"

+ 1 - 1
pkg/storage/s3storage.go

@@ -13,7 +13,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/log"
 
 	aws "github.com/aws/aws-sdk-go-v2/aws"
 	awsconfig "github.com/aws/aws-sdk-go-v2/config"

+ 1 - 1
pkg/storage/storagetypes.go

@@ -3,7 +3,7 @@ package storage
 import (
 	"strings"
 
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 /*

+ 1 - 1
pkg/storage/storagetypes_test.go

@@ -3,7 +3,7 @@ package storage
 import (
 	"testing"
 
-	"github.com/kubecost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/json"
 )
 
 func assert(t *testing.T, condition bool, msg string) {

+ 2 - 2
pkg/thanos/thanos.go

@@ -10,8 +10,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/opencost/pkg/env"
-	"github.com/kubecost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/prom"
 
 	prometheus "github.com/prometheus/client_golang/api"
 )

Некоторые файлы не были показаны из-за большого количества измененных файлов