Browse Source

Merge branch 'develop' into develop

Ajay Tripathy 2 years ago
parent
commit
644f2977ea
98 changed files with 8437 additions and 3850 deletions
  1. 1 1
      .github/PULL_REQUEST_TEMPLATE.md
  2. 4 4
      README.md
  3. 2 2
      configs/aws.json
  4. 6 5
      configs/azure.json
  5. 5 1
      docs/swagger.json
  6. 8 0
      kubernetes/opencost.yaml
  7. 57 3
      pkg/cloud/aws/provider.go
  8. 269 0
      pkg/cloud/aws/s3selectintegration.go
  9. 69 0
      pkg/cloud/aws/s3selectintegration_test.go
  10. 2 0
      pkg/cloud/aws/s3selectquerier.go
  11. 4 1
      pkg/cloud/azure/billingexportparser.go
  12. 19 3
      pkg/cloud/azure/provider.go
  13. 145 0
      pkg/cloud/azure/provider_test.go
  14. 7 0
      pkg/cloud/provider/provider.go
  15. 6 0
      pkg/cloud/provider/providerconfig.go
  16. 2 1
      pkg/cmd/costmodel/costmodel.go
  17. 29 24
      pkg/costmodel/aggregation.go
  18. 38 0
      pkg/costmodel/aggregation_test.go
  19. 2 2
      pkg/costmodel/allocation.go
  20. 86 15
      pkg/costmodel/allocation_helpers.go
  21. 4 3
      pkg/costmodel/allocation_helpers_test.go
  22. 1 0
      pkg/costmodel/allocation_types.go
  23. 1 1
      pkg/costmodel/assets.go
  24. 42 25
      pkg/costmodel/cluster.go
  25. 222 35
      pkg/costmodel/costmodel.go
  26. 150 0
      pkg/costmodel/costmodel_test.go
  27. 5 6
      pkg/costmodel/intervals.go
  28. 40 1
      pkg/costmodel/intervals_test.go
  29. 7 1
      pkg/env/costmodelenv.go
  30. 2 2
      pkg/filter/cloudcost/cloudcost.go
  31. 1 1
      pkg/filter/cloudcost/cloudcost_test.go
  32. 37 0
      pkg/filter21/allocation/fields.go
  33. 51 0
      pkg/filter21/allocation/parser.go
  34. 289 0
      pkg/filter21/allocation/parser_test.go
  35. 35 0
      pkg/filter21/asset/fields.go
  36. 50 0
      pkg/filter21/asset/parser.go
  37. 79 0
      pkg/filter21/ast/fields.go
  38. 100 34
      pkg/filter21/ast/lexer.go
  39. 71 5
      pkg/filter21/ast/lexer_test.go
  40. 193 0
      pkg/filter21/ast/ops.go
  41. 589 0
      pkg/filter21/ast/parser.go
  42. 44 0
      pkg/filter21/ast/tree.go
  43. 369 0
      pkg/filter21/ast/walker.go
  44. 52 0
      pkg/filter21/ast/walker_test.go
  45. 7 0
      pkg/filter21/filter.go
  46. 13 0
      pkg/filter21/matcher/allcut.go
  47. 11 0
      pkg/filter21/matcher/allpass.go
  48. 42 0
      pkg/filter21/matcher/and.go
  49. 188 0
      pkg/filter21/matcher/compiler.go
  50. 17 0
      pkg/filter21/matcher/matcher.go
  51. 467 0
      pkg/filter21/matcher/matcher_test.go
  52. 21 0
      pkg/filter21/matcher/not.go
  53. 42 0
      pkg/filter21/matcher/or.go
  54. 79 0
      pkg/filter21/matcher/stringmapmatcher.go
  55. 73 0
      pkg/filter21/matcher/stringmatcher.go
  56. 91 0
      pkg/filter21/matcher/stringslicematcher.go
  57. 226 0
      pkg/filter21/ops/ops.go
  58. 103 0
      pkg/filter21/ops/ops_test.go
  59. 40 0
      pkg/filter21/transform/pass.go
  60. 67 0
      pkg/filter21/transform/promlabels.go
  61. 42 0
      pkg/filter21/transform/unallocated.go
  62. 63 0
      pkg/filter21/util/stack.go
  63. 299 95
      pkg/kubecost/allocation.go
  64. 2 1
      pkg/kubecost/allocation_json.go
  65. 377 155
      pkg/kubecost/allocation_test.go
  66. 0 534
      pkg/kubecost/allocationfilter.go
  67. 153 1148
      pkg/kubecost/allocationfilter_test.go
  68. 260 0
      pkg/kubecost/allocationmatcher.go
  69. 64 0
      pkg/kubecost/allocationmatcher_test.go
  70. 89 11
      pkg/kubecost/allocationprops.go
  71. 151 23
      pkg/kubecost/allocationprops_test.go
  72. 27 20
      pkg/kubecost/asset.go
  73. 2 2
      pkg/kubecost/asset_json_test.go
  74. 105 0
      pkg/kubecost/assetmatcher.go
  75. 4 2
      pkg/kubecost/bingen.go
  76. 0 3
      pkg/kubecost/cloudusage.go
  77. 8 0
      pkg/kubecost/coverage.go
  78. 365 2
      pkg/kubecost/kubecost_codecs.go
  79. 12 0
      pkg/kubecost/kubecost_codecs_test.go
  80. 2 2
      pkg/kubecost/mock.go
  81. 5 3
      pkg/kubecost/query.go
  82. 39 26
      pkg/kubecost/summaryallocation.go
  83. 31 22
      pkg/kubecost/totals.go
  84. 8 8
      pkg/prom/diagnostics.go
  85. 2 2
      pkg/storage/s3storage.go
  86. 0 499
      pkg/util/allocationfilterutil/queryfilters.go
  87. 0 340
      pkg/util/allocationfilterutil/v2/parser.go
  88. 0 545
      pkg/util/allocationfilterutil/v2/parser_test.go
  89. 470 0
      pkg/util/filterutil/asset_test.go
  90. 743 0
      pkg/util/filterutil/filterutil.go
  91. 33 212
      pkg/util/filterutil/queryfilters_test.go
  92. 53 0
      pkg/util/filterutil/testhelpers.go
  93. 72 0
      pkg/util/filterutil/v1.go
  94. 214 0
      pkg/util/filterutil/v1_test.go
  95. 30 0
      pkg/util/typeutil/typeutil.go
  96. 20 18
      spec/opencost-specv01.md
  97. 8 0
      ui/Dockerfile
  98. 2 1
      ui/src/Reports.js

+ 1 - 1
.github/PULL_REQUEST_TEMPLATE.md

@@ -16,5 +16,5 @@
 ## Does this PR require changes to documentation?
 * 
 
-## Have you labeled this PR and its corresponding Issue as "next release" if it should be part of the next Opencost release? If not, why not?
+## Have you labeled this PR and its corresponding Issue as "next release" if it should be part of the next OpenCost release? If not, why not?
 * 

+ 4 - 4
README.md

@@ -21,13 +21,13 @@ To see the full functionality of OpenCost you can view [OpenCost features](https
 
 You can deploy OpenCost on any Kubernetes 1.8+ cluster in a matter of minutes, if not seconds!
 
-Visit the full documentation for [recommended install options](https://www.opencost.io/docs/install).
+Visit the full documentation for [recommended install options](https://www.opencost.io/docs/installation/install).
 
 ## Usage
 
-- [Cost APIs](https://www.opencost.io/docs/api)
-- [CLI / kubectl cost](https://www.opencost.io/docs/kubectl-cost)
-- [Prometheus Metrics](https://www.opencost.io/docs/prometheus)
+- [Cost APIs](https://www.opencost.io/docs/integrations/api)
+- [CLI / kubectl cost](https://www.opencost.io/docs/integrations/kubectl-cost)
+- [Prometheus Metrics](https://www.opencost.io/docs/integrations/prometheus)
 - Reference [User Interface](https://github.com/opencost/opencost/tree/develop/ui)
 
 ## Contributing

+ 2 - 2
configs/aws.json

@@ -12,11 +12,11 @@
     "internetNetworkEgress": "0.143",
     "spotLabel": "kops.k8s.io/instancegroup",
     "spotLabelValue": "spotinstance-nodes",
-    "awsServiceKeyName": "AKIXXX",
+    "awsServiceKeyName": "",
     "awsServiceKeySecret": "",
     "awsSpotDataRegion":"us-east-2",
     "awsSpotDataBucket": "x",
-    "awsSpotDataPrefix": "spotdata",
+    "awsSpotDataPrefix": "",
     "athenaBucketName": "s3://x",
     "athenaRegion": "us-east-1",
     "athenaDatabase": "",

+ 6 - 5
configs/azure.json

@@ -2,17 +2,18 @@
     "provider": "Azure",
     "description": "Azure estimates based on April 2019 advertised prices",
     "CPU": "0.03900",
-    "spotCPU": "0.007764", 
-    "RAM": "0.001917", 
+    "spotCPU": "0.007764",
+    "RAM": "0.001917",
+    "GPU": "0.0428925",
     "spotRAM": "0.000382",
-    "storage": "0.00005479452" ,
+    "storage": "0.00005479452",
     "zoneNetworkEgress": "0.01",
     "regionNetworkEgress": "0.01",
     "internetNetworkEgress": "0.0725",
     "spotLabel": "kubernetes.azure.com/scalesetpriority",
     "spotLabelValue": "spot",
     "azureSubscriptionID": "",
-    "azureClientID": "" ,
-    "azureClientSecret": "" ,
+    "azureClientID": "",
+    "azureClientSecret": "",
     "azureTenantID": ""
 }

+ 5 - 1
docs/swagger.json

@@ -69,7 +69,7 @@
           {
           "name": "aggregate",
           "in": "query",
-          "description": "Field by which to aggregate the results. Accepts: `cluster`, `node`, `namespace`, `controllerKind`, `controller`, `service`, `pod`, `container`, `label:<name>`, and `annotation:<name>`. Also accepts comma-separated lists for multi-aggregation, like `namespace,label:app`.",
+          "description": "Field by which to aggregate the results. Accepts: `all`, `cluster`, `node`, `namespace`, `controllerKind`, `controller`, `service`, `pod`, `container`, `label:<name>`, and `annotation:<name>`. Also accepts comma-separated lists for multi-aggregation, like `namespace,label:app`. Defaults to `cluster,node,namespace,pod,container`.",
           "required": false,
           "style": "form",
           "explode": true,
@@ -108,6 +108,10 @@
             "container": {
               "summary": "Aggregates by the containers present in the cluster",
               "value": "container"
+            },
+            "all": {
+              "summary": "Aggregates into a single allocation",
+              "value": "all"
             }
           }
         },

+ 8 - 0
kubernetes/opencost.yaml

@@ -159,6 +159,14 @@ spec:
             - name: CLUSTER_ID
               value: "cluster-one" # Default cluster ID to use if cluster_id is not set in Prometheus metrics.
           imagePullPolicy: Always
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - ALL
+            privileged: false
+            readOnlyRootFilesystem: true
+            runAsUser: 1001
         - image: quay.io/kubecost1/opencost-ui:latest
           name: opencost-ui
           resources:

+ 57 - 3
pkg/cloud/aws/provider.go

@@ -534,6 +534,12 @@ func (aws *AWS) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 				return err
 			}
 
+			// If the sample nil service key name is set, zero it out so that it is not
+			// misinterpreted as a real service key.
+			if asfi.ServiceKeyName == "AKIXXX" {
+				asfi.ServiceKeyName = ""
+			}
+
 			c.ServiceKeyName = asfi.ServiceKeyName
 			if asfi.ServiceKeySecret != "" {
 				c.ServiceKeySecret = asfi.ServiceKeySecret
@@ -551,6 +557,13 @@ func (aws *AWS) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 			if err != nil {
 				return err
 			}
+
+			// If the sample nil service key name is set, zero it out so that it is not
+			// misinterpreted as a real service key.
+			if aai.ServiceKeyName == "AKIXXX" {
+				aai.ServiceKeyName = ""
+			}
+
 			c.AthenaBucketName = aai.AthenaBucketName
 			c.AthenaRegion = aai.AthenaRegion
 			c.AthenaDatabase = aai.AthenaDatabase
@@ -1056,9 +1069,45 @@ func (aws *AWS) populatePricing(resp *http.Response, inputkeys map[string]bool)
 						aws.Pricing[spotKey].OnDemand = offerTerm
 						var cost string
 						if _, isMatch := OnDemandRateCodes[offerTerm.OfferTermCode]; isMatch {
-							cost = offerTerm.PriceDimensions[strings.Join([]string{sku.(string), offerTerm.OfferTermCode, HourlyRateCode}, ".")].PricePerUnit.USD
+							priceDimensionKey := strings.Join([]string{sku.(string), offerTerm.OfferTermCode, HourlyRateCode}, ".")
+							dimension, ok := offerTerm.PriceDimensions[priceDimensionKey]
+							if ok {
+								cost = dimension.PricePerUnit.USD
+							} else {
+								// this is an edge case seen in AWS CN pricing files, including here just in case
+								// if there is only one dimension, use it, even if the key is incorrect, otherwise assume defaults
+								if len(offerTerm.PriceDimensions) == 1 {
+									for key, backupDimension := range offerTerm.PriceDimensions {
+										cost = backupDimension.PricePerUnit.USD
+										log.DedupedWarningf(5, "using:%s for a price dimension instead of missing dimension: %s", offerTerm.PriceDimensions[key], priceDimensionKey)
+										break
+									}
+								} else if len(offerTerm.PriceDimensions) == 0 {
+									log.DedupedWarningf(5, "populatePricing: no pricing dimension available for: %s.", priceDimensionKey)
+								} else {
+									log.DedupedWarningf(5, "populatePricing: no assumable pricing dimension available for: %s.", priceDimensionKey)
+								}
+							}
 						} else if _, isMatch := OnDemandRateCodesCn[offerTerm.OfferTermCode]; isMatch {
-							cost = offerTerm.PriceDimensions[strings.Join([]string{sku.(string), offerTerm.OfferTermCode, HourlyRateCodeCn}, ".")].PricePerUnit.CNY
+							priceDimensionKey := strings.Join([]string{sku.(string), offerTerm.OfferTermCode, HourlyRateCodeCn}, ".")
+							dimension, ok := offerTerm.PriceDimensions[priceDimensionKey]
+							if ok {
+								cost = dimension.PricePerUnit.CNY
+							} else {
+								// fall through logic for handling inconsistencies in AWS CN pricing files
+								// if there is only one dimension, use it, even if the key is incorrect, otherwise assume defaults
+								if len(offerTerm.PriceDimensions) == 1 {
+									for key, backupDimension := range offerTerm.PriceDimensions {
+										cost = backupDimension.PricePerUnit.CNY
+										log.DedupedWarningf(5, "using:%s for a price dimension instead of missing dimension: %s", offerTerm.PriceDimensions[key], priceDimensionKey)
+										break
+									}
+								} else if len(offerTerm.PriceDimensions) == 0 {
+									log.DedupedWarningf(5, "populatePricing: no pricing dimension available for: %s.", priceDimensionKey)
+								} else {
+									log.DedupedWarningf(5, "populatePricing: no assumable pricing dimension available for: %s.", priceDimensionKey)
+								}
+							}
 						}
 						if strings.Contains(key, "EBS:VolumeP-IOPS.piops") {
 							// If the specific UsageType is the per IO cost used on io1 volumes
@@ -1401,7 +1450,6 @@ func (aws *AWS) ConfigureAuthWith(config *models.CustomPricing) error {
 
 // Gets the aws key id and secret
 func (aws *AWS) getAWSAuth(forceReload bool, cp *models.CustomPricing) (string, string) {
-
 	// 1. Check config values first (set from frontend UI)
 	if cp.ServiceKeyName != "" && cp.ServiceKeySecret != "" {
 		aws.ServiceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
@@ -1461,6 +1509,12 @@ func (aws *AWS) loadAWSAuthSecret(force bool) (*AWSAccessKey, error) {
 		return nil, err
 	}
 
+	// If the sample nil service key name is set, zero it out so that it is not
+	// misinterpreted as a real service key.
+	if ak.AccessKeyID == "AKIXXX" {
+		ak.AccessKeyID = ""
+	}
+
 	awsSecret = &ak
 	return awsSecret, nil
 }

+ 269 - 0
pkg/cloud/aws/s3selectintegration.go

@@ -0,0 +1,269 @@
+package aws
+
+import (
+	"encoding/csv"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+)
+
+const s3SelectDateLayout = "2006-01-02T15:04:05Z"
+
+// S3Object is aliased as "s" in queries
+const s3SelectAccountID = `s."bill/PayerAccountId"`
+
+const s3SelectItemType = `s."lineItem/LineItemType"`
+const s3SelectStartDate = `s."lineItem/UsageStartDate"`
+const s3SelectProductCode = `s."lineItem/ProductCode"`
+const s3SelectResourceID = `s."lineItem/ResourceId"`
+
+const s3SelectIsNode = `SUBSTRING(s."lineItem/ResourceId",1,2) = 'i-'`
+const s3SelectIsVol = `SUBSTRING(s."lineItem/ResourceId", 1, 4) = 'vol-'`
+const s3SelectIsNetwork = `s."lineItem/UsageType" LIKE '%Bytes'`
+
+const s3SelectListCost = `s."lineItem/UnblendedCost"`
+const s3SelectNetCost = `s."lineItem/NetUnblendedCost"`
+
+// These two may be used for Amortized<Net>Cost
+const s3SelectRICost = `s."reservation/EffectiveCost"`
+const s3SelectSPCost = `s."savingsPlan/SavingsPlanEffectiveCost"`
+
+type S3SelectIntegration struct {
+	S3SelectQuerier
+}
+
+func (s3si *S3SelectIntegration) GetCloudCost(
+	start,
+	end time.Time,
+) (*kubecost.CloudCostSetRange, error) {
+	log.Infof(
+		"S3SelectIntegration[%s]: GetCloudCost: %s",
+		s3si.Key(),
+		kubecost.NewWindow(&start, &end).String(),
+	)
+
+	// Set midnight yesterday as last point in time reconciliation data
+	// can be pulled from to ensure complete days of data
+	midnightYesterday := time.Now().In(
+		time.UTC,
+	).Truncate(time.Hour*24).AddDate(0, 0, -1)
+	if end.After(midnightYesterday) {
+		end = midnightYesterday
+	}
+
+	// ccsr to populate with cloudcosts.
+	ccsr, err := kubecost.NewCloudCostSetRange(
+		start,
+		end,
+		timeutil.Day,
+		s3si.Key(),
+	)
+	if err != nil {
+		return nil, err
+	}
+	// acquire S3 client
+	client, err := s3si.GetS3Client()
+	if err != nil {
+		return nil, err
+	}
+	// Acquire query keys
+	queryKeys, err := s3si.GetQueryKeys(start, end, client)
+	if err != nil {
+		return nil, err
+	}
+	// Acquire headers
+	headers, err := s3si.GetHeaders(queryKeys, client)
+	if err != nil {
+		return nil, err
+	}
+	// Exactly what it says on the tin. Though is there a set equivalent
+	// in Go? This seems like a good use case for that.
+	allColumns := map[string]bool{}
+	for _, header := range headers {
+		allColumns[header] = true
+	}
+
+	formattedStart := start.Format("2006-01-02")
+	formattedEnd := end.Format("2006-01-02")
+	selectColumns := []string{
+		s3SelectStartDate,
+		s3SelectAccountID,
+		s3SelectResourceID,
+		s3SelectItemType,
+		s3SelectProductCode,
+		s3SelectIsNode,
+		s3SelectIsVol,
+		s3SelectIsNetwork,
+		s3SelectListCost,
+	}
+	// OC equivalent to KCM env flags relevant at all?
+	// Check for Reservation columns in CUR and query if available
+	checkReservations := allColumns[s3SelectRICost]
+	if checkReservations {
+		selectColumns = append(selectColumns, s3SelectRICost)
+	}
+
+	// Check for Savings Plan Columns in CUR and query if available
+	checkSavingsPlan := allColumns[s3SelectSPCost]
+	if checkSavingsPlan {
+		selectColumns = append(selectColumns, s3SelectSPCost)
+	}
+
+	// Build map of query columns to use for parsing query
+	columnIndexes := map[string]int{}
+	for i, column := range selectColumns {
+		columnIndexes[column] = i
+	}
+	// Build query
+	selectStr := strings.Join(selectColumns, ", ")
+	queryStr := `SELECT %s FROM s3object s
+	WHERE (CAST(s."lineItem/UsageStartDate" AS TIMESTAMP) BETWEEN CAST('%s' AS TIMESTAMP) AND CAST('%s' AS TIMESTAMP))
+	AND s."lineItem/ResourceId" <> ''
+	AND (
+		(
+			s."lineItem/ProductCode" = 'AmazonEC2' AND (
+				SUBSTRING(s."lineItem/ResourceId",1,2) = 'i-'
+				OR SUBSTRING(s."lineItem/ResourceId",1,4) = 'vol-'
+			)
+		)
+		OR s."lineItem/ProductCode" = 'AWSELB'
+       OR s."lineItem/ProductCode" = 'AmazonFSx'
+	)`
+	query := fmt.Sprintf(queryStr, selectStr, formattedStart, formattedEnd)
+
+	processResults := func(reader *csv.Reader) error {
+		_, err2 := reader.Read()
+		if err2 == io.EOF {
+			return nil
+		}
+		for {
+			row, err3 := reader.Read()
+			if err3 == io.EOF {
+				return nil
+			}
+
+			startStr := GetCSVRowValue(row, columnIndexes, s3SelectStartDate)
+			itemAccountID := GetCSVRowValue(row, columnIndexes, s3SelectAccountID)
+			itemProviderID := GetCSVRowValue(row, columnIndexes, s3SelectResourceID)
+			lineItemType := GetCSVRowValue(row, columnIndexes, s3SelectItemType)
+			itemProductCode := GetCSVRowValue(row, columnIndexes, s3SelectProductCode)
+			isNode, _ := strconv.ParseBool(GetCSVRowValue(row, columnIndexes, s3SelectIsNode))
+			isVol, _ := strconv.ParseBool(GetCSVRowValue(row, columnIndexes, s3SelectIsVol))
+			isNetwork, _ := strconv.ParseBool(GetCSVRowValue(row, columnIndexes, s3SelectIsNetwork))
+			var (
+				amortizedCost float64
+				listCost      float64
+				netCost       float64
+			)
+			// Get list and net costs
+			listCost, err = GetCSVRowValueFloat(row, columnIndexes, s3SelectListCost)
+			if err != nil {
+				return err
+			}
+			netCost, err = GetCSVRowValueFloat(row, columnIndexes, s3SelectNetCost)
+			if err != nil {
+				return err
+			}
+
+			// If there is a reservation_reservation_a_r_n on the line item use the awsRIPricingSUMColumn as cost
+			if checkReservations && lineItemType == "DiscountedUsage" {
+				amortizedCost, err = GetCSVRowValueFloat(row, columnIndexes, s3SelectRICost)
+				if err != nil {
+					log.Errorf(err.Error())
+					continue
+				}
+				// If there is a lineItemType of SavingsPlanCoveredUsage use the awsSPPricingSUMColumn
+			} else if checkSavingsPlan && lineItemType == "SavingsPlanCoveredUsage" {
+				amortizedCost, err = GetCSVRowValueFloat(row, columnIndexes, s3SelectSPCost)
+				if err != nil {
+					log.Errorf(err.Error())
+					continue
+				}
+			} else {
+				// Default to listCost
+				amortizedCost = listCost
+			}
+			category := SelectAWSCategory(isNode, isVol, isNetwork, itemProductCode, "")
+			// Retrieve final stanza of product code for ProviderID
+			if itemProductCode == "AWSELB" || itemProductCode == "AmazonFSx" {
+				itemProviderID = ParseARN(itemProviderID)
+			}
+
+			properties := kubecost.CloudCostProperties{}
+			properties.Provider = kubecost.AWSProvider
+			properties.AccountID = itemAccountID
+			properties.Category = category
+			properties.Service = itemProductCode
+			properties.ProviderID = itemProviderID
+
+			itemStart, err := time.Parse(s3SelectDateLayout, startStr)
+			if err != nil {
+				log.Infof(
+					"Unable to parse '%s': '%s'",
+					s3SelectStartDate,
+					err.Error(),
+				)
+				itemStart = time.Now()
+			}
+			itemStart = itemStart.Truncate(time.Hour * 24)
+			itemEnd := itemStart.AddDate(0, 0, 1)
+
+			cc := &kubecost.CloudCost{
+				Properties: &properties,
+				Window:     kubecost.NewWindow(&itemStart, &itemEnd),
+				ListCost: kubecost.CostMetric{
+					Cost: listCost,
+				},
+				NetCost: kubecost.CostMetric{
+					Cost: netCost,
+				},
+				AmortizedNetCost: kubecost.CostMetric{
+					Cost: amortizedCost,
+				},
+				AmortizedCost: kubecost.CostMetric{
+					Cost: amortizedCost,
+				},
+				InvoicedCost: kubecost.CostMetric{
+					Cost: netCost,
+				},
+			}
+			ccsr.LoadCloudCost(cc)
+		}
+	}
+	err = s3si.Query(query, queryKeys, client, processResults)
+	if err != nil {
+		return nil, err
+	}
+
+	return ccsr, nil
+}
+
+func (s3si *S3SelectIntegration) GetHeaders(queryKeys []string, client *s3.Client) ([]string, error) {
+	// Query to grab only header line from file
+	query := "SELECT * FROM S3OBJECT LIMIT 1"
+	var record []string
+
+	proccessheaders := func(reader *csv.Reader) error {
+		var err error
+		record, err = reader.Read()
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	// Use only the first query key with assumption that files share schema
+	err := s3si.Query(query, []string{queryKeys[0]}, client, proccessheaders)
+	if err != nil {
+		return nil, err
+	}
+
+	return record, nil
+}

+ 69 - 0
pkg/cloud/aws/s3selectintegration_test.go

@@ -0,0 +1,69 @@
+package aws
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/opencost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+)
+
+func TestS3Integration_GetCloudCost(t *testing.T) {
+	s3ConfigPath := os.Getenv("S3_CONFIGURATION")
+	if s3ConfigPath == "" {
+		t.Skip("skipping integration test, set environment variable S3_CONFIGURATION")
+	}
+	s3ConfigBin, err := os.ReadFile(s3ConfigPath)
+	if err != nil {
+		t.Fatalf("failed to read config file: %s", err.Error())
+	}
+	var s3Config S3Configuration
+	err = json.Unmarshal(s3ConfigBin, &s3Config)
+	if err != nil {
+		t.Fatalf("failed to unmarshal config from JSON: %s", err.Error())
+	}
+	testCases := map[string]struct {
+		integration *S3SelectIntegration
+		start       time.Time
+		end         time.Time
+		expected    bool
+	}{
+		// No CUR data is expected within 2 days of now
+		"too_recent_window": {
+			integration: &S3SelectIntegration{
+				S3SelectQuerier: S3SelectQuerier{
+					S3Connection: S3Connection{
+						S3Configuration: s3Config,
+					},
+				},
+			},
+			end:      time.Now(),
+			start:    time.Now().Add(-timeutil.Day),
+			expected: true,
+		},
+		// CUR data should be available
+		"last week window": {
+			integration: &S3SelectIntegration{
+				S3SelectQuerier: S3SelectQuerier{
+					S3Connection: S3Connection{
+						S3Configuration: s3Config,
+					},
+				},
+			},
+			end:      time.Now().Add(-7 * timeutil.Day),
+			start:    time.Now().Add(-8 * timeutil.Day),
+			expected: false,
+		},
+	}
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual, err := testCase.integration.GetCloudCost(testCase.start, testCase.end)
+			if err != nil {
+				t.Errorf("Other error during testing %s", err)
+			} else if actual.IsEmpty() != testCase.expected {
+				t.Errorf("Incorrect result, actual emptiness: %t, expected: %t", actual.IsEmpty(), testCase.expected)
+			}
+		})
+	}
+}

+ 2 - 0
pkg/cloud/aws/s3selectquerier.go

@@ -12,12 +12,14 @@ import (
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/config"
 	"github.com/opencost/opencost/pkg/util/stringutil"
 )
 
 type S3SelectQuerier struct {
 	S3Connection
+	connectionStatus cloud.ConnectionStatus
 }
 
 func (s3sq *S3SelectQuerier) Equals(config config.Config) bool {

+ 4 - 1
pkg/cloud/azure/billingexportparser.go

@@ -44,6 +44,9 @@ func (brv *BillingRowValues) IsCompute(category string) bool {
 	if category == kubecost.NetworkCategory && brv.MeterCategory == "Virtual Network" {
 		return true
 	}
+	if category == kubecost.NetworkCategory && brv.MeterCategory == "Bandwidth" {
+		return true
+	}
 	return false
 }
 
@@ -265,7 +268,7 @@ func AzureSetProviderID(abv *BillingRowValues) string {
 		return fmt.Sprintf("%v", value2)
 	}
 
-	if category == kubecost.StorageCategory {
+	if category == kubecost.StorageCategory || (category == kubecost.NetworkCategory && abv.MeterCategory == "Bandwidth") {
 		if value2, ok2 := abv.Tags["creationSource"]; ok2 {
 			creationSource := fmt.Sprintf("%v", value2)
 			return strings.TrimPrefix(creationSource, "aks-")

+ 19 - 3
pkg/cloud/azure/provider.go

@@ -208,7 +208,7 @@ func getRegions(service string, subscriptionsClient subscriptions.Client, provid
 						if loc, ok := allLocations[displName]; ok {
 							supLocations[loc] = displName
 						} else {
-							log.Warnf("unsupported cloud region %q", loc)
+							log.Warnf("unsupported cloud region %q", displName)
 						}
 					}
 					break
@@ -226,7 +226,7 @@ func getRegions(service string, subscriptionsClient subscriptions.Client, provid
 						if loc, ok := allLocations[displName]; ok {
 							supLocations[loc] = displName
 						} else {
-							log.Warnf("unsupported cloud region %q", loc)
+							log.Warnf("unsupported cloud region %q", displName)
 						}
 					}
 					break
@@ -1415,12 +1415,28 @@ func (az *Azure) findCostForDisk(d *compute.Disk) (float64, error) {
 		storageClass = AzureDiskStandardStorageClass
 	}
 
-	key := *d.Location + "," + storageClass
+	loc := ""
+	if d.Location != nil {
+		loc = *d.Location
+	}
+	key := loc + "," + storageClass
 
+	if p, ok := az.Pricing[key]; !ok || p == nil {
+		return 0.0, fmt.Errorf("failed to find pricing for key: %s", key)
+	}
+	if az.Pricing[key].PV == nil {
+		return 0.0, fmt.Errorf("pricing for key '%s' has nil PV", key)
+	}
 	diskPricePerGBHour, err := strconv.ParseFloat(az.Pricing[key].PV.Cost, 64)
 	if err != nil {
 		return 0.0, fmt.Errorf("error converting to float: %s", err)
 	}
+	if d.DiskProperties == nil {
+		return 0.0, fmt.Errorf("disk properties are nil")
+	}
+	if d.DiskSizeGB == nil {
+		return 0.0, fmt.Errorf("disk size is nil")
+	}
 	cost := diskPricePerGBHour * timeutil.HoursPerMonth * float64(*d.DiskSizeGB)
 
 	return cost, nil

+ 145 - 0
pkg/cloud/azure/provider_test.go

@@ -1,12 +1,15 @@
 package azure
 
 import (
+	"fmt"
 	"testing"
 
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-11-01/compute"
 	"github.com/Azure/azure-sdk-for-go/services/preview/commerce/mgmt/2015-06-01-preview/commerce"
 	"github.com/stretchr/testify/require"
 
 	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/util/mathutil"
 )
 
 func TestParseAzureSubscriptionID(t *testing.T) {
@@ -95,3 +98,145 @@ func TestConvertMeterToPricings(t *testing.T) {
 		require.Equal(t, expected, results)
 	})
 }
+
+func TestAzure_findCostForDisk(t *testing.T) {
+	var loc string = "location"
+	var size int32 = 1
+
+	az := &Azure{
+		Pricing: map[string]*AzurePricing{
+			"location,nil": nil,
+			"location,nilpv": {
+				PV: nil,
+			},
+			"location,ssd": {
+				PV: &models.PV{
+					Cost: "1",
+				},
+			},
+		},
+	}
+
+	testCases := []struct {
+		name   string
+		disk   *compute.Disk
+		exp    float64
+		expErr error
+	}{
+		{
+			"disk is nil",
+			nil,
+			0.0,
+			fmt.Errorf("disk is empty"),
+		},
+		{
+			"nil location",
+			&compute.Disk{
+				Location: nil,
+				Sku: &compute.DiskSku{
+					Name: "ssd",
+				},
+				DiskProperties: &compute.DiskProperties{
+					DiskSizeGB: &size,
+				},
+			},
+			0.0,
+			fmt.Errorf("failed to find pricing for key: ,ssd"),
+		},
+		{
+			"nil disk properties",
+			&compute.Disk{
+				Location: &loc,
+				Sku: &compute.DiskSku{
+					Name: "ssd",
+				},
+				DiskProperties: nil,
+			},
+			0.0,
+			fmt.Errorf("disk properties are nil"),
+		},
+		{
+			"nil disk size",
+			&compute.Disk{
+				Location: &loc,
+				Sku: &compute.DiskSku{
+					Name: "ssd",
+				},
+				DiskProperties: &compute.DiskProperties{
+					DiskSizeGB: nil,
+				},
+			},
+			0.0,
+			fmt.Errorf("disk size is nil"),
+		},
+		{
+			"sku does not exist",
+			&compute.Disk{
+				Location: &loc,
+				Sku: &compute.DiskSku{
+					Name: "doesnotexist",
+				},
+				DiskProperties: &compute.DiskProperties{
+					DiskSizeGB: &size,
+				},
+			},
+			0.0,
+			fmt.Errorf("failed to find pricing for key: location,doesnotexist"),
+		},
+		{
+			"pricing is nil",
+			&compute.Disk{
+				Sku: &compute.DiskSku{
+					Name: "nil",
+				},
+				DiskProperties: &compute.DiskProperties{
+					DiskSizeGB: &size,
+				},
+			},
+			0.0,
+			fmt.Errorf("failed to find pricing for key: location,nil"),
+		},
+		{
+			"pricing.PV is nil",
+			&compute.Disk{
+				Sku: &compute.DiskSku{
+					Name: "nilpv",
+				},
+				DiskProperties: &compute.DiskProperties{
+					DiskSizeGB: &size,
+				},
+			},
+			0.0,
+			fmt.Errorf("pricing for key 'location,nilpv' has nil PV"),
+		},
+		{
+			"valid (ssd)",
+			&compute.Disk{
+				Location: &loc,
+				Sku: &compute.DiskSku{
+					Name: "ssd",
+				},
+				DiskProperties: &compute.DiskProperties{
+					DiskSizeGB: &size,
+				},
+			},
+			730.0,
+			nil,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			act, actErr := az.findCostForDisk(tc.disk)
+			if actErr != nil && tc.expErr == nil {
+				t.Fatalf("unexpected error: %s", actErr)
+			}
+			if tc.expErr != nil && actErr == nil {
+				t.Fatalf("missing expected error: %s", tc.expErr)
+			}
+			if !mathutil.Approximately(tc.exp, act) {
+				t.Fatalf("expected value %f; got %f", tc.exp, act)
+			}
+		})
+	}
+}

+ 7 - 0
pkg/cloud/provider/provider.go

@@ -162,6 +162,13 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 		cp.accountID = providerConfig.customPricing.ClusterAccountID
 	}
 
+	providerConfig.Update(func(cp *models.CustomPricing) error {
+		if cp.ServiceKeyName == "AKIXXX" {
+			cp.ServiceKeyName = ""
+		}
+		return nil
+	})
+
 	switch cp.provider {
 	case kubecost.CSVProvider:
 		log.Infof("Using CSV Provider with CSV at %s", env.GetCSVPath())

+ 6 - 0
pkg/cloud/provider/providerconfig.go

@@ -143,6 +143,12 @@ func (pc *ProviderConfig) loadConfig(writeIfNotExists bool) (*models.CustomPrici
 		pc.customPricing.ShareTenancyCosts = models.DefaultShareTenancyCost
 	}
 
+	// If the sample nil service key name is set, zero it out so that it is not
+	// misinterpreted as a real service key.
+	if pc.customPricing.ServiceKeyName == "AKIXXX" {
+		pc.customPricing.ServiceKeyName = ""
+	}
+
 	return pc.customPricing, nil
 }
 

+ 2 - 1
pkg/cmd/costmodel/costmodel.go

@@ -55,7 +55,8 @@ func Execute(opts *CostModelOpts) error {
 func StartExportWorker(ctx context.Context, model costmodel.AllocationModel) error {
 	exportPath := env.GetExportCSVFile()
 	if exportPath == "" {
-		return fmt.Errorf("%s is not set, skipping CSV exporter", exportPath)
+		log.Infof("%s is not set, CSV export is disabled", env.ExportCSVFile)
+		return nil
 	}
 	fm, err := filemanager.NewFileManager(exportPath)
 	if err != nil {

+ 29 - 24
pkg/costmodel/aggregation.go

@@ -2112,17 +2112,30 @@ func (a *Accesses) AggregateCostModelHandler(w http.ResponseWriter, r *http.Requ
 // ParseAggregationProperties attempts to parse and return aggregation properties
 // encoded under the given key. If none exist, or if parsing fails, an error
 // is returned with empty AllocationProperties.
-func ParseAggregationProperties(qp httputil.QueryParams, key string) ([]string, error) {
+func ParseAggregationProperties(aggregations []string) ([]string, error) {
 	aggregateBy := []string{}
-	for _, agg := range qp.GetList(key, ",") {
-		aggregate := strings.TrimSpace(agg)
-		if aggregate != "" {
-			if prop, err := kubecost.ParseProperty(aggregate); err == nil {
-				aggregateBy = append(aggregateBy, string(prop))
-			} else if strings.HasPrefix(aggregate, "label:") {
-				aggregateBy = append(aggregateBy, aggregate)
-			} else if strings.HasPrefix(aggregate, "annotation:") {
-				aggregateBy = append(aggregateBy, aggregate)
+	// If no aggregation was requested, default to container-level granularity, keyed by Cluster/Node/Namespace/Pod/Container
+	if len(aggregations) == 0 {
+		aggregateBy = []string{
+			kubecost.AllocationClusterProp,
+			kubecost.AllocationNodeProp,
+			kubecost.AllocationNamespaceProp,
+			kubecost.AllocationPodProp,
+			kubecost.AllocationContainerProp,
+		}
+	} else if len(aggregations) == 1 && aggregations[0] == "all" {
+		aggregateBy = []string{}
+	} else {
+		for _, agg := range aggregations {
+			aggregate := strings.TrimSpace(agg)
+			if aggregate != "" {
+				if prop, err := kubecost.ParseProperty(aggregate); err == nil {
+					aggregateBy = append(aggregateBy, string(prop))
+				} else if strings.HasPrefix(aggregate, "label:") {
+					aggregateBy = append(aggregateBy, aggregate)
+				} else if strings.HasPrefix(aggregate, "annotation:") {
+					aggregateBy = append(aggregateBy, aggregate)
+				}
 			}
 		}
 	}
@@ -2154,7 +2167,8 @@ func (a *Accesses) ComputeAllocationHandlerSummary(w http.ResponseWriter, r *htt
 	// aggregate results. Some fields allow a sub-field, which is distinguished
 	// with a colon; e.g. "label:app".
 	// Examples: "namespace", "namespace,label:app"
-	aggregateBy, err := ParseAggregationProperties(qp, "aggregate")
+	aggregations := qp.GetList("aggregate", ",")
+	aggregateBy, err := ParseAggregationProperties(aggregations)
 	if err != nil {
 		http.Error(w, fmt.Sprintf("Invalid 'aggregate' parameter: %s", err), http.StatusBadRequest)
 	}
@@ -2235,7 +2249,8 @@ func (a *Accesses) ComputeAllocationHandler(w http.ResponseWriter, r *http.Reque
 	// aggregate results. Some fields allow a sub-field, which is distinguished
 	// with a colon; e.g. "label:app".
 	// Examples: "namespace", "namespace,label:app"
-	aggregateBy, err := ParseAggregationProperties(qp, "aggregate")
+	aggregations := qp.GetList("aggregate", ",")
+	aggregateBy, err := ParseAggregationProperties(aggregations)
 	if err != nil {
 		http.Error(w, fmt.Sprintf("Invalid 'aggregate' parameter: %s", err), http.StatusBadRequest)
 	}
@@ -2265,9 +2280,9 @@ func (a *Accesses) ComputeAllocationHandler(w http.ResponseWriter, r *http.Reque
 	includeProportionalAssetResourceCosts := qp.GetBool("includeProportionalAssetResourceCosts", false)
 
 	// include aggregated labels/annotations if true
-	includeAggregatedMetadata := qp.GetBool("includeAggregatedMetadata", true)
+	includeAggregatedMetadata := qp.GetBool("includeAggregatedMetadata", false)
 
-	asr, err := a.Model.QueryAllocation(window, resolution, step, aggregateBy, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata)
+	asr, err := a.Model.QueryAllocation(window, resolution, step, aggregateBy, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, accumulateBy)
 	if err != nil {
 		if strings.Contains(strings.ToLower(err.Error()), "bad request") {
 			WriteError(w, BadRequest(err.Error()))
@@ -2278,16 +2293,6 @@ func (a *Accesses) ComputeAllocationHandler(w http.ResponseWriter, r *http.Reque
 		return
 	}
 
-	// Accumulate, if requested
-	if accumulateBy != kubecost.AccumulateOptionNone {
-		asr, err = asr.Accumulate(accumulateBy)
-		if err != nil {
-			log.Errorf("error accumulating by %v: %s", accumulateBy, err)
-			WriteError(w, InternalServerError(fmt.Errorf("error accumulating by %v: %s", accumulateBy, err).Error()))
-			return
-		}
-	}
-
 	w.Write(WrapData(asr, nil))
 }
 

+ 38 - 0
pkg/costmodel/aggregation_test.go

@@ -3,6 +3,7 @@ package costmodel
 import (
 	"testing"
 
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util"
 )
 
@@ -193,3 +194,40 @@ func TestScaleHourlyCostData(t *testing.T) {
 		}
 	}
 }
+
+func TestParseAggregationProperties_Default(t *testing.T) {
+	got, err := ParseAggregationProperties([]string{})
+	expected := []string{
+		kubecost.AllocationClusterProp,
+		kubecost.AllocationNodeProp,
+		kubecost.AllocationNamespaceProp,
+		kubecost.AllocationPodProp,
+		kubecost.AllocationContainerProp,
+	}
+
+	if err != nil {
+		t.Fatalf("TestParseAggregationPropertiesDefault: unexpected error: %s", err)
+	}
+
+	if len(expected) != len(got) {
+		t.Fatalf("TestParseAggregationPropertiesDefault: expected length of %d, got: %d", len(expected), len(got))
+	}
+
+	for i := range got {
+		if got[i] != expected[i] {
+			t.Fatalf("TestParseAggregationPropertiesDefault: expected[i] should be %s, got[i]:%s", expected[i], got[i])
+		}
+	}
+}
+
+func TestParseAggregationProperties_All(t *testing.T) {
+	got, err := ParseAggregationProperties([]string{"all"})
+
+	if err != nil {
+		t.Fatalf("TestParseAggregationProperties_All: unexpected error: %s", err)
+	}
+
+	if len(got) != 0 {
+		t.Fatalf("TestParseAggregationProperties_All: expected length of 0, got: %d", len(got))
+	}
+}

+ 2 - 2
pkg/costmodel/allocation.go

@@ -55,7 +55,7 @@ const (
 	queryFmtPodsWithReplicaSetOwner     = `sum(avg_over_time(kube_pod_owner{owner_kind="ReplicaSet", %s}[%s])) by (pod, owner_name, namespace ,%s)`
 	queryFmtReplicaSetsWithoutOwners    = `avg(avg_over_time(kube_replicaset_owner{owner_kind="<none>", owner_name="<none>", %s}[%s])) by (replicaset, namespace, %s)`
 	queryFmtReplicaSetsWithRolloutOwner = `avg(avg_over_time(kube_replicaset_owner{owner_kind="Rollout", %s}[%s])) by (replicaset, namespace, owner_kind, owner_name, %s)`
-	queryFmtLBCostPerHr                 = `avg(avg_over_time(kubecost_load_balancer_cost{%s}[%s])) by (namespace, service_name, %s)`
+	queryFmtLBCostPerHr                 = `avg(avg_over_time(kubecost_load_balancer_cost{%s}[%s])) by (namespace, service_name, ingress_ip, %s)`
 	queryFmtLBActiveMins                = `count(kubecost_load_balancer_cost{%s}) by (namespace, service_name, %s)[%s:%s]`
 	queryFmtOldestSample                = `min_over_time(timestamp(group(node_cpu_hourly_cost{%s}))[%s:%s])`
 	queryFmtNewestSample                = `max_over_time(timestamp(group(node_cpu_hourly_cost{%s}))[%s:%s])`
@@ -662,7 +662,7 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	// split appropriately among each pod's container allocation.
 	podPVCMap := map[podKey][]*pvc{}
 	buildPodPVCMap(podPVCMap, pvMap, pvcMap, podMap, resPodPVCAllocation, podUIDKeyMap, ingestPodUID)
-	applyPVCsToPods(window, podMap, podPVCMap, pvcMap)
+	applyPVCsToPods(window, podMap, podPVCMap, pvcMap, resolution)
 
 	// Identify PVCs without pods and add pv costs to the unmounted Allocation for the pvc's cluster
 	applyUnmountedPVCs(window, podMap, pvcMap)

+ 86 - 15
pkg/costmodel/allocation_helpers.go

@@ -18,7 +18,15 @@ import (
 
 // This is a bit of a hack to work around garbage data from cadvisor
 // Ideally you cap each pod to the max CPU on its node, but that involves a bit more complexity, as it it would need to be done when allocations joins with asset data.
-const MAX_CPU_CAP = 512
+const CPU_SANITY_LIMIT = 512
+
+// Sanity Limit for PV usage, set to 10 PB, in bytes for now
+const KiB = 1024.0
+const MiB = 1024.0 * KiB
+const GiB = 1024.0 * MiB
+const TiB = 1024.0 * GiB
+const PiB = 1024.0 * TiB
+const PV_USAGE_SANITY_LIMIT_BYTES = 10.0 * PiB
 
 /* Pod Helpers */
 
@@ -231,7 +239,7 @@ func applyCPUCoresAllocated(podMap map[podKey]*pod, resCPUCoresAllocated []*prom
 			}
 
 			cpuCores := res.Values[0].Value
-			if cpuCores > MAX_CPU_CAP {
+			if cpuCores > CPU_SANITY_LIMIT {
 				log.Infof("[WARNING] Very large cpu allocation, clamping to %f", res.Values[0].Value*(thisPod.Allocations[container].Minutes()/60.0))
 				cpuCores = 0.0
 			}
@@ -292,7 +300,7 @@ func applyCPUCoresRequested(podMap map[podKey]*pod, resCPUCoresRequested []*prom
 			if thisPod.Allocations[container].CPUCores() < res.Values[0].Value {
 				thisPod.Allocations[container].CPUCoreHours = res.Values[0].Value * (thisPod.Allocations[container].Minutes() / 60.0)
 			}
-			if thisPod.Allocations[container].CPUCores() > MAX_CPU_CAP {
+			if thisPod.Allocations[container].CPUCores() > CPU_SANITY_LIMIT {
 				log.Infof("[WARNING] Very large cpu allocation, clamping! to %f", res.Values[0].Value*(thisPod.Allocations[container].Minutes()/60.0))
 				thisPod.Allocations[container].CPUCoreHours = res.Values[0].Value * (thisPod.Allocations[container].Minutes() / 60.0)
 			}
@@ -347,7 +355,7 @@ func applyCPUCoresUsedAvg(podMap map[podKey]*pod, resCPUCoresUsedAvg []*prom.Que
 			}
 
 			thisPod.Allocations[container].CPUCoreUsageAverage = res.Values[0].Value
-			if res.Values[0].Value > MAX_CPU_CAP {
+			if res.Values[0].Value > CPU_SANITY_LIMIT {
 				log.Infof("[WARNING] Very large cpu USAGE, dropping outlier")
 				thisPod.Allocations[container].CPUCoreUsageAverage = 0.0
 			}
@@ -926,6 +934,11 @@ func applyLabels(podMap map[podKey]*pod, nodeLabels map[nodeKey]map[string]strin
 				allocLabels = make(map[string]string)
 			}
 
+			nsLabels := alloc.Properties.NamespaceLabels
+			if nsLabels == nil {
+				nsLabels = make(map[string]string)
+			}
+
 			// Apply node labels first, then namespace labels, then pod labels
 			// so that pod labels overwrite namespace labels, which overwrite
 			// node labels.
@@ -943,6 +956,7 @@ func applyLabels(podMap map[podKey]*pod, nodeLabels map[nodeKey]map[string]strin
 			if labels, ok := namespaceLabels[nsKey]; ok {
 				for k, v := range labels {
 					allocLabels[k] = v
+					nsLabels[k] = v
 				}
 			}
 
@@ -953,6 +967,8 @@ func applyLabels(podMap map[podKey]*pod, nodeLabels map[nodeKey]map[string]strin
 			}
 
 			alloc.Properties.Labels = allocLabels
+			alloc.Properties.NamespaceLabels = nsLabels
+
 		}
 	}
 }
@@ -964,11 +980,18 @@ func applyAnnotations(podMap map[podKey]*pod, namespaceAnnotations map[string]ma
 			if allocAnnotations == nil {
 				allocAnnotations = make(map[string]string)
 			}
+
+			nsAnnotations := alloc.Properties.NamespaceAnnotations
+			if nsAnnotations == nil {
+				nsAnnotations = make(map[string]string)
+			}
+
 			// Apply namespace annotations first, then pod annotations so that
 			// pod labels overwrite namespace labels.
 			if labels, ok := namespaceAnnotations[key.Namespace]; ok {
 				for k, v := range labels {
 					allocAnnotations[k] = v
+					nsAnnotations[k] = v
 				}
 			}
 			if labels, ok := podAnnotations[key]; ok {
@@ -978,6 +1001,7 @@ func applyAnnotations(podMap map[podKey]*pod, namespaceAnnotations map[string]ma
 			}
 
 			alloc.Properties.Annotations = allocAnnotations
+			alloc.Properties.NamespaceAnnotations = nsAnnotations
 		}
 	}
 }
@@ -1326,7 +1350,8 @@ func getLoadBalancerCosts(lbMap map[serviceKey]*lbCost, resLBCost, resLBActiveMi
 			continue
 		}
 
-		lbStart, lbEnd := calculateStartAndEnd(res, resolution)
+		// load balancer costs are interpolated below, so there is no need to offset the start by the resolution
+		lbStart, lbEnd := calculateStartAndEnd(res, resolution, false)
 		if lbStart.IsZero() || lbEnd.IsZero() {
 			log.Warnf("CostModel.ComputeAllocation: pvc %s has no running time", serviceKey)
 		}
@@ -1342,11 +1367,32 @@ func getLoadBalancerCosts(lbMap map[serviceKey]*lbCost, resLBCost, resLBActiveMi
 		if err != nil {
 			continue
 		}
+
+		// get the ingress IP to determine if this is a private LB
+		ip, err := res.GetString("ingress_ip")
+		if err != nil {
+			log.Warnf("error getting ingress ip for key %s: %v, skipping", serviceKey, err)
+			// do not count the time the service was being created or deleted:
+			// the ingress IP is "" until an external IP is provisioned.
+			// NOTE(review): ip is always "" when GetString errors, so hoist the
+			// `if ip == "" { continue }` check out of the err branch so that a
+			// provisioned-but-empty ingress IP is also skipped — confirm intent
+			if ip == "" {
+				continue
+			}
+		}
+
 		// Apply cost as price-per-hour * hours
 		if lb, ok := lbMap[serviceKey]; ok {
 			lbPricePerHr := res.Values[0].Value
-			hours := lb.End.Sub(lb.Start).Hours()
-			lb.TotalCost += lbPricePerHr * hours
+			// interpolate any missing data (NOTE(review): guard resultHours == 0 to avoid an Inf/NaN scale factor)
+			resolutionHours := resolution.Hours()
+			resultHours := lb.End.Sub(lb.Start).Hours()
+			scaleFactor := (resolutionHours + resultHours) / resultHours
+
+			// after scaling, we can adjust the timings to reflect the interpolated data
+			lb.End = lb.End.Add(resolution)
+
+			lb.TotalCost += lbPricePerHr * resultHours * scaleFactor
+			lb.Private = privateIPCheck(ip)
 		} else {
 			log.DedupedWarningf(20, "CostModel: found minutes for key that does not exist: %s", serviceKey)
 		}
@@ -1391,6 +1437,22 @@ func applyLoadBalancersToPods(window kubecost.Window, podMap map[podKey]*pod, lb
 			alloc.LoadBalancerCost += lb.TotalCost * hours / totalHours
 		}
 
+		for _, alloc := range allocs {
+			if alloc.LoadBalancers == nil {
+				alloc.LoadBalancers = kubecost.LbAllocations{}
+			}
+
+			if _, found := alloc.LoadBalancers[sKey.String()]; found {
+				alloc.LoadBalancers[sKey.String()].Cost += alloc.LoadBalancerCost
+			} else {
+				alloc.LoadBalancers[sKey.String()] = &kubecost.LbAllocation{
+					Service: sKey.Namespace + "/" + sKey.Service,
+					Cost:    alloc.LoadBalancerCost,
+					Private: lb.Private,
+				}
+			}
+		}
+
 		// If there was no overlap apply to Unmounted pod
 		if len(allocHours) == 0 {
 			pod := getUnmountedPodForCluster(window, podMap, sKey.Cluster)
@@ -1720,7 +1782,7 @@ func buildPVMap(resolution time.Duration, pvMap map[pvKey]*pv, resPVCostPerGiBHo
 			continue
 		}
 
-		pvStart, pvEnd := calculateStartAndEnd(result, resolution)
+		pvStart, pvEnd := calculateStartAndEnd(result, resolution, true)
 		if pvStart.IsZero() || pvEnd.IsZero() {
 			log.Warnf("CostModel.ComputeAllocation: pv %s has no running time", key)
 		}
@@ -1764,7 +1826,13 @@ func applyPVBytes(pvMap map[pvKey]*pv, resPVBytes []*prom.QueryResult) {
 			continue
 		}
 
-		pvMap[key].Bytes = res.Values[0].Value
+		pvBytesUsed := res.Values[0].Value
+		if pvBytesUsed < PV_USAGE_SANITY_LIMIT_BYTES {
+			pvMap[key].Bytes = pvBytesUsed
+		} else {
+			pvMap[key].Bytes = 0
+			log.Warnf("PV usage exceeds sanity limit, clamping to zero")
+		}
 	}
 }
 
@@ -1789,7 +1857,7 @@ func buildPVCMap(resolution time.Duration, pvcMap map[pvcKey]*pvc, pvMap map[pvK
 		pvKey := newPVKey(cluster, volume)
 		pvcKey := newPVCKey(cluster, namespace, name)
 
-		pvcStart, pvcEnd := calculateStartAndEnd(res, resolution)
+		pvcStart, pvcEnd := calculateStartAndEnd(res, resolution, true)
 		if pvcStart.IsZero() || pvcEnd.IsZero() {
 			log.Warnf("CostModel.ComputeAllocation: pvc %s has no running time", pvcKey)
 		}
@@ -1890,7 +1958,8 @@ func buildPodPVCMap(podPVCMap map[podKey][]*pvc, pvMap map[pvKey]*pv, pvcMap map
 		}
 	}
 }
-func applyPVCsToPods(window kubecost.Window, podMap map[podKey]*pod, podPVCMap map[podKey][]*pvc, pvcMap map[pvcKey]*pvc) {
+
+func applyPVCsToPods(window kubecost.Window, podMap map[podKey]*pod, podPVCMap map[podKey][]*pvc, pvcMap map[pvcKey]*pvc, resolution time.Duration) {
 	// Because PVCs can be shared among pods, the respective pv cost
 	// needs to be evenly distributed to those pods based on time
 	// running, as well as the amount of time the pvc was shared.
@@ -1940,7 +2009,7 @@ func applyPVCsToPods(window kubecost.Window, podMap map[podKey]*pod, podPVCMap m
 		}
 
 		// Determine coefficients for each pvc-pod relation.
-		sharedPVCCostCoefficients := getPVCCostCoefficients(intervals, pvc)
+		sharedPVCCostCoefficients := getPVCCostCoefficients(intervals, pvc, resolution)
 
 		// Distribute pvc costs to Allocations
 		for thisPodKey, coeffComponents := range sharedPVCCostCoefficients {
@@ -2123,10 +2192,12 @@ func getUnmountedPodForNamespace(window kubecost.Window, podMap map[podKey]*pod,
 	return thisPod
 }
 
-func calculateStartAndEnd(result *prom.QueryResult, resolution time.Duration) (time.Time, time.Time) {
+func calculateStartAndEnd(result *prom.QueryResult, resolution time.Duration, offsetResolution bool) (time.Time, time.Time) {
 	s := time.Unix(int64(result.Values[0].Timestamp), 0).UTC()
-	// subtract resolution from start time to cover full time period
-	s = s.Add(-resolution)
+	if offsetResolution {
+		// subtract resolution from start time to cover full time period
+		s = s.Add(-resolution)
+	}
 	e := time.Unix(int64(result.Values[len(result.Values)-1].Timestamp), 0).UTC()
 	return s, e
 }

+ 4 - 3
pkg/costmodel/allocation_helpers_test.go

@@ -2,11 +2,12 @@ package costmodel
 
 import (
 	"fmt"
+	"testing"
+	"time"
+
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/prom"
 	"github.com/opencost/opencost/pkg/util"
-	"testing"
-	"time"
 )
 
 const Ki = 1024
@@ -498,7 +499,7 @@ func TestCalculateStartAndEnd(t *testing.T) {
 
 	for name, testCase := range testCases {
 		t.Run(name, func(t *testing.T) {
-			start, end := calculateStartAndEnd(testCase.result, testCase.resolution)
+			start, end := calculateStartAndEnd(testCase.result, testCase.resolution, true)
 			if !start.Equal(testCase.expectedStart) {
 				t.Errorf("start to not match expected %v : %v", start, testCase.expectedStart)
 			}

+ 1 - 0
pkg/costmodel/allocation_types.go

@@ -211,4 +211,5 @@ type lbCost struct {
 	TotalCost float64
 	Start     time.Time
 	End       time.Time
+	Private   bool
 }

+ 1 - 1
pkg/costmodel/assets.go

@@ -84,7 +84,7 @@ func (cm *CostModel) ComputeAssets(start, end time.Time) (*kubecost.AssetSet, er
 			e = end
 		}
 
-		loadBalancer := kubecost.NewLoadBalancer(lb.Name, lb.Cluster, lb.ProviderID, s, e, kubecost.NewWindow(&start, &end))
+		loadBalancer := kubecost.NewLoadBalancer(lb.Name, lb.Cluster, lb.ProviderID, s, e, kubecost.NewWindow(&start, &end), lb.Private)
 		cm.PropertiesFromCluster(loadBalancer.Properties)
 		loadBalancer.Cost = lb.Cost
 		assetSet.Insert(loadBalancer, nil)

+ 42 - 25
pkg/costmodel/cluster.go

@@ -2,6 +2,7 @@ package costmodel
 
 import (
 	"fmt"
+	"net"
 	"strconv"
 	"time"
 
@@ -142,12 +143,6 @@ type DiskIdentifier struct {
 }
 
 func ClusterDisks(client prometheus.Client, provider models.Provider, start, end time.Time) (map[DiskIdentifier]*Disk, error) {
-	// Query for the duration between start and end
-	durStr := timeutil.DurationString(end.Sub(start))
-	if durStr == "" {
-		return nil, fmt.Errorf("illegal duration value for %s", kubecost.NewClosedWindow(start, end))
-	}
-
 	// Start from the time "end", querying backwards
 	t := end
 
@@ -162,6 +157,10 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 		log.DedupedWarningf(3, "ClusterDisks(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
 	}
 
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		return nil, fmt.Errorf("illegal duration value for %s", kubecost.NewClosedWindow(start, end))
+	}
 	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
 	// value, converts it to a cumulative value; i.e.
 	// [$/hr] * [min/res]*[hr/min] = [$/res]
@@ -548,12 +547,6 @@ func costTimesMinute(activeDataMap map[NodeIdentifier]activeData, costMap map[No
 }
 
 func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.Time) (map[NodeIdentifier]*Node, error) {
-	// Query for the duration between start and end
-	durStr := timeutil.DurationString(end.Sub(start))
-	if durStr == "" {
-		return nil, fmt.Errorf("illegal duration value for %s", kubecost.NewClosedWindow(start, end))
-	}
-
 	// Start from the time "end", querying backwards
 	t := end
 
@@ -568,14 +561,19 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 		log.DedupedWarningf(3, "ClusterNodes(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
 	}
 
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		return nil, fmt.Errorf("illegal duration value for %s", kubecost.NewClosedWindow(start, end))
+	}
+
 	requiredCtx := prom.NewNamedContext(client, prom.ClusterContextName)
 	optionalCtx := prom.NewNamedContext(client, prom.ClusterOptionalContextName)
 
 	queryNodeCPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost{%s}[%s])) by (%s, node, instance_type, provider_id)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-  queryNodeCPUCoresCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_cpu_cores{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+	queryNodeCPUCoresCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_cpu_cores{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	queryNodeCPUCoresAllocatable := fmt.Sprintf(`avg(avg_over_time(kube_node_status_allocatable_cpu_cores{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	queryNodeRAMHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost{%s}[%s])) by (%s, node, instance_type, provider_id) / 1024 / 1024 / 1024`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-  queryNodeRAMBytesCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_memory_bytes{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+	queryNodeRAMBytesCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_memory_bytes{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	queryNodeRAMBytesAllocatable := fmt.Sprintf(`avg(avg_over_time(kube_node_status_allocatable_memory_bytes{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	queryNodeGPUCount := fmt.Sprintf(`avg(avg_over_time(node_gpu_count{%s}[%s])) by (%s, node, provider_id)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	queryNodeGPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost{%s}[%s])) by (%s, node, instance_type, provider_id)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
@@ -717,14 +715,10 @@ type LoadBalancer struct {
 	Start      time.Time
 	End        time.Time
 	Minutes    float64
+	Private    bool
 }
 
 func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[LoadBalancerIdentifier]*LoadBalancer, error) {
-	// Query for the duration between start and end
-	durStr := timeutil.DurationString(end.Sub(start))
-	if durStr == "" {
-		return nil, fmt.Errorf("illegal duration value for %s", kubecost.NewClosedWindow(start, end))
-	}
 
 	// Start from the time "end", querying backwards
 	t := end
@@ -740,6 +734,12 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 		log.DedupedWarningf(3, "ClusterLoadBalancers(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
 	}
 
+	// Query for the duration between start and end
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		return nil, fmt.Errorf("illegal duration value for %s", kubecost.NewClosedWindow(start, end))
+	}
+
 	ctx := prom.NewNamedContext(client, prom.ClusterContextName)
 
 	queryLBCost := fmt.Sprintf(`avg(avg_over_time(kubecost_load_balancer_cost{%s}[%s])) by (namespace, service_name, %s, ingress_ip)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
@@ -829,6 +829,12 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 			continue
 		}
 
+		providerID, err := result.GetString("ingress_ip")
+		if err != nil {
+			log.DedupedWarningf(5, "ClusterLoadBalancers: LB cost data missing ingress_ip")
+			// only update asset cost when an actual IP was returned
+			continue
+		}
 		key := LoadBalancerIdentifier{
 			Cluster:   cluster,
 			Namespace: namespace,
@@ -838,16 +844,28 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 		// Apply cost as price-per-hour * hours
 		if lb, ok := loadBalancerMap[key]; ok {
 			lbPricePerHr := result.Values[0].Value
-			hrs := lb.Minutes / 60.0
+
+			// interpolate any missing data (NOTE(review): guard resultMins == 0 to avoid an Inf/NaN scale factor)
+			resultMins := lb.Minutes
+			scaleFactor := (resultMins + resolution.Minutes()) / resultMins
+
+			hrs := (lb.Minutes * scaleFactor) / 60.0
 			lb.Cost += lbPricePerHr * hrs
+			lb.Private = privateIPCheck(providerID)
 		} else {
-			log.DedupedWarningf(20, "ClusterLoadBalancers: found minutes for key that does not exist: %s", key)
+			log.DedupedWarningf(20, "ClusterLoadBalancers: found minutes for key that does not exist: %v", key)
 		}
 	}
 
 	return loadBalancerMap, nil
 }
 
+// Check if an ip is private.
+func privateIPCheck(ip string) bool {
+	ipAddress := net.ParseIP(ip)
+	return ipAddress.IsPrivate()
+}
+
 // ComputeClusterCosts gives the cumulative and monthly-rate cluster costs over a window of time for all clusters.
 func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models.Provider, window, offset time.Duration, withBreakdown bool) (map[string]*ClusterCosts, error) {
 	if window < 10*time.Minute {
@@ -859,8 +877,6 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models
 
 	mins := end.Sub(start).Minutes()
 
-	windowStr := timeutil.DurationString(window)
-
 	// minsPerResolution determines accuracy and resource use for the following
 	// queries. Smaller values (higher resolution) result in better accuracy,
 	// but more expensive queries, and vice-a-versa.
@@ -872,6 +888,8 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models
 		log.DedupedWarningf(3, "ComputeClusterCosts(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
 	}
 
+	windowStr := timeutil.DurationString(window)
+
 	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
 	// value, converts it to a cumulative value; i.e.
 	// [$/hr] * [min/res]*[hr/min] = [$/res]
@@ -1343,8 +1361,6 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		e := time.Unix(int64(result.Values[len(result.Values)-1].Timestamp), 0)
 		mins := e.Sub(s).Minutes()
 
-		// TODO niko/assets if mins >= threshold, interpolate for missing data?
-
 		diskMap[key].End = e
 		diskMap[key].Start = s
 		diskMap[key].Minutes = mins
@@ -1418,6 +1434,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 				Breakdown: &ClusterCostsBreakdown{},
 			}
 		}
+
 		diskMap[key].Cost = cost * (diskMap[key].Bytes / 1024 / 1024 / 1024) * (diskMap[key].Minutes / 60)
 		providerID, _ := result.GetString("provider_id") // just put the providerID set up here, it's the simplest query.
 		if providerID != "" {

+ 222 - 35
pkg/costmodel/costmodel.go

@@ -500,11 +500,9 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 				// for the units of memory and CPU.
 				ramRequestBytes := container.Resources.Requests.Memory().Value()
 
-				// Because RAM (and CPU) information isn't coming from Prometheus, it won't
-				// have a timestamp associated with it. We need to provide a timestamp,
-				// otherwise the vector op that gets applied to take the max of usage
-				// and request won't work properly and will only take into account
-				// usage.
+				// Because information on container RAM & CPU requests isn't
+				// coming from Prometheus, it won't have a timestamp associated
+				// with it. We need to provide a timestamp.
 				RAMReqV := []*util.Vector{
 					{
 						Value:     float64(ramRequestBytes),
@@ -582,8 +580,25 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 					ClusterID:       clusterID,
 					ClusterName:     cm.ClusterMap.NameFor(clusterID),
 				}
-				costs.CPUAllocation = getContainerAllocation(costs.CPUReq, costs.CPUUsed, "CPU")
-				costs.RAMAllocation = getContainerAllocation(costs.RAMReq, costs.RAMUsed, "RAM")
+
+				var cpuReq, cpuUse *util.Vector
+				if len(costs.CPUReq) > 0 {
+					cpuReq = costs.CPUReq[0]
+				}
+				if len(costs.CPUUsed) > 0 {
+					cpuUse = costs.CPUUsed[0]
+				}
+				costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
+
+				var ramReq, ramUse *util.Vector
+				if len(costs.RAMReq) > 0 {
+					ramReq = costs.RAMReq[0]
+				}
+				if len(costs.RAMUsed) > 0 {
+					ramUse = costs.RAMUsed[0]
+				}
+				costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
+
 				if filterNamespace == "" {
 					containerNameCost[newKey] = costs
 				} else if costs.Namespace == filterNamespace {
@@ -650,8 +665,25 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 				ClusterID:       c.ClusterID,
 				ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
 			}
-			costs.CPUAllocation = getContainerAllocation(costs.CPUReq, costs.CPUUsed, "CPU")
-			costs.RAMAllocation = getContainerAllocation(costs.RAMReq, costs.RAMUsed, "RAM")
+
+			var cpuReq, cpuUse *util.Vector
+			if len(costs.CPUReq) > 0 {
+				cpuReq = costs.CPUReq[0]
+			}
+			if len(costs.CPUUsed) > 0 {
+				cpuUse = costs.CPUUsed[0]
+			}
+			costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
+
+			var ramReq, ramUse *util.Vector
+			if len(costs.RAMReq) > 0 {
+				ramReq = costs.RAMReq[0]
+			}
+			if len(costs.RAMUsed) > 0 {
+				ramUse = costs.RAMUsed[0]
+			}
+			costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
+
 			if filterNamespace == "" {
 				containerNameCost[key] = costs
 				missingContainers[key] = costs
@@ -828,32 +860,62 @@ func findDeletedNodeInfo(cli prometheusClient.Client, missingNodes map[string]*c
 	return nil
 }
 
-func getContainerAllocation(req []*util.Vector, used []*util.Vector, allocationType string) []*util.Vector {
-	// The result of the normalize operation will be a new []*util.Vector to replace the requests
-	allocationOp := func(r *util.Vector, x *float64, y *float64) bool {
-		if x != nil && y != nil {
-			x1 := *x
-			if math.IsNaN(x1) {
-				log.Warnf("NaN value found during %s allocation calculation for requests.", allocationType)
-				x1 = 0.0
-			}
-			y1 := *y
-			if math.IsNaN(y1) {
-				log.Warnf("NaN value found during %s allocation calculation for used.", allocationType)
-				y1 = 0.0
-			}
-
-			r.Value = math.Max(x1, y1)
-		} else if x != nil {
-			r.Value = *x
-		} else if y != nil {
-			r.Value = *y
+// getContainerAllocation takes the max between request and usage. This function
+// returns a slice containing a single element describing the container's
+// allocation.
+//
+// Additionally, the timestamp of the allocation will be the highest value
+// timestamp between the two vectors. This mitigates situations where
+// Timestamp=0. This should have no effect on the metrics emitted by the
+// CostModelMetricsEmitter
+func getContainerAllocation(req *util.Vector, used *util.Vector, allocationType string) []*util.Vector {
+	var result []*util.Vector
+
+	if req != nil && used != nil {
+		x1 := req.Value
+		if math.IsNaN(x1) {
+			log.Warnf("NaN value found during %s allocation calculation for requests.", allocationType)
+			x1 = 0.0
+		}
+		y1 := used.Value
+		if math.IsNaN(y1) {
+			log.Warnf("NaN value found during %s allocation calculation for used.", allocationType)
+			y1 = 0.0
+		}
+		result = []*util.Vector{
+			{
+				Value:     math.Max(x1, y1),
+				Timestamp: math.Max(req.Timestamp, used.Timestamp),
+			},
+		}
+		if result[0].Value == 0 && result[0].Timestamp == 0 {
+			log.Warnf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
+		}
+	} else if req != nil {
+		result = []*util.Vector{
+			{
+				Value:     req.Value,
+				Timestamp: req.Timestamp,
+			},
+		}
+	} else if used != nil {
+		result = []*util.Vector{
+			{
+				Value:     used.Value,
+				Timestamp: used.Timestamp,
+			},
+		}
+	} else {
+		log.Warnf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
+		result = []*util.Vector{
+			{
+				Value:     0,
+				Timestamp: float64(time.Now().UTC().Unix()),
+			},
 		}
-
-		return true
 	}
 
-	return util.ApplyVectorOp(req, used, allocationOp)
+	return result
 }
 
 func addPVData(cache clustercache.ClusterCache, pvClaimMapping map[string]*PersistentVolumeClaimData, cloud costAnalyzerCloud.Provider) error {
@@ -2299,17 +2361,19 @@ func measureTimeAsync(start time.Time, threshold time.Duration, name string, ch
 	}
 }
 
-func (cm *CostModel) QueryAllocation(window kubecost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata bool) (*kubecost.AllocationSetRange, error) {
+func (cm *CostModel) QueryAllocation(window kubecost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata bool, accumulateBy kubecost.AccumulateOption) (*kubecost.AllocationSetRange, error) {
 	// Validate window is legal
 	if window.IsOpen() || window.IsNegative() {
 		return nil, fmt.Errorf("illegal window: %s", window)
 	}
 
+	var totalsStore kubecost.TotalsStore
 	// Idle is required for proportional asset costs
 	if includeProportionalAssetResourceCosts {
 		if !includeIdle {
 			return nil, errors.New("bad request - includeIdle must be set true if includeProportionalAssetResourceCosts is true")
 		}
+		totalsStore = kubecost.NewMemoryTotalsStore()
 	}
 
 	// Begin with empty response
@@ -2319,6 +2383,7 @@ func (cm *CostModel) QueryAllocation(window kubecost.Window, resolution, step ti
 	// appending each to the response.
 	stepStart := *window.Start()
 	stepEnd := stepStart.Add(step)
+	var isAzure bool
 	for window.End().After(stepStart) {
 		allocSet, err := cm.ComputeAllocation(stepStart, stepEnd, resolution)
 		if err != nil {
@@ -2331,6 +2396,25 @@ func (cm *CostModel) QueryAllocation(window kubecost.Window, resolution, step ti
 				return nil, fmt.Errorf("error computing assets for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
 			}
 
+			if includeProportionalAssetResourceCosts {
+
+				// AKS is a special case - there can be a maximum of 2
+				// load balancers (1 public and 1 private) in an AKS cluster
+				// therefore, when calculating PARCs for load balancers,
+				// we must know if this is an AKS cluster
+				for _, node := range assetSet.Nodes {
+					if _, found := node.Labels["label_kubernetes_azure_com_cluster"]; found {
+						isAzure = true
+						break
+					}
+				}
+
+				_, err := kubecost.UpdateAssetTotalsStore(totalsStore, assetSet)
+				if err != nil {
+					log.Errorf("ETL: error updating asset resource totals for %s: %s", assetSet.Window, err)
+				}
+			}
+
 			idleSet, err := computeIdleAllocations(allocSet, assetSet, true)
 			if err != nil {
 				return nil, fmt.Errorf("error computing idle allocations for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
@@ -2360,6 +2444,109 @@ func (cm *CostModel) QueryAllocation(window kubecost.Window, resolution, step ti
 		return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
 	}
 
+	// Accumulate, if requested
+	if accumulateBy != kubecost.AccumulateOptionNone {
+		asr, err = asr.Accumulate(accumulateBy)
+		if err != nil {
+			log.Errorf("error accumulating by %v: %s", accumulateBy, err)
+			return nil, fmt.Errorf("error accumulating by %v: %s", accumulateBy, err)
+		}
+
+		// when accumulating and returning PARCs, we need the totals for the
+		// accumulated windows to accurately compute a fraction
+		if includeProportionalAssetResourceCosts {
+			assetSet, err := cm.ComputeAssets(*asr.Window().Start(), *asr.Window().End())
+			if err != nil {
+				return nil, fmt.Errorf("error computing assets for %s: %w", kubecost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
+			}
+
+			_, err = kubecost.UpdateAssetTotalsStore(totalsStore, assetSet)
+			if err != nil {
+				log.Errorf("ETL: error updating asset resource totals for %s: %s", kubecost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
+			}
+
+		}
+	}
+
+	if includeProportionalAssetResourceCosts {
+
+		for _, as := range asr.Allocations {
+			totalStoreByNode, ok := totalsStore.GetAssetTotalsByNode(as.Start(), as.End())
+			if !ok {
+				log.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
+				return nil, fmt.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
+			}
+
+			totalStoreByCluster, ok := totalsStore.GetAssetTotalsByCluster(as.Start(), as.End())
+			if !ok {
+				log.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
+				return nil, fmt.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
+			}
+
+			var totalPublicLbCost, totalPrivateLbCost float64
+			if isAzure {
+				// loop through all assetTotals, adding all load balancer costs by public and private
+				for _, tot := range totalStoreByNode {
+					if tot.PrivateLoadBalancer {
+						totalPrivateLbCost += tot.LoadBalancerCost
+					} else {
+						totalPublicLbCost += tot.LoadBalancerCost
+					}
+				}
+			}
+
+			// loop through each allocation set, using total cost from totals store
+			for _, alloc := range as.Allocations {
+				for rawKey, parc := range alloc.ProportionalAssetResourceCosts {
+
+					key := strings.TrimSuffix(strings.ReplaceAll(rawKey, ",", "/"), "/")
+					// for each parc , check the totals store for each
+					// on a totals hit, set the corresponding total and calculate percentage
+					var totals *kubecost.AssetTotals
+					if totalsLoc, found := totalStoreByCluster[key]; found {
+						totals = totalsLoc
+					}
+
+					if totalsLoc, found := totalStoreByNode[key]; found {
+						totals = totalsLoc
+					}
+
+					if totals == nil {
+						log.Errorf("unable to locate asset totals for allocation %s", key)
+						return nil, fmt.Errorf("unable to locate allocation totals for allocation")
+
+					}
+
+					parc.CPUTotalCost = totals.CPUCost
+					parc.GPUTotalCost = totals.GPUCost
+					parc.RAMTotalCost = totals.RAMCost
+					if !isAzure {
+						parc.LoadBalancerTotalCost = totals.LoadBalancerCost
+					} else if len(alloc.LoadBalancers) > 0 {
+						// Azure is a special case - use computed totals above
+						// use the lbAllocations in the object to determine if
+						// this PARC is a public or private load balancer
+						// then set the total accordingly
+						// AKS only has 1 public and 1 private load balancer
+
+						lbAlloc, found := alloc.LoadBalancers[key]
+						if found {
+							if lbAlloc.Private {
+								parc.LoadBalancerTotalCost = totalPrivateLbCost
+							} else {
+								parc.LoadBalancerTotalCost = totalPublicLbCost
+							}
+						}
+					}
+
+					kubecost.ComputePercentages(&parc)
+					alloc.ProportionalAssetResourceCosts[rawKey] = parc
+				}
+			}
+
+		}
+	}
+
 	return asr, nil
 }
 
@@ -2373,10 +2560,10 @@ func computeIdleAllocations(allocSet *kubecost.AllocationSet, assetSet *kubecost
 
 	if idleByNode {
 		allocTotals = kubecost.ComputeAllocationTotals(allocSet, kubecost.AllocationNodeProp)
-		assetTotals = kubecost.ComputeAssetTotals(assetSet, kubecost.AssetNodeProp)
+		assetTotals = kubecost.ComputeAssetTotals(assetSet, true)
 	} else {
 		allocTotals = kubecost.ComputeAllocationTotals(allocSet, kubecost.AllocationClusterProp)
-		assetTotals = kubecost.ComputeAssetTotals(assetSet, kubecost.AssetClusterProp)
+		assetTotals = kubecost.ComputeAssetTotals(assetSet, false)
 	}
 
 	start, end := *allocSet.Window.Start(), *allocSet.Window.End()

+ 150 - 0
pkg/costmodel/costmodel_test.go

@@ -2,6 +2,8 @@ package costmodel
 
 import (
 	"testing"
+
+	"github.com/opencost/opencost/pkg/util"
 )
 
 func Test_CostData_GetController_CronJob(t *testing.T) {
@@ -65,3 +67,151 @@ func Test_CostData_GetController_CronJob(t *testing.T) {
 		})
 	}
 }
+
+func Test_getContainerAllocation(t *testing.T) {
+	cases := []struct {
+		name string
+		cd   CostData
+
+		expectedCPUAllocation []*util.Vector
+		expectedRAMAllocation []*util.Vector
+	}{
+		{
+			name: "Requests greater than usage",
+			cd: CostData{
+				CPUReq:  []*util.Vector{{Value: 1.0, Timestamp: 1686929350}},
+				CPUUsed: []*util.Vector{{Value: .01, Timestamp: 1686929350}},
+				RAMReq:  []*util.Vector{{Value: 10000000, Timestamp: 1686929350}},
+				RAMUsed: []*util.Vector{{Value: 5500000, Timestamp: 1686929350}},
+			},
+
+			expectedCPUAllocation: []*util.Vector{{Value: 1.0, Timestamp: 1686929350}},
+			expectedRAMAllocation: []*util.Vector{{Value: 10000000, Timestamp: 1686929350}},
+		},
+		{
+			name: "Requests less than usage",
+			cd: CostData{
+				CPUReq:  []*util.Vector{{Value: 1.0, Timestamp: 1686929350}},
+				CPUUsed: []*util.Vector{{Value: 2.2, Timestamp: 1686929350}},
+				RAMReq:  []*util.Vector{{Value: 10000000, Timestamp: 1686929350}},
+				RAMUsed: []*util.Vector{{Value: 75000000, Timestamp: 1686929350}},
+			},
+
+			expectedCPUAllocation: []*util.Vector{{Value: 2.2, Timestamp: 1686929350}},
+			expectedRAMAllocation: []*util.Vector{{Value: 75000000, Timestamp: 1686929350}},
+		},
+		{
+			// Expected behavior for getContainerAllocation is to always use the
+			// highest Timestamp value. The significance of 10 seconds comes
+			// from the current default in ApplyVectorOp() in
+			// pkg/util/vector.go.
+			name: "Mismatched timestamps less than 10 seconds apart",
+			cd: CostData{
+				CPUReq:  []*util.Vector{{Value: 1.0, Timestamp: 1686929354}},
+				CPUUsed: []*util.Vector{{Value: .01, Timestamp: 1686929350}},
+				RAMReq:  []*util.Vector{{Value: 10000000, Timestamp: 1686929354}},
+				RAMUsed: []*util.Vector{{Value: 5500000, Timestamp: 1686929350}},
+			},
+
+			expectedCPUAllocation: []*util.Vector{{Value: 1.0, Timestamp: 1686929354}},
+			expectedRAMAllocation: []*util.Vector{{Value: 10000000, Timestamp: 1686929354}},
+		},
+		{
+			// Expected behavior for getContainerAllocation is to always use the
+			// highest Timestamp value. The significance of 10 seconds comes
+			// from the current default in ApplyVectorOp() in
+			// pkg/util/vector.go.
+			name: "Mismatched timestamps greater than 10 seconds apart",
+			cd: CostData{
+				CPUReq:  []*util.Vector{{Value: 1.0, Timestamp: 1686929399}},
+				CPUUsed: []*util.Vector{{Value: .01, Timestamp: 1686929350}},
+				RAMReq:  []*util.Vector{{Value: 10000000, Timestamp: 1686929399}},
+				RAMUsed: []*util.Vector{{Value: 5500000, Timestamp: 1686929350}},
+			},
+
+			expectedCPUAllocation: []*util.Vector{{Value: 1.0, Timestamp: 1686929399}},
+			expectedRAMAllocation: []*util.Vector{{Value: 10000000, Timestamp: 1686929399}},
+		},
+		{
+			name: "Requests has no values",
+			cd: CostData{
+				CPUReq:  []*util.Vector{{Value: 0, Timestamp: 0}},
+				CPUUsed: []*util.Vector{{Value: .01, Timestamp: 1686929350}},
+				RAMReq:  []*util.Vector{{Value: 0, Timestamp: 0}},
+				RAMUsed: []*util.Vector{{Value: 5500000, Timestamp: 1686929350}},
+			},
+
+			expectedCPUAllocation: []*util.Vector{{Value: .01, Timestamp: 1686929350}},
+			expectedRAMAllocation: []*util.Vector{{Value: 5500000, Timestamp: 1686929350}},
+		},
+		{
+			name: "Usage has no values",
+			cd: CostData{
+				CPUReq:  []*util.Vector{{Value: 1.0, Timestamp: 1686929350}},
+				CPUUsed: []*util.Vector{{Value: 0, Timestamp: 0}},
+				RAMReq:  []*util.Vector{{Value: 10000000, Timestamp: 1686929350}},
+				RAMUsed: []*util.Vector{{Value: 0, Timestamp: 0}},
+			},
+
+			expectedCPUAllocation: []*util.Vector{{Value: 1.0, Timestamp: 1686929350}},
+			expectedRAMAllocation: []*util.Vector{{Value: 10000000, Timestamp: 1686929350}},
+		},
+		{
+			// A WRN log should be emitted in this case
+			name: "Both have no values",
+			cd: CostData{
+				CPUReq:  []*util.Vector{{Value: 0, Timestamp: 0}},
+				CPUUsed: []*util.Vector{{Value: 0, Timestamp: 0}},
+				RAMReq:  []*util.Vector{{Value: 0, Timestamp: 0}},
+				RAMUsed: []*util.Vector{{Value: 0, Timestamp: 0}},
+			},
+
+			expectedCPUAllocation: []*util.Vector{{Value: 0, Timestamp: 0}},
+			expectedRAMAllocation: []*util.Vector{{Value: 0, Timestamp: 0}},
+		},
+		{
+			name: "Requests is Nil",
+			cd: CostData{
+				CPUReq:  []*util.Vector{nil},
+				CPUUsed: []*util.Vector{{Value: .01, Timestamp: 1686929350}},
+				RAMReq:  []*util.Vector{nil},
+				RAMUsed: []*util.Vector{{Value: 5500000, Timestamp: 1686929350}},
+			},
+
+			expectedCPUAllocation: []*util.Vector{{Value: .01, Timestamp: 1686929350}},
+			expectedRAMAllocation: []*util.Vector{{Value: 5500000, Timestamp: 1686929350}},
+		},
+		{
+			name: "Usage is nil",
+			cd: CostData{
+				CPUReq:  []*util.Vector{{Value: 1.0, Timestamp: 1686929350}},
+				CPUUsed: []*util.Vector{nil},
+				RAMReq:  []*util.Vector{{Value: 10000000, Timestamp: 1686929350}},
+				RAMUsed: []*util.Vector{nil},
+			},
+
+			expectedCPUAllocation: []*util.Vector{{Value: 1.0, Timestamp: 1686929350}},
+			expectedRAMAllocation: []*util.Vector{{Value: 10000000, Timestamp: 1686929350}},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			cpuAllocation := getContainerAllocation(c.cd.CPUReq[0], c.cd.CPUUsed[0], "CPU")
+			ramAllocation := getContainerAllocation(c.cd.RAMReq[0], c.cd.RAMUsed[0], "RAM")
+
+			if cpuAllocation[0].Value != c.expectedCPUAllocation[0].Value {
+				t.Errorf("CPU Allocation mismatch. Expected Value: %f. Got: %f", c.expectedCPUAllocation[0].Value, cpuAllocation[0].Value)
+			}
+			if cpuAllocation[0].Timestamp != c.expectedCPUAllocation[0].Timestamp {
+				t.Errorf("CPU Allocation mismatch. Expected Timestamp: %f. Got: %f", c.expectedCPUAllocation[0].Timestamp, cpuAllocation[0].Timestamp)
+			}
+			if ramAllocation[0].Value != c.expectedRAMAllocation[0].Value {
+				t.Errorf("RAM Allocation mismatch. Expected Value: %f. Got: %f", c.expectedRAMAllocation[0].Value, ramAllocation[0].Value)
+			}
+			if ramAllocation[0].Timestamp != c.expectedRAMAllocation[0].Timestamp {
+				t.Errorf("RAM Allocation mismatch. Expected Timestamp: %f. Got: %f", c.expectedRAMAllocation[0].Timestamp, ramAllocation[0].Timestamp)
+			}
+		})
+	}
+}

+ 5 - 6
pkg/costmodel/intervals.go

@@ -79,20 +79,20 @@ func getIntervalPointsFromWindows(windows map[podKey]kubecost.Window) IntervalPo
 // getPVCCostCoefficients gets a coefficient which represents the scale
 // factor that each PVC in a pvcIntervalMap and corresponding slice of
 // IntervalPoints intervals uses to calculate a cost for that PVC's PV.
-func getPVCCostCoefficients(intervals IntervalPoints, thisPVC *pvc) map[podKey][]CoefficientComponent {
+func getPVCCostCoefficients(intervals IntervalPoints, thisPVC *pvc, resolution time.Duration) map[podKey][]CoefficientComponent {
 	// pvcCostCoefficientMap has a format such that the individual coefficient
 	// components are preserved for testing purposes.
 	pvcCostCoefficientMap := make(map[podKey][]CoefficientComponent)
 
-	pvcWindow := kubecost.NewWindow(&thisPVC.Start, &thisPVC.End)
-
+	// Reset the start time by the offset as well, so that the offset is not used in the coefficient calculation.
+	startTime := thisPVC.Start.Add(resolution)
+	pvcWindow := kubecost.NewWindow(&startTime, &thisPVC.End)
 	unmountedKey := getUnmountedPodKey(thisPVC.Cluster)
 
 	var void struct{}
 	activeKeys := map[podKey]struct{}{}
 
-	currentTime := thisPVC.Start
-
+	currentTime := startTime
 	// For each interval i.e. for any time a pod-PVC relation ends or starts...
 	for _, point := range intervals {
 		// If the current point happens at a later time than the previous point
@@ -142,7 +142,6 @@ func getPVCCostCoefficients(intervals IntervalPoints, thisPVC *pvc) map[podKey][
 			},
 		)
 	}
-
 	return pvcCostCoefficientMap
 }
 

+ 40 - 1
pkg/costmodel/intervals_test.go

@@ -164,11 +164,24 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 	pod4Key := newPodKey("cluster1", "namespace1", "pod4")
 	ummountedPodKey := newPodKey("cluster1", kubecost.UnmountedSuffix, kubecost.UnmountedSuffix)
 
+	zeroDuration, _ := time.ParseDuration("0m0s")
+	fiveMinOffset, _ := time.ParseDuration("5m")
+	// Reflects a PVC that is offset by the resolution duration, the actual case that happens in the allocation workflow.
+	// Before core-370, the offset was causing an unmounted shared PVC coefficient map entry for the duration of the offset.
+	pvcWithDurationOffset := &pvc{
+		Bytes:     0,
+		Name:      "pvc1",
+		Cluster:   "cluster1",
+		Namespace: "namespace1",
+		Start:     time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC).Add(-fiveMinOffset),
+		End:       time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC),
+	}
 	cases := []struct {
 		name           string
 		pvc            *pvc
 		pvcIntervalMap map[podKey]kubecost.Window
 		intervals      []IntervalPoint
+		resolution     time.Duration
 		expected       map[podKey][]CoefficientComponent
 	}{
 		{
@@ -184,6 +197,7 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod3Key),
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod1Key),
 			},
+			resolution: zeroDuration,
 			expected: map[podKey][]CoefficientComponent{
 				pod1Key: {
 					{0.5, 0.25},
@@ -212,6 +226,7 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC), "end", pod1Key),
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
 			},
+			resolution: zeroDuration,
 			expected: map[podKey][]CoefficientComponent{
 				pod1Key: {
 					{1.0, 0.5},
@@ -230,6 +245,7 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod1Key),
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
 			},
+			resolution: zeroDuration,
 			expected: map[podKey][]CoefficientComponent{
 				pod1Key: {
 					{0.5, 0.5},
@@ -249,6 +265,7 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC), "start", pod1Key),
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod1Key),
 			},
+			resolution: zeroDuration,
 			expected: map[podKey][]CoefficientComponent{
 				pod1Key: {
 					{1.0, 1.0},
@@ -264,6 +281,7 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 8, 45, 0, 0, time.UTC), "start", pod2Key),
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
 			},
+			resolution: zeroDuration,
 			expected: map[podKey][]CoefficientComponent{
 				pod1Key: {
 					{1.0, 0.25},
@@ -283,6 +301,7 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 8, 15, 0, 0, time.UTC), "start", pod1Key),
 				NewIntervalPoint(time.Date(2021, 2, 19, 8, 45, 0, 0, time.UTC), "end", pod1Key),
 			},
+			resolution: zeroDuration,
 			expected: map[podKey][]CoefficientComponent{
 				pod1Key: {
 					{1.0, 0.5},
@@ -293,11 +312,31 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				},
 			},
 		},
+		// Test case to ensure the offset doesn't cause any unmounted entries
+		{
+			name: "pvcMap with duration offset not causing any unmounted entry in sharedPV Coefficient",
+			pvc:  pvcWithDurationOffset,
+			intervals: []IntervalPoint{
+				NewIntervalPoint(time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC), "start", pod1Key),
+				NewIntervalPoint(time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC), "start", pod2Key),
+				NewIntervalPoint(time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC), "end", pod1Key),
+				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
+			},
+			resolution: fiveMinOffset,
+			expected: map[podKey][]CoefficientComponent{
+				pod1Key: {
+					{1.0, 0.5},
+				},
+				pod2Key: {
+					{1.0, 0.5},
+				},
+			},
+		},
 	}
 
 	for _, testCase := range cases {
 		t.Run(testCase.name, func(t *testing.T) {
-			result := getPVCCostCoefficients(testCase.intervals, testCase.pvc)
+			result := getPVCCostCoefficients(testCase.intervals, testCase.pvc, testCase.resolution)
 
 			if !reflect.DeepEqual(result, testCase.expected) {
 				t.Errorf("getPVCCostCoefficients test failed: %s: Got %+v but expected %+v", testCase.name, result, testCase.expected)

+ 7 - 1
pkg/env/costmodelenv.go

@@ -226,7 +226,13 @@ func IsEmitKsmV1MetricsOnly() bool {
 // GetAWSAccessKeyID returns the environment variable value for AWSAccessKeyIDEnvVar which represents
 // the AWS access key for authentication
 func GetAWSAccessKeyID() string {
-	return Get(AWSAccessKeyIDEnvVar, "")
+	awsAccessKeyID := Get(AWSAccessKeyIDEnvVar, "")
+	// If the sample nil service key name is set, zero it out so that it is not
+	// misinterpreted as a real service key.
+	if awsAccessKeyID == "AKIXXX" {
+		awsAccessKeyID = ""
+	}
+	return awsAccessKeyID
 }
 
 // GetAWSAccessKeySecret returns the environment variable value for AWSAccessKeySecretEnvVar which represents

+ 2 - 2
pkg/filter/util/cloudcost.go → pkg/filter/cloudcost/cloudcost.go

@@ -1,4 +1,4 @@
-package util
+package cloudcost
 
 import (
 	"reflect"
@@ -105,7 +105,7 @@ func filterV1SingleValueFromList(rawFilterValues []string, field string) filter.
 		}
 
 		if wildcard {
-			subFilter.Op = kubecost.FilterStartsWith
+			subFilter.Op = filter.StringStartsWith
 		}
 
 		result.Filters = append(result.Filters, subFilter)

+ 1 - 1
pkg/filter/util/cloudcost_test.go → pkg/filter/cloudcost/cloudcost_test.go

@@ -1,4 +1,4 @@
-package util
+package cloudcost
 
 import (
 	"testing"

+ 37 - 0
pkg/filter21/allocation/fields.go

@@ -0,0 +1,37 @@
+package allocation
+
+// AllocationField is an enum that represents Allocation-specific fields that can be
+// filtered on (namespace, label, etc.)
+type AllocationField string
+
+// If you add an AllocationField, make sure to update the field maps to return
+// the correct Allocation value; Go does not enforce exhaustive pattern matching
+// on "enum" types.
+const (
+	FieldClusterID      AllocationField = "cluster"
+	FieldNode           AllocationField = "node"
+	FieldNamespace      AllocationField = "namespace"
+	FieldControllerKind AllocationField = "controllerKind"
+	FieldControllerName AllocationField = "controllerName"
+	FieldPod            AllocationField = "pod"
+	FieldContainer      AllocationField = "container"
+	FieldProvider       AllocationField = "provider"
+	FieldServices       AllocationField = "services"
+	FieldLabel          AllocationField = "label"
+	FieldAnnotation     AllocationField = "annotation"
+)
+
+// AllocationAlias represents an alias field type for allocations.
+// Filtering based on label aliases (team, department, etc.) should be a
+// responsibility of the query handler. By the time it reaches this
+// structured representation, we shouldn't have to be aware of what is
+// aliased to what.
+type AllocationAlias string
+
+const (
+	AliasDepartment  AllocationAlias = "department"
+	AliasEnvironment AllocationAlias = "environment"
+	AliasOwner       AllocationAlias = "owner"
+	AliasProduct     AllocationAlias = "product"
+	AliasTeam        AllocationAlias = "team"
+)

+ 51 - 0
pkg/filter21/allocation/parser.go

@@ -0,0 +1,51 @@
+package allocation
+
+import "github.com/opencost/opencost/pkg/filter21/ast"
+
+// a slice of all the allocation field instances the lexer should recognize as
+// valid left-hand comparators
+var allocationFilterFields []*ast.Field = []*ast.Field{
+	ast.NewField(FieldClusterID),
+	ast.NewField(FieldNode),
+	ast.NewField(FieldNamespace),
+	ast.NewField(FieldControllerName),
+	ast.NewField(FieldControllerKind),
+	ast.NewField(FieldContainer),
+	ast.NewField(FieldPod),
+	ast.NewField(FieldProvider),
+	ast.NewAliasField(AliasDepartment),
+	ast.NewAliasField(AliasEnvironment),
+	ast.NewAliasField(AliasOwner),
+	ast.NewAliasField(AliasProduct),
+	ast.NewAliasField(AliasTeam),
+	ast.NewSliceField(FieldServices),
+	ast.NewMapField(FieldLabel),
+	ast.NewMapField(FieldAnnotation),
+}
+
+// fieldMap is a lazily loaded mapping from AllocationField to ast.Field
+var fieldMap map[AllocationField]*ast.Field
+
+// DefaultFieldByName returns only default allocation filter fields by name.
+func DefaultFieldByName(field AllocationField) *ast.Field {
+	if fieldMap == nil {
+		fieldMap = make(map[AllocationField]*ast.Field, len(allocationFilterFields))
+		for _, f := range allocationFilterFields {
+			ff := *f
+			fieldMap[AllocationField(ff.Name)] = &ff
+		}
+	}
+
+	if af, ok := fieldMap[field]; ok {
+		afcopy := *af
+		return &afcopy
+	}
+
+	return nil
+}
+
+// NewAllocationFilterParser creates a new `ast.FilterParser` implementation
+// which uses allocation specific fields
+func NewAllocationFilterParser() ast.FilterParser {
+	return ast.NewFilterParser(allocationFilterFields)
+}

+ 289 - 0
pkg/filter21/allocation/parser_test.go

@@ -0,0 +1,289 @@
+package allocation
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+)
+
+var parser ast.FilterParser = NewAllocationFilterParser()
+
+func TestParse(t *testing.T) {
+	cases := []struct {
+		name  string
+		input string
+	}{
+		{
+			name: "Empty",
+			input: `              
+			
+			`,
+		},
+		{
+			name:  "Single",
+			input: `namespace: "kubecost"`,
+		},
+		{
+			name:  "Single Group",
+			input: `(namespace: "kubecost")`,
+		},
+		{
+			name:  "Single Double Group",
+			input: `((namespace: "kubecost"))`,
+		},
+		{
+			name:  "And 2x Expression",
+			input: `(namespace: "kubecost" + services~:"foo")`,
+		},
+		{
+			name:  "And 4x Expression",
+			input: `(namespace: "kubecost" + services~:"foo" + cluster:"cluster-one" + controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested And Groups",
+			input: `namespace: "kubecost" + services~:"foo" + (cluster:"cluster-one" + controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested Or Groups",
+			input: `namespace: "kubecost" | services~:"foo" | (cluster:"cluster-one" | controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested AndOr Groups",
+			input: `namespace: "kubecost" + services~:"foo" + (cluster:"cluster-one" | controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested OrAnd Groups",
+			input: `namespace: "kubecost" | services~:"foo" | (cluster:"cluster-one" + controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested OrAndOr Groups",
+			input: `namespace: "kubecost" | services~:"foo" | (cluster:"cluster-one" + controllerKind:"deployment") | namespace:"bar","test"`,
+		},
+		{
+			name:  "Non-uniform Whitespace",
+			input: `node:"node a b c" , "node 12 3"` + string('\n') + "+" + string('\n') + string('\r') + `namespace : "kubecost"`,
+		},
+		{
+			name:  "Group Or Comparison",
+			input: `(namespace:"kubecost" | cluster<~:"cluster-") + services~:"foo"`,
+		},
+		{
+			name:  "Group Or Group",
+			input: `(label~:"foo" + label[foo]:"bar") | (label!~:"foo" + annotation~:"foo" + annotation[foo]:"bar")`,
+		},
+		{
+			name:  "MultiDepth Groups",
+			input: `namespace: "kubecost" | ((services~:"foo" | (cluster:"cluster-one" + controllerKind:"deployment") | namespace:"bar","test") + cluster~:"cluster-")`,
+		},
+		{
+			name: "Long Query",
+			input: `
+				namespace:"kubecost" +
+				(label[app]:"cost_analyzer" +
+				annotation[a1]:"b2" +
+				cluster:"cluster-one") +
+				node!:
+				"node-123",
+				"node-456" +
+				controllerName:
+				"kubecost-cost-analyzer",
+				"kubecost-prometheus-server" +
+				controllerKind!:
+				"daemonset",
+				"statefulset",
+				"job" +
+				container!:"123-abc_foo" +
+				pod!:"aaaaaaaaaaaaaaaaaaaaaaaaa" +
+				services~:"abc123" + 
+				owner!:"kubecost"
+			`,
+		},
+	}
+
+	for i, c := range cases {
+		t.Run(fmt.Sprintf("%d:%s", i, c.name), func(t *testing.T) {
+			t.Logf("Query: %s", c.input)
+			tree, err := parser.Parse(c.input)
+			if err != nil {
+				t.Fatalf("Unexpected parse error: %s", err)
+			}
+			t.Logf("%s", ast.ToPreOrderString(tree))
+		})
+	}
+}
+
+func TestFailingParses(t *testing.T) {
+	cases := []struct {
+		name   string
+		input  string
+		errors int
+	}{
+		{
+			name:   "Empty Parens",
+			input:  `()`,
+			errors: 1,
+		},
+		{
+			name:   "Invalid Op",
+			input:  `namespace.:"kubecost"`,
+			errors: 1,
+		},
+		{
+			name:   "Extra Closing Paren",
+			input:  `(namespace:"kubecost"))`,
+			errors: 1,
+		},
+		{
+			name:   "Extra Opening Paren",
+			input:  `((namespace:"kubecost")`,
+			errors: 1,
+		},
+		{
+			name:   "Or And Mixing",
+			input:  `namespace:"kubecost" | services~:"foo" + cluster:"bar"`,
+			errors: 1,
+		},
+		{
+			name:   "And Or Mixing",
+			input:  `namespace:"kubecost" + services~:"foo" | cluster:"bar"`,
+			errors: 1,
+		},
+		{
+			name:   "And Or Mixing With Extra Closing Paren",
+			input:  `(namespace:"kubecost" + (services~:"foo" | cluster:"bar") | controllerKind<~:"dep"))`,
+			errors: 2,
+		},
+		// NOTE: This test includes coverage for an extra closing paren _early_, which basically enforces an
+		// NOTE: early return. Scoping errors don't allow the parser to continue collecting errors.
+		{
+			name:   "And Or Mixing With Extra Early Closing Paren",
+			input:  `(namespace:"kubecost" + (services~:"foo" | cluster:"bar")) | controllerKind<~:"dep")`,
+			errors: 1,
+		},
+	}
+
+	for i, c := range cases {
+		t.Run(fmt.Sprintf("%d:%s", i, c.name), func(t *testing.T) {
+			t.Logf("Query: %s", c.input)
+			tree, err := parser.Parse(c.input)
+			if err == nil {
+				t.Fatalf("Expected parsing failure. Instead, got a valid tree: \n%s\n", ast.ToPreOrderString(tree))
+			}
+
+			t.Logf("Errors: %s\n", err)
+
+			mErr := errors.Unwrap(err)
+			totalErrors := len(mErr.(*multierror.Error).Errors)
+			if totalErrors != c.errors {
+				t.Fatalf("Expected %d errors from parsing. Got %d", c.errors, totalErrors)
+			}
+		})
+	}
+}
+
+func TestShortPrint(t *testing.T) {
+	cases := []struct {
+		name  string
+		input string
+	}{
+		{
+			name: "Empty",
+			input: `              
+			
+			`,
+		},
+		{
+			name:  "Single",
+			input: `namespace: "kubecost"`,
+		},
+		{
+			name:  "Single Group",
+			input: `(namespace: "kubecost")`,
+		},
+		{
+			name:  "Single Double Group",
+			input: `((namespace: "kubecost"))`,
+		},
+		{
+			name:  "And 2x Expression",
+			input: `(namespace: "kubecost" + services~:"foo")`,
+		},
+		{
+			name:  "And 4x Expression",
+			input: `(namespace: "kubecost" + services~:"foo" + cluster:"cluster-one" + controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested And Groups",
+			input: `namespace: "kubecost" + services~:"foo" + (cluster:"cluster-one" + controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested Or Groups",
+			input: `namespace: "kubecost" | services~:"foo" | (cluster:"cluster-one" | controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested AndOr Groups",
+			input: `namespace: "kubecost" + services~:"foo" + (cluster:"cluster-one" | controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested OrAnd Groups",
+			input: `namespace: "kubecost" | services~:"foo" | (cluster:"cluster-one" + controllerKind:"deployment")`,
+		},
+		{
+			name:  "Nested OrAndOr Groups",
+			input: `namespace: "kubecost" | services~:"foo" | (cluster:"cluster-one" + controllerKind:"deployment") | namespace:"bar","test"`,
+		},
+		{
+			name:  "Non-uniform Whitespace",
+			input: `node:"node a b c" , "node 12 3"` + string('\n') + "+" + string('\n') + string('\r') + `namespace : "kubecost"`,
+		},
+		{
+			name:  "Group Or Comparison",
+			input: `(namespace:"kubecost" | cluster<~:"cluster-") + services~:"foo"`,
+		},
+		{
+			name:  "Group Or Group",
+			input: `(label~:"foo" + label[foo]:"bar") | (label!~:"foo" + annotation~:"foo" + annotation[foo]:"bar")`,
+		},
+		{
+			name:  "MultiDepth Groups",
+			input: `namespace: "kubecost" | ((services~:"foo" | (cluster:"cluster-one" + controllerKind:"deployment") | namespace:"bar","test") + cluster~:"cluster-")`,
+		},
+		{
+			name: "Long Query",
+			input: `
+				namespace:"kubecost" +
+				(label[app]:"cost_analyzer" +
+				annotation[a1]:"b2" +
+				cluster:"cluster-one") +
+				node!:
+				"node-123",
+				"node-456" +
+				controllerName:
+				"kubecost-cost-analyzer",
+				"kubecost-prometheus-server" +
+				controllerKind!:
+				"daemonset",
+				"statefulset",
+				"job" +
+				container!:"123-abc_foo" +
+				pod!:"aaaaaaaaaaaaaaaaaaaaaaaaa" +
+				services~:"abc123" + 
+				owner!:"kubecost"
+			`,
+		},
+	}
+
+	for i, c := range cases {
+		t.Run(fmt.Sprintf("%d:%s", i, c.name), func(t *testing.T) {
+			t.Logf("Query: %s", c.input)
+			tree, err := parser.Parse(c.input)
+			if err != nil {
+				t.Fatalf("Unexpected parse error: %s", err)
+			}
+			t.Logf("%s", ast.ToPreOrderShortString(tree))
+		})
+	}
+}

+ 35 - 0
pkg/filter21/asset/fields.go

@@ -0,0 +1,35 @@
+package asset
+
+// AssetField is an enum that represents Asset-specific fields that can be
+// filtered on (namespace, label, etc.)
+type AssetField string
+
+// If you add a AssetField, make sure to update field maps to return the correct
+// Asset value does not enforce exhaustive pattern matching on "enum" types.
+const (
+	FieldName       AssetField = "name"
+	FieldType       AssetField = "assetType"
+	FieldCategory   AssetField = "category"
+	FieldClusterID  AssetField = "cluster"
+	FieldProject    AssetField = "project"
+	FieldProvider   AssetField = "provider"
+	FieldProviderID AssetField = "providerID"
+	FieldAccount    AssetField = "account"
+	FieldService    AssetField = "service"
+	FieldLabel      AssetField = "label"
+)
+
+// AssetAlias represents an alias field type for assets.
+// Filtering based on label aliases (team, department, etc.) should be a
+// responsibility of the query handler. By the time it reaches this
+// structured representation, we shouldn't have to be aware of what is
+// aliased to what.
+type AssetAlias string
+
+const (
+	DepartmentProp  AssetAlias = "department"
+	EnvironmentProp AssetAlias = "environment"
+	OwnerProp       AssetAlias = "owner"
+	ProductProp     AssetAlias = "product"
+	TeamProp        AssetAlias = "team"
+)

+ 50 - 0
pkg/filter21/asset/parser.go

@@ -0,0 +1,50 @@
+package asset
+
+import "github.com/opencost/opencost/pkg/filter21/ast"
+
+// a slice of all the asset field instances the lexer should recognize as
+// valid left-hand comparators
+var assetFilterFields []*ast.Field = []*ast.Field{
+	ast.NewField(FieldType),
+	ast.NewField(FieldName),
+	ast.NewField(FieldCategory),
+	ast.NewField(FieldClusterID),
+	ast.NewField(FieldProject),
+	ast.NewField(FieldProvider),
+	ast.NewField(FieldProviderID),
+	ast.NewField(FieldAccount),
+	ast.NewField(FieldService),
+	ast.NewMapField(FieldLabel),
+	ast.NewAliasField(DepartmentProp),
+	ast.NewAliasField(EnvironmentProp),
+	ast.NewAliasField(ProductProp),
+	ast.NewAliasField(OwnerProp),
+	ast.NewAliasField(TeamProp),
+}
+
+// fieldMap is a lazily loaded mapping from AllocationField to ast.Field
+var fieldMap map[AssetField]*ast.Field
+
+// DefaultFieldByName returns only default allocation filter fields by name.
+func DefaultFieldByName(field AssetField) *ast.Field {
+	if fieldMap == nil {
+		fieldMap = make(map[AssetField]*ast.Field, len(assetFilterFields))
+		for _, f := range assetFilterFields {
+			ff := *f
+			fieldMap[AssetField(ff.Name)] = &ff
+		}
+	}
+
+	if af, ok := fieldMap[field]; ok {
+		afcopy := *af
+		return &afcopy
+	}
+
+	return nil
+}
+
+// NewAssetFilterParser creates a new `ast.FilterParser` implementation
+// which uses asset specific fields
+func NewAssetFilterParser() ast.FilterParser {
+	return ast.NewFilterParser(assetFilterFields)
+}

+ 79 - 0
pkg/filter21/ast/fields.go

@@ -0,0 +1,79 @@
+package ast
+
+// FieldType is an enumeration of specific types relevant to lexing and
+// parsing a filter.
+type FieldType int
+
+const (
+	FieldTypeDefault FieldType = iota
+	FieldTypeSlice
+	FieldTypeMap
+	FieldTypeAlias
+)
+
+// Field is a Lexer input which acts as a mapping of identifiers used to lex/parse filters.
+type Field struct {
+	// Name contains the name of the specific field as it appears in language.
+	Name string
+
+	fieldType FieldType
+}
+
+// Field equivalence is determined by name and type.
+func (f *Field) Equal(other *Field) bool {
+	if f == nil || other == nil {
+		return false
+	}
+
+	return f.Name == other.Name && f.fieldType == other.fieldType
+}
+
+// IsSlice returns true if the field is a slice. This instructs the lexer that the field
+// should allow contains operations.
+func (f *Field) IsSlice() bool {
+	return f.fieldType == FieldTypeSlice
+}
+
+// IsMap returns true if the field is a map. This instructs the lexer that the field should
+// allow keyed-access operations.
+func (f *Field) IsMap() bool {
+	return f.fieldType == FieldTypeMap
+}
+
+// IsAlias returns true if the field is an alias type. This instructs the lexer that the field
+// is an alias for custom logical resolution by an external compiler.
+func (f *Field) IsAlias() bool {
+	return f.fieldType == FieldTypeAlias
+}
+
+// NewField creates a default string field using the provided name.
+func NewField[T ~string](name T) *Field {
+	return &Field{
+		Name:      string(name),
+		fieldType: FieldTypeDefault,
+	}
+}
+
+// NewSliceField creates a slice field using the provided name.
+func NewSliceField[T ~string](name T) *Field {
+	return &Field{
+		Name:      string(name),
+		fieldType: FieldTypeSlice,
+	}
+}
+
+// NewMapField creates a new map field using the provided name.
+func NewMapField[T ~string](name T) *Field {
+	return &Field{
+		Name:      string(name),
+		fieldType: FieldTypeMap,
+	}
+}
+
+// NewAliasField creates a new alias field using the provided name.
+func NewAliasField[T ~string](name T) *Field {
+	return &Field{
+		Name:      string(name),
+		fieldType: FieldTypeAlias,
+	}
+}

+ 100 - 34
pkg/util/allocationfilterutil/v2/lexer.go → pkg/filter21/ast/lexer.go

@@ -1,11 +1,9 @@
-package allocationfilterutil
+package ast
 
 import (
 	"fmt"
 
 	multierror "github.com/hashicorp/go-multierror"
-
-	"github.com/opencost/opencost/pkg/kubecost"
 )
 
 // ============================================================================
@@ -21,37 +19,29 @@ const (
 	colon tokenKind = iota // ':'
 	comma                  // ','
 	plus                   // '+'
+	or                     // '|'
+
+	bangColon           // '!:'
+	tildeColon          // '~:'
+	bangTildeColon      // '!~:'
+	startTildeColon     // '<~:'
+	bangStartTildeColon // '!<~:'
+	tildeEndColon       // '~>:'
+	bangTildeEndColon   // '!~>:'
 
-	bangColon // '!:'
+	parenOpen  // '('
+	parenClose // ')'
 
 	str // '"foo"'
 
-	filterField1 // 'namespace', 'cluster'
-	filterField2 // 'label', 'annotation'
-	keyedAccess  // '[app]', '[foo]', etc.
-	identifier   // K8s valid name + sanitized Prom: 'app', 'abc_label'
+	filterField // 'namespace', 'cluster'
+	mapField    // 'label', 'annotation'
+	keyedAccess // '[app]', '[foo]', etc.
+	identifier  // K8s valid name + sanitized Prom: 'app', 'abc_label'
 
 	eof
 )
 
-// These maps serve a dual purpose. (1) to help the lexer identify special
-// strings that should become filterField1/2 instead of identifiers and (2) to
-// help the parser convert tokens into AllocationFilterConditions.
-var ff1ToKCFilterField = map[string]kubecost.FilterField{
-	"cluster":        kubecost.FilterClusterID,
-	"node":           kubecost.FilterNode,
-	"namespace":      kubecost.FilterNamespace,
-	"controllerName": kubecost.FilterControllerName,
-	"controllerKind": kubecost.FilterControllerKind,
-	"container":      kubecost.FilterContainer,
-	"pod":            kubecost.FilterPod,
-	"services":       kubecost.FilterServices,
-}
-var ff2ToKCFilterField = map[string]kubecost.FilterField{
-	"label":      kubecost.FilterLabel,
-	"annotation": kubecost.FilterAnnotation,
-}
-
 func (tk tokenKind) String() string {
 	switch tk {
 	case colon:
@@ -60,13 +50,31 @@ func (tk tokenKind) String() string {
 		return "comma"
 	case plus:
 		return "plus"
+	case or:
+		return "or"
 	case bangColon:
 		return "bangColon"
+	case tildeColon:
+		return "tildeColon"
+	case bangTildeColon:
+		return "bangTildeColon"
+	case startTildeColon:
+		return "startTildeColon"
+	case bangStartTildeColon:
+		return "bangStartTildeColon"
+	case tildeEndColon:
+		return "tildeEndColon"
+	case bangTildeEndColon:
+		return "bangTildeEndColon"
+	case parenOpen:
+		return "parenOpen"
+	case parenClose:
+		return "parenClose"
 	case str:
 		return "str"
-	case filterField1:
+	case filterField:
 		return "filterField1"
-	case filterField2:
+	case mapField:
 		return "filterField2"
 	case keyedAccess:
 		return "keyedAccess"
@@ -100,6 +108,9 @@ type scanner struct {
 	tokens []token
 	errors []error
 
+	fields    map[string]*Field
+	mapFields map[string]*Field
+
 	lexemeStartByte int
 	nextByte        int
 }
@@ -169,12 +180,62 @@ func (s *scanner) scanToken() {
 		s.addToken(comma)
 	case '+':
 		s.addToken(plus)
+	case '|':
+		s.addToken(or)
 	case '!':
 		if s.match(':') {
 			s.addToken(bangColon)
+		} else if s.match('~') {
+			if s.match(':') {
+				s.addToken(bangTildeColon)
+			} else if s.match('>') {
+				if s.match(':') {
+					s.addToken(bangTildeEndColon)
+				} else {
+					s.errors = append(s.errors, fmt.Errorf("Position %d: Unexpected '>'", s.nextByte-1))
+				}
+			} else {
+				s.errors = append(s.errors, fmt.Errorf("Position %d: Unexpected '~'", s.nextByte-1))
+			}
+		} else if s.match('<') {
+			if s.match('~') {
+				if s.match(':') {
+					s.addToken(bangStartTildeColon)
+				} else {
+					s.errors = append(s.errors, fmt.Errorf("Position %d: Unexpected '~'", s.nextByte-1))
+				}
+			} else {
+				s.errors = append(s.errors, fmt.Errorf("Position %d: Unexpected '<'", s.nextByte-1))
+			}
 		} else {
 			s.errors = append(s.errors, fmt.Errorf("Position %d: Unexpected '!'", s.nextByte-1))
 		}
+	case '(':
+		s.addToken(parenOpen)
+	case ')':
+		s.addToken(parenClose)
+	case '<':
+		if s.match('~') {
+			if s.match(':') {
+				s.addToken(startTildeColon)
+			} else {
+				s.errors = append(s.errors, fmt.Errorf("Position %d: Unexpected '~'", s.nextByte-1))
+			}
+		} else {
+			s.errors = append(s.errors, fmt.Errorf("Position %d: Unexpected '<'", s.nextByte-1))
+		}
+	case '~':
+		if s.match(':') {
+			s.addToken(tildeColon)
+		} else if s.match('>') {
+			if s.match(':') {
+				s.addToken(tildeEndColon)
+			} else {
+				s.errors = append(s.errors, fmt.Errorf("Position %d: Unexpected '>'", s.nextByte-1))
+			}
+		} else {
+			s.errors = append(s.errors, fmt.Errorf("Position %d: Unexpected '~'", s.nextByte-1))
+		}
 	// strings
 	case '"':
 		s.string()
@@ -255,17 +316,22 @@ func (s *scanner) identifier() {
 	}
 
 	tokenText := s.source[s.lexemeStartByte:s.nextByte]
-	if _, ok := ff1ToKCFilterField[tokenText]; ok {
-		s.addToken(filterField1)
-	} else if _, ok := ff2ToKCFilterField[tokenText]; ok {
-		s.addToken(filterField2)
+	if _, ok := s.fields[tokenText]; ok {
+		s.addToken(filterField)
+	} else if _, ok := s.mapFields[tokenText]; ok {
+		s.addToken(mapField)
 	} else {
 		s.addToken(identifier)
 	}
 }
 
-func lexAllocationFilterV2(raw string) ([]token, error) {
-	s := scanner{source: raw}
+// lex will generate a slice of tokens provided a raw string and the filter field definitions
+func lex(raw string, fields map[string]*Field, mapFields map[string]*Field) ([]token, error) {
+	s := scanner{
+		source:    raw,
+		fields:    fields,
+		mapFields: mapFields,
+	}
 	s.scanTokens()
 
 	if len(s.errors) > 0 {

+ 71 - 5
pkg/util/allocationfilterutil/v2/lexer_test.go → pkg/filter21/ast/lexer_test.go

@@ -1,9 +1,40 @@
-package allocationfilterutil
+package ast
 
 import (
 	"testing"
 )
 
+var allocFields map[string]*Field = map[string]*Field{
+	"cluster":        NewField("cluster"),
+	"node":           NewField("node"),
+	"namespace":      NewField("namespace"),
+	"controllerName": NewField("controllerName"),
+	"controllerKind": NewField("controllerKind"),
+	"container":      NewField("container"),
+	"pod":            NewField("pod"),
+	"services":       NewSliceField("services"),
+}
+
+var allocMapFields map[string]*Field = map[string]*Field{
+	"label":      NewMapField("label"),
+	"annotation": NewMapField("annotation"),
+}
+
+func TestLexerGroup(t *testing.T) {
+	tokens, err := lex(
+		`cluster:"cluster-one"+namespace:"kubecost"+(controllerKind!:"daemonset","deployment")+controllerName:"kubecost-network-costs"+container:"kubecost-network-costs"`,
+		allocFields,
+		allocMapFields)
+
+	if err != nil {
+		t.Errorf("Error: %s", err)
+	}
+
+	for _, token := range tokens {
+		t.Logf("%s", token)
+	}
+}
+
 func TestLexer(t *testing.T) {
 	cases := []struct {
 		name string
@@ -32,14 +63,49 @@ func TestLexer(t *testing.T) {
 			input:    "+",
 			expected: []token{{kind: plus, s: "+"}, {kind: eof}},
 		},
+		{
+			name:     "or",
+			input:    "|",
+			expected: []token{{kind: or, s: "|"}, {kind: eof}},
+		},
 		{
 			name:     "bangColon",
 			input:    "!:",
 			expected: []token{{kind: bangColon, s: "!:"}, {kind: eof}},
 		},
+		{
+			name:     "tildeColon",
+			input:    "~:",
+			expected: []token{{kind: tildeColon, s: "~:"}, {kind: eof}},
+		},
+		{
+			name:     "bangTildeColon",
+			input:    "!~:",
+			expected: []token{{kind: bangTildeColon, s: "!~:"}, {kind: eof}},
+		},
+		{
+			name:     "startTildeColon",
+			input:    "<~:",
+			expected: []token{{kind: startTildeColon, s: "<~:"}, {kind: eof}},
+		},
+		{
+			name:     "bangStartTildeColon",
+			input:    "!<~:",
+			expected: []token{{kind: bangStartTildeColon, s: "!<~:"}, {kind: eof}},
+		},
+		{
+			name:     "tildeEndColon",
+			input:    "~>:",
+			expected: []token{{kind: tildeEndColon, s: "~>:"}, {kind: eof}},
+		},
+		{
+			name:     "bangTildeEndColon",
+			input:    "!~>:",
+			expected: []token{{kind: bangTildeEndColon, s: "!~>:"}, {kind: eof}},
+		},
 		{
 			name: "multiple symbols",
-			// This is a valid string to lex but not to parse.
+			// This is a valid string to parse but not to lex
 			input:    "!::,+",
 			expected: []token{{kind: bangColon, s: "!:"}, {kind: colon, s: ":"}, {kind: comma, s: ","}, {kind: plus, s: "+"}, {kind: eof}},
 		},
@@ -79,12 +145,12 @@ func TestLexer(t *testing.T) {
 			name:  "whitespace separated accesses",
 			input: `node : "abc" , "def" ` + string('\r') + string('\n') + string('\t') + `namespace : "123"`,
 			expected: []token{
-				{kind: filterField1, s: "node"},
+				{kind: filterField, s: "node"},
 				{kind: colon, s: ":"},
 				{kind: str, s: "abc"},
 				{kind: comma, s: ","},
 				{kind: str, s: "def"},
-				{kind: filterField1, s: "namespace"},
+				{kind: filterField, s: "namespace"},
 				{kind: colon, s: ":"},
 				{kind: str, s: "123"},
 				{kind: eof},
@@ -95,7 +161,7 @@ func TestLexer(t *testing.T) {
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
 			t.Logf("Input: '%s'", c.input)
-			result, err := lexAllocationFilterV2(c.input)
+			result, err := lex(c.input, allocFields, allocMapFields)
 			if c.expectError && err == nil {
 				t.Errorf("expected error but got nil")
 			} else if !c.expectError && err != nil {

+ 193 - 0
pkg/filter21/ast/ops.go

@@ -0,0 +1,193 @@
+package ast
+
+// FilterOp is an enum that represents operations that can be performed
+// when filtering (equality, inequality, etc.)
+type FilterOp string
+
+// If you add a FilterOp, MAKE SURE TO UPDATE ALL FILTER IMPLEMENTATIONS! Go
+// does not enforce exhaustive pattern matching on "enum" types.
+const (
+	// FilterOpEquals is the equality operator
+	//
+	// "kube-system" FilterOpEquals "kube-system" = true
+	// "kube-syste" FilterOpEquals "kube-system" = false
+	FilterOpEquals FilterOp = "equals"
+
+	// FilterOpNotEquals is the inverse of equals.
+	FilterOpNotEquals = "notequals"
+
+	// FilterOpContains supports string fields, slice fields, and map fields.
+	// For maps, this is equivalent to map.HasKey(x)
+	//
+	// "kube-system" FilterOpContains "e-s" = true
+	// ["a", "b", "c"] FilterOpContains "a" = true
+	// { "namespace": "kubecost", "cluster": "cluster-one" } FilterOpContains "namespace" = true
+	FilterOpContains = "contains"
+
+	// FilterOpNotContains is the inverse of contains.
+	FilterOpNotContains = "notcontains"
+
+	// FilterOpContainsPrefix is like FilterOpContains, but checks against the start of a string.
+	// For maps, this checks to see if any of the keys start with the prefix
+	//
+	// "kube-system" ContainsPrefix "kube" = true
+	// ["kube-system", "abc123"] ContainsPrefix "kube" = true
+	// { "kube-label": "test", "abc": "123" } ContainsPrefix "ab" = true
+	FilterOpContainsPrefix = "containsprefix"
+
+	// FilterOpNotContainsPrefix is the inverse of FilterOpContainsPrefix
+	FilterOpNotContainsPrefix = "notcontainsprefix"
+
+	// FilterOpContainsSuffix is like FilterOpContains, but checks against the end of a string.
+	// For maps, this checks to see if any of the keys end with the suffix
+	//
+	// "kube-system" ContainsSuffix "system" = true
+	// ["kube-system", "abc123"] ContainsSuffix "system" = true
+	// { "kube-label": "test", "abc": "123" } ContainsSuffix "-label" = true
+	FilterOpContainsSuffix = "containssuffix"
+
+	// FilterOpNotContainsSuffix is the inverse of FilterOpContainsSuffix
+	FilterOpNotContainsSuffix = "notcontainssuffix"
+
+	// FilterOpVoid is base-depth operator that is used for an empty filter
+	FilterOpVoid = "void"
+
+	// FilterOpContradiction is a base-depth operator that filters all data.
+	FilterOpContradiction = "contradiction"
+
+	// FilterOpAnd is an operator that succeeds if all parameters succeed.
+	FilterOpAnd = "and"
+
+	// FilterOpOr is an operator that succeeds if any parameter succeeds
+	FilterOpOr = "or"
+
+	// FilterOpNot is an operator that contains a single operand
+	FilterOpNot = "not"
+)
+
+// VoidOp is base-depth operator that is used for an empty filter
+type VoidOp struct{}
+
+// Op returns the FilterOp enumeration value for the operator.
+func (_ *VoidOp) Op() FilterOp {
+	return FilterOpVoid
+}
+
+// ContradictionOp is a base-depth operator that filters all data.
+type ContradictionOp struct{}
+
+// Op returns the FilterOp enumeration value for the operator.
+func (_ *ContradictionOp) Op() FilterOp {
+	return FilterOpContradiction
+}
+
+// AndOp is a filter operation that contains a flat list of nodes which should all resolve
+// to true in order for the result to be true.
+type AndOp struct {
+	Operands []FilterNode
+}
+
+// Op returns the FilterOp enumeration value for the operator.
+func (_ *AndOp) Op() FilterOp {
+	return FilterOpAnd
+}
+
+// Add appends a filter node to the flat list of operands within the AND operator
+func (ao *AndOp) Add(node FilterNode) {
+	ao.Operands = append(ao.Operands, node)
+}
+
+// OrOp is a filter operation that contains a flat list of nodes which at least one node
+// should resolve to true in order for the result to be true.
+type OrOp struct {
+	Operands []FilterNode
+}
+
+// Op returns the FilterOp enumeration value for the operator.
+func (_ *OrOp) Op() FilterOp {
+	return FilterOpOr
+}
+
+// Add appends a filter node to the flat list of operands within the OR operator
+func (oo *OrOp) Add(node FilterNode) {
+	oo.Operands = append(oo.Operands, node)
+}
+
+// NotOp is a filter operation that logically inverts result of the child operand.
+type NotOp struct {
+	Operand FilterNode
+}
+
+// Op returns the FilterOp enumeration value for the operator.
+func (_ *NotOp) Op() FilterOp {
+	return FilterOpNot
+}
+
+// Add sets the not operand to the parameter
+func (no *NotOp) Add(node FilterNode) {
+	no.Operand = node
+}
+
+// EqualOp is a filter operation that compares a resolvable identifier (Left) to a
+// string value (Right)
+type EqualOp struct {
+	// Left contains a resolvable Identifier (property of an input type) which can be
+	// used to compare against the Right value.
+	Left Identifier
+
+	// Right contains the value which we wish to compare the resolved identifier to.
+	Right string
+}
+
+// Op returns the FilterOp enumeration value for the operator.
+func (_ *EqualOp) Op() FilterOp {
+	return FilterOpEquals
+}
+
+// ContainsOp is a filter operation that checks to see if a resolvable identifier (Left) contains a
+// string value (Right)
+type ContainsOp struct {
+	// Left contains a resolvable Identifier (property of an input type) which can be
+	// used to query against using the Right value.
+	Left Identifier
+
+	// Right contains the value which we use to search the resolved Left identifier with.
+	Right string
+}
+
+// Op returns the FilterOp enumeration value for the operator.
+func (_ *ContainsOp) Op() FilterOp {
+	return FilterOpContains
+}
+
+// ContainsPrefixOp is a filter operation that checks to see if a resolvable identifier (Left) starts with a
+// string value (Right)
+type ContainsPrefixOp struct {
+	// Left contains a resolvable Identifier (property of an input type) which can be
+	// used to query against using the Right value.
+	Left Identifier
+
+	// Right contains the value which we use to search the resolved Left identifier with.
+	Right string
+}
+
+// Op returns the FilterOp enumeration value for the operator.
+func (_ *ContainsPrefixOp) Op() FilterOp {
+	return FilterOpContainsPrefix
+}
+
+// ContainsSuffixOp is a filter operation that checks to see if a resolvable identifier (Left) ends with a
+// string value (Right)
+type ContainsSuffixOp struct {
+	// Left contains a resolvable Identifier (property of an input type) which can be
+	// used to query against using the Right value.
+	Left Identifier
+
+	// Right contains the value which we use to search the resolved Left identifier with.
+	Right string
+}
+
+// Op returns the FilterOp enumeration value for the operator.
+func (_ *ContainsSuffixOp) Op() FilterOp {
+	return FilterOpContainsSuffix
+}

+ 589 - 0
pkg/filter21/ast/parser.go

@@ -0,0 +1,589 @@
+// allocationfilterutil provides functionality for parsing V2 of the Kubecost
+// filter language for Allocation types.
+//
+// e.g. "filter=namespace:kubecost+controllerkind:deployment"
+package ast
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+// The grammar is approximately as follows:
+//
+// <filter>         ::= <filter-element> (<group-op> <filter-element>)*
+// <filter-element> ::= <comparison> | <group-filter>
+// <group-filter>   ::= '(' <filter> ')'
+// <group-op>       ::= '+' | '|'
+// <comparison>     ::= <filter-key> <filter-op> <filter-value>
+// <filter-key>     ::= <map-field> <keyed-access> | <filter-field>
+// <filter-op>      ::= ':' | '!:' | '~:' | '!~:' | '<~:' | '!<~:' | '~>:' | '!~>:'
+// <filter-value>   ::= '"' <identifier> '"' (',' <filter-value>)*
+// <keyed-access>   ::= '[' <identifier> ']'
+// <map-field>      ::= --- (fields passed into lexer)
+// <filter-field>   ::= --- (fields passed into lexer)
+// <identifier>     ::= --- valid K8s name or Prom-sanitized K8s name
+
+// ============================================================================
+// Parser
+//
+// Based on the Parser class in Chapter 6: Parsing Expressions of Crafting
+// Interpreters by Robert Nystrom
+// ============================================================================
+
+// parseError produces error messages tailored to the needs of the parser
+func parseError(t token, message string) error {
+	if t.kind == eof {
+		return fmt.Errorf("at end: %s", message)
+	}
+
+	return fmt.Errorf("at '%s': %s", t.s, message)
+}
+
+type parser struct {
+	tokens  []token
+	current int
+
+	fields    map[string]*Field
+	mapFields map[string]*Field
+}
+
+// ----------------------------------------------------------------------------
+// Parser helper methods for token handling
+// ----------------------------------------------------------------------------
+
+func (p *parser) atEnd() bool {
+	return p.peek().kind == eof
+}
+
+func (p *parser) advance() token {
+	if !p.atEnd() {
+		p.current += 1
+	}
+
+	return p.previous()
+}
+
+func (p *parser) previous() token {
+	return p.tokens[p.current-1]
+}
+
+// match return true and advances the parser by one token if the next token has
+// a kind that matches one of the arguments. Otherwise, it returns false and
+// DOES NOT advance the parser.
+func (p *parser) match(tokenKinds ...tokenKind) bool {
+	for _, kind := range tokenKinds {
+		if p.check(kind) {
+			p.advance()
+			return true
+		}
+	}
+	return false
+}
+
+// check returns true iff the next token matches the provided kind.
+func (p *parser) check(tk tokenKind) bool {
+	if p.atEnd() {
+		return false
+	}
+	return p.peek().kind == tk
+}
+
+func (p *parser) peek() token {
+	return p.tokens[p.current]
+}
+
+// consume is a "next token must be this kind" method. If the next token is of
+// the correct kind, the parser is advanced and that token is returned. If it
+// is not of the correct kind, a parse error is returned and the parser is NOT
+// advanced.
+func (p *parser) consume(tk tokenKind, message string) (token, error) {
+	if p.check(tk) {
+		return p.advance(), nil
+	}
+
+	return token{}, parseError(p.peek(), message)
+}
+
+// synchronize attempts to skip forward until the next tokenKind, indicating the
+// start of a new (plus, or, or parenClose).
+func (p *parser) synchronize(tokens ...tokenKind) {
+	if len(tokens) == 0 {
+		return
+	}
+
+	for !p.atEnd() {
+		kind := p.peek().kind
+		for _, token := range tokens {
+			if kind == token {
+				return
+			}
+		}
+
+		p.advance()
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Parser grammar rules as recursive descent methods
+// ----------------------------------------------------------------------------
+
+// filter is the main method of the parser. It turns the token stream into an
+// FilterNode tree, reporting parse errors that occurred along the way. The depth
+// parameter is the number of edges from the node to the tree's root node, which
+// is initially 0. As we recurse into the tree, the depth will increase.
+func (p *parser) filter(depth int) (FilterNode, error) {
+	var errs *multierror.Error
+
+	// ----------------------------------------------------------------------------
+	//  Capture Starting Op
+	// ----------------------------------------------------------------------------
+	// Since every valid filter starts with an operand, this is always our first
+	// step. Depending on the _next_ token, we can either stop here or use a grouping
+	// operator (+ or |).
+	var top FilterNode
+
+	// If we determine after parsing the first op that we have a group op, we'll create
+	// the group based on the operator and push the top into the group.
+	var f FilterGroup = nil
+
+	// Special Case: Empty Filter on depth = 0 and first token is eof
+	if depth == 0 && p.peek().kind == eof {
+		return &VoidOp{}, errs.ErrorOrNil()
+	}
+
+	// Open Paren indicates a new filter depth, so we recursively call filter with depth+1.
+	if p.match(parenOpen) {
+		node, err := p.filter(depth + 1)
+		if err != nil {
+			errs = multierror.Append(errs, err)
+		} else {
+			top = node
+		}
+	} else {
+		comparison, err := p.comparison()
+		if err != nil {
+			errs = multierror.Append(errs, err)
+			p.synchronize(plus, or, parenClose)
+		} else {
+			top = comparison
+		}
+	}
+
+	// Handles case `( <comparison> )` with no grouping ops.
+	if p.match(parenClose) {
+		if depth <= 0 {
+			errs = multierror.Append(errs, fmt.Errorf("Found ')' without matching '('"))
+		}
+
+		return top, errs.ErrorOrNil()
+	}
+
+	// ----------------------------------------------------------------------------
+	//  Determine Group Operator
+	// ----------------------------------------------------------------------------
+	// Once we land here, we expect an operator as the next token. This operator will
+	// determine the group for this scope and be used to continue parsing as long as
+	// the operators following the initial are _the same_.
+	//
+	// For instance:
+	// ( <comparison> | <comparison> | <comparison> ) is allowed
+	// ( <comparison> + <comparison> + (<comparison> | <comparison>)) is allowed
+	// ( <comparison> | <comparison> + <comparison> ) is _NOT_ allowed
+
+	// Create the proper grouping operator based on the current token kind,
+	// then use a while to capture each repition of the _same_ operator.
+	selectedOp := p.peek().kind
+	if selectedOp == plus || selectedOp == or {
+		if selectedOp == plus {
+			f = &AndOp{}
+		} else if selectedOp == or {
+			f = &OrOp{}
+		}
+
+		// Once we determine we are using a group operator, it's safe to push
+		// the current top level operand into the group
+		f.Add(top)
+
+		// Capture each repetition
+		for p.match(selectedOp) {
+			if p.match(parenOpen) {
+				node, err := p.filter(depth + 1)
+				if err != nil {
+					errs = multierror.Append(errs, err)
+				} else {
+					f.Add(node)
+				}
+			} else {
+				right, err := p.comparison()
+				if err != nil {
+					errs = multierror.Append(errs, err)
+					p.synchronize(plus, or, parenClose)
+				} else {
+					f.Add(right)
+				}
+			}
+
+			if p.match(parenClose) {
+				if depth <= 0 {
+					errs = multierror.Append(errs, fmt.Errorf("Found ')' without matching '('"))
+				}
+
+				return f, errs.ErrorOrNil()
+			}
+
+			// The following code enforces continued use of a single operator within a scope.
+			// ie: (a | b + c) is disallowed
+			//
+			// In order to continue parsing (to continue to collect parse errors), we need to fast-
+			// forward to the next instance of an operator or scope close.
+			nextOp := p.peek().kind
+			if nextOp != eof && nextOp != selectedOp {
+				errs = multierror.Append(errs, fmt.Errorf("Found \"%s\", Expected \"%s\"", nextOp.String(), selectedOp.String()))
+				// since we were peeking for this check, to correctly synchronize, we must advance at least once
+				p.advance()
+				p.synchronize(plus, or, parenClose)
+
+				// since it's possible to synchronize to a paren close, we need to ensure we correctly pop the
+				// current scope if that's the case.
+				if p.match(parenClose) {
+					return f, errs.ErrorOrNil()
+				}
+			}
+		}
+	}
+
+	// It should not be possible to reach this point on a non-zero depth, so we
+	// must have a () mismatch
+	if depth > 0 {
+		errs = multierror.Append(errs, fmt.Errorf("Found '(' without matching ')'"))
+	}
+
+	// If we didn't have a grouping operator, we simply return the single op
+	if f == nil {
+		return top, errs.ErrorOrNil()
+	}
+
+	return f, errs.ErrorOrNil()
+}
+
+func (p *parser) comparison() (FilterNode, error) {
+	field, key, err := p.filterKey()
+	if err != nil {
+		return nil, err
+	}
+
+	opToken, err := p.filterOp()
+	if err != nil {
+		return nil, err
+	}
+
+	var op FilterOp
+
+	switch opToken.kind {
+	case colon:
+		// for ':' using a slice or key-less map, treat as '~:'
+		if field.IsSlice() || (field.IsMap() && key == "") {
+			op = FilterOpContains
+		} else {
+			op = FilterOpEquals
+		}
+	case bangColon:
+		// for '!:' using a slice or key-less map, treat as '!~:'
+		if field.IsSlice() || (field.IsMap() && key == "") {
+			op = FilterOpNotContains
+		} else {
+			op = FilterOpNotEquals
+		}
+	case tildeColon:
+		op = FilterOpContains
+	case bangTildeColon:
+		op = FilterOpNotContains
+	case startTildeColon:
+		op = FilterOpContainsPrefix
+	case bangStartTildeColon:
+		op = FilterOpNotContainsPrefix
+	case tildeEndColon:
+		op = FilterOpContainsSuffix
+	case bangTildeEndColon:
+		op = FilterOpNotContainsSuffix
+	default:
+		return nil, parseError(opToken, "implementation problem: unhandled op token")
+	}
+
+	values, err := p.filterValues()
+	if err != nil {
+		return nil, err
+	}
+
+	switch opToken.kind {
+	// In the != case, a sequence of filter values is ANDed
+	// Example:
+	// namespace!:"foo","bar" -> (and (notequals namespace foo)
+	//                                (notequals namespace bar))
+	case bangColon, bangTildeColon, bangStartTildeColon, bangTildeEndColon:
+		// Only a single filter value, don't need to wrap in AND
+		if len(values) == 1 {
+			node, err := toFilterNode(field, key, op, values[0])
+			if err != nil {
+				return nil, fmt.Errorf("Parse Error: %s", err)
+			}
+
+			return node, nil
+		}
+
+		// Multiple filter values, wrap in AND
+		baseFilter := &AndOp{}
+		for _, v := range values {
+			node, err := toFilterNode(field, key, op, v)
+			if err != nil {
+				return nil, fmt.Errorf("Parse Error: %s", err)
+			}
+
+			baseFilter.Operands = append(baseFilter.Operands, node)
+		}
+
+		return baseFilter, nil
+
+	default:
+		// Only a single filter value, don't need to wrap in OR
+		if len(values) == 1 {
+			node, err := toFilterNode(field, key, op, values[0])
+			if err != nil {
+				return nil, fmt.Errorf("Parse Error: %s", err)
+			}
+
+			return node, nil
+		}
+
+		// Multiple filter values, wrap in OR
+		baseFilter := &OrOp{}
+		for _, v := range values {
+			node, err := toFilterNode(field, key, op, v)
+			if err != nil {
+				return nil, fmt.Errorf("Parse Error: %s", err)
+			}
+
+			baseFilter.Operands = append(baseFilter.Operands, node)
+		}
+
+		return baseFilter, nil
+	}
+
+}
+
+// filterKey parses a series of tokens that represent a "filter key", returning
+// an error if a filter key cannot be constructed.
+//
+// Examples:
+// tokens = [filterField2:label keyedAccess:app] -> FilterLabel, app, nil
+// tokens = [filterField1:namespace] -> FilterNamespace, "", nil
+func (p *parser) filterKey() (field *Field, key string, err error) {
+	// Map-style fields (e.g. label, annotation) may optionally be followed
+	// by a keyed access like [app].
+	if p.match(mapField) {
+		name := p.previous().s
+		mapped, found := p.mapFields[name]
+		if !found {
+			return nil, "", parseError(p.previous(), "expect key-mapped filter field, like 'label' or 'annotation'")
+		}
+
+		accessKey := ""
+		if p.match(keyedAccess) {
+			accessKey = p.previous().s
+		}
+
+		return mapped, accessKey, nil
+	}
+
+	// Otherwise this must be a plain (non-map) filter field.
+	if _, err = p.consume(filterField, "expect filter field"); err != nil {
+		return nil, "", err
+	}
+
+	name := p.previous().s
+	mapped, found := p.fields[name]
+	if !found {
+		return nil, "", parseError(p.previous(), "expect known filter field, like 'cluster' or 'namespace'")
+	}
+
+	return mapped, "", nil
+}
+
+// filterOp consumes and returns the next token when it is one of the
+// supported filter operators; otherwise it reports a parse error.
+func (p *parser) filterOp() (token, error) {
+	if !p.match(colon, bangColon, tildeColon, bangTildeColon, startTildeColon, bangStartTildeColon, tildeEndColon, bangTildeEndColon) {
+		return token{}, parseError(p.peek(), "expect filter op like ':', '!:', '~:', or '!~:'")
+	}
+
+	return p.previous(), nil
+}
+
+func (p *parser) filterValues() ([]string, error) {
+	vals := []string{}
+
+	_, err := p.consume(str, "expect string as filter value")
+	if err != nil {
+		return nil, err
+	}
+	vals = append(vals, p.previous().s)
+
+	for p.match(comma) {
+		_, err := p.consume(str, "expect string as filter value")
+		if err != nil {
+			return nil, err
+		}
+
+		vals = append(vals, p.previous().s)
+	}
+
+	return vals, nil
+}
+
+func toFilterNode(field *Field, key string, op FilterOp, value string) (FilterNode, error) {
+	switch op {
+	case FilterOpEquals:
+		return &EqualOp{
+			Left: Identifier{
+				Field: field,
+				Key:   key,
+			},
+			Right: value,
+		}, nil
+
+	case FilterOpNotEquals:
+		return &NotOp{
+			Operand: &EqualOp{
+				Left: Identifier{
+					Field: field,
+					Key:   key,
+				},
+				Right: value,
+			},
+		}, nil
+
+	case FilterOpContains:
+		return &ContainsOp{
+			Left: Identifier{
+				Field: field,
+				Key:   key,
+			},
+			Right: value,
+		}, nil
+
+	case FilterOpNotContains:
+		return &NotOp{
+			Operand: &ContainsOp{
+				Left: Identifier{
+					Field: field,
+					Key:   key,
+				},
+				Right: value,
+			},
+		}, nil
+
+	case FilterOpContainsPrefix:
+		return &ContainsPrefixOp{
+			Left: Identifier{
+				Field: field,
+				Key:   key,
+			},
+			Right: value,
+		}, nil
+
+	case FilterOpNotContainsPrefix:
+		return &NotOp{
+			Operand: &ContainsPrefixOp{
+				Left: Identifier{
+					Field: field,
+					Key:   key,
+				},
+				Right: value,
+			},
+		}, nil
+
+	case FilterOpContainsSuffix:
+		return &ContainsSuffixOp{
+			Left: Identifier{
+				Field: field,
+				Key:   key,
+			},
+			Right: value,
+		}, nil
+
+	case FilterOpNotContainsSuffix:
+		return &NotOp{
+			Operand: &ContainsSuffixOp{
+				Left: Identifier{
+					Field: field,
+					Key:   key,
+				},
+				Right: value,
+			},
+		}, nil
+
+	default:
+		return nil, fmt.Errorf("Failed to parse op: %s", op)
+	}
+}
+
+// FilterParser is an object capable of parsing a filter string into a
+// `FilterNode` AST.
+type FilterParser interface {
+	// Parse parses a filter string into a FilterNode AST.
+	Parse(filter string) (FilterNode, error)
+}
+
+// defaultFilterParser is the default implementation of FilterParser. It holds
+// the known field definitions keyed by name, split into plain fields and
+// key-mapped (map-type) fields for use by both the lexer and the parser.
+type defaultFilterParser struct {
+	fields    map[string]*Field // plain fields, e.g. "namespace"
+	mapFields map[string]*Field // key-mapped fields, e.g. "label"
+}
+
+// Parse parses a filter string into a FilterNode AST. Lexing and parsing
+// errors are wrapped with context and returned.
+func (dfp *defaultFilterParser) Parse(filter string) (FilterNode, error) {
+	tokens, err := lex(filter, dfp.fields, dfp.mapFields)
+	if err != nil {
+		return nil, fmt.Errorf("lexing filter: %w", err)
+	}
+
+	prs := &parser{
+		tokens:    tokens,
+		fields:    dfp.fields,
+		mapFields: dfp.mapFields,
+	}
+
+	root, err := prs.filter(0)
+	if err != nil {
+		return nil, fmt.Errorf("parsing filter: %w", err)
+	}
+
+	return root, nil
+}
+
+// fieldsToMaps splits a slice of Field instances into a map of fields (key'd
+// by name) that have no key-based access and those that have key-based access.
+func fieldsToMaps(fs []*Field) (map[string]*Field, map[string]*Field) {
+	plain := make(map[string]*Field)
+	keyed := make(map[string]*Field)
+
+	for _, field := range fs {
+		if field.IsMap() {
+			keyed[field.Name] = field
+			continue
+		}
+		plain[field.Name] = field
+	}
+
+	return plain, keyed
+}
+
+// NewFilterParser creates a new `FilterParser` instance with the provided
+// `Field` definitions to use when lexing and parsing.
+func NewFilterParser(fields []*Field) FilterParser {
+	plain, keyed := fieldsToMaps(fields)
+	return &defaultFilterParser{
+		fields:    plain,
+		mapFields: keyed,
+	}
+}

+ 44 - 0
pkg/filter21/ast/tree.go

@@ -0,0 +1,44 @@
+package ast
+
+// FilterNode is the base instance of a tree leaf node, which is a conditional
+// operator which contains operands that may also be leaf nodes. A go
+// type-switch should be used to reduce the FilterNode to a concrete type to
+// operate on. If only the type of operator is required, the `Op()` method can
+// be used.
+type FilterNode interface {
+	Op() FilterOp
+}
+
+// FilterGroup is a specialized interface for ops which can collect N operands.
+type FilterGroup interface {
+	FilterNode
+
+	// Add adds a new leaf node to the FilterGroup.
+	Add(FilterNode)
+}
+
+// Identifier is a struct that contains the data required to resolve a specific
+// operand to a concrete value during operator compilation.
+type Identifier struct {
+	Field *Field // field definition; may be nil (e.g. Clone guards against it)
+	Key   string // optional key for map-type fields, e.g. label[app]
+}
+
+// Equal returns true if the identifiers are equal.
+// NOTE(review): unlike String, this does not guard against a nil receiver or
+// nil Field and will panic on either — confirm callers always pass
+// fully-populated identifiers.
+func (id *Identifier) Equal(ident Identifier) bool {
+	return id.Field.Equal(ident.Field) && id.Key == ident.Key
+}
+
+// String returns the string representation for the Identifier: the field name,
+// with the key (if any) appended in brackets. Nil receivers and nil fields are
+// rendered as placeholders rather than panicking.
+func (id *Identifier) String() string {
+	switch {
+	case id == nil:
+		return "<nil>"
+	case id.Field == nil:
+		return "<nil field>"
+	}
+
+	out := id.Field.Name
+	if id.Key != "" {
+		out += "[" + id.Key + "]"
+	}
+	return out
+}

+ 369 - 0
pkg/filter21/ast/walker.go

@@ -0,0 +1,369 @@
+package ast
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/opencost/opencost/pkg/filter21/util"
+	"golang.org/x/text/cases"
+	"golang.org/x/text/language"
+)
+
+// Package-level casers used by the tree printers below to title-case and
+// lower-case op names.
+// NOTE(review): cases.Caser instances may be stateful; confirm sharing these
+// is safe if the printers are ever called concurrently.
+var titleCaser cases.Caser = cases.Title(language.Und, cases.NoLower)
+var lowerCaser cases.Caser = cases.Lower(language.Und)
+
+// TraversalState represents the state of the current leaf node in a traversal
+// of the filter AST. Any grouping ops will include an Enter on their first
+// occurrence, and an Exit when leaving the op state.
+type TraversalState int
+
+const (
+	// TraversalStateNone is used whenever a binary op leaf node is traversed.
+	TraversalStateNone TraversalState = iota
+
+	// TraversalStateEnter is used when a group op leaf node is traversed (and, or, not).
+	TraversalStateEnter
+
+	// TraversalStateExit is used when a group op leaf node is popped (and, or, not).
+	TraversalStateExit
+)
+
+// TransformLeaves produces a new tree, leaving non-leaf nodes (e.g. And, Or,
+// Not) intact and replacing each leaf node (e.g. Equals, Contains) with the
+// result of calling transformer(leaf). A nil input yields nil.
+func TransformLeaves(node FilterNode, transformer func(FilterNode) FilterNode) FilterNode {
+	if node == nil {
+		return nil
+	}
+
+	// Group ops are rebuilt with recursively-transformed operands; anything
+	// else is a leaf and is handed to the transformer.
+	switch group := node.(type) {
+	case *NotOp:
+		return &NotOp{
+			Operand: TransformLeaves(group.Operand, transformer),
+		}
+	case *AndOp:
+		var children []FilterNode
+		for _, operand := range group.Operands {
+			children = append(children, TransformLeaves(operand, transformer))
+		}
+		return &AndOp{
+			Operands: children,
+		}
+	case *OrOp:
+		var children []FilterNode
+		for _, operand := range group.Operands {
+			children = append(children, TransformLeaves(operand, transformer))
+		}
+		return &OrOp{
+			Operands: children,
+		}
+	default:
+		return transformer(node)
+	}
+}
+
+// PreOrderTraversal accepts a root `FilterNode` and calls the f callback on
+// each leaf node it traverses. When entering "group" leaf nodes (leaf nodes
+// which contain other leaf nodes), a TraversalStateEnter/Exit will be included
+// to denote each depth. In short, the callback will be executed twice for each
+// "group" op, once before entering, and once before exiting.
+func PreOrderTraversal(node FilterNode, f func(FilterNode, TraversalState)) {
+	if node == nil {
+		return
+	}
+
+	// Collect the children of group ops; anything else is a plain leaf and
+	// gets a single callback with no Enter/Exit bracketing.
+	var children []FilterNode
+	switch group := node.(type) {
+	case *NotOp:
+		children = []FilterNode{group.Operand}
+	case *AndOp:
+		children = group.Operands
+	case *OrOp:
+		children = group.Operands
+	default:
+		f(node, TraversalStateNone)
+		return
+	}
+
+	// Group op: bracket the recursive child visits with Enter/Exit callbacks.
+	f(node, TraversalStateEnter)
+	for _, child := range children {
+		PreOrderTraversal(child, f)
+	}
+	f(node, TraversalStateExit)
+}
+
+// ToPreOrderString runs a PreOrderTraversal and generates an indented tree
+// structure string format for the provided tree root. Depth increases after
+// each group Enter and decreases before each group Exit.
+func ToPreOrderString(node FilterNode) string {
+	var out strings.Builder
+	depth := 0
+
+	PreOrderTraversal(node, func(n FilterNode, state TraversalState) {
+		switch state {
+		case TraversalStateEnter:
+			out.WriteString(OpStringFor(n, state, depth))
+			depth++
+		case TraversalStateExit:
+			depth--
+			out.WriteString(OpStringFor(n, state, depth))
+		default:
+			out.WriteString(OpStringFor(n, state, depth))
+		}
+	})
+
+	return out.String()
+}
+
+// ToPreOrderShortString runs a PreOrderTraversal and generates a condensed
+// tree structure string format for the provided tree root.
+func ToPreOrderShortString(node FilterNode) string {
+	var sb strings.Builder
+
+	// ShortOpStringFor already varies its output by traversal state, so the
+	// callback does not need to branch on the state itself. (The previous
+	// if/else-if/else chain executed the identical statement in all three
+	// branches — dead branching removed.)
+	PreOrderTraversal(node, func(n FilterNode, action TraversalState) {
+		sb.WriteString(ShortOpStringFor(n, action))
+	})
+
+	return sb.String()
+}
+
+// OpStringFor returns a string for the provided leaf node, traversal state,
+// and current depth, using a brace-delimited, indented format.
+func OpStringFor(node FilterNode, traversalState TraversalState, depth int) string {
+	prefix := indent(depth)
+
+	if traversalState == TraversalStateExit {
+		return prefix + "}\n"
+	}
+
+	if traversalState == TraversalStateEnter {
+		return prefix + titleCaser.String(string(node.Op())) + " {\n"
+	}
+
+	open := prefix + titleCaser.String(string(node.Op())) + " { "
+
+	switch n := node.(type) {
+	case *EqualOp:
+		open += fmt.Sprintf("Left: %s, Right: %s }\n", n.Left.String(), n.Right)
+	case *ContainsOp:
+		open += fmt.Sprintf("Left: %s, Right: %s }\n", n.Left.String(), n.Right)
+	case *ContainsPrefixOp:
+		open += fmt.Sprintf("Left: %s, Right: %s }\n", n.Left.String(), n.Right)
+	case *ContainsSuffixOp:
+		open += fmt.Sprintf("Left: %s, Right: %s }\n", n.Left.String(), n.Right)
+	default:
+		// VoidOp and any other value-less op close immediately.
+		// BUG FIX: the previous *VoidOp case appended ")" — a copy/paste from
+		// the paren-delimited short form — yielding unbalanced, newline-less
+		// output ("Void { )") in this brace-delimited printer.
+		open += "}\n"
+	}
+
+	return open
+}
+
+// ShortOpStringFor returns a condensed, paren-delimited string for the
+// provided leaf node and traversal state.
+func ShortOpStringFor(node FilterNode, traversalState TraversalState) string {
+	switch traversalState {
+	case TraversalStateExit:
+		return ")"
+	case TraversalStateEnter:
+		return lowerCaser.String(string(node.Op())) + "("
+	}
+
+	prefix := lowerCaser.String(string(node.Op())) + "("
+
+	switch n := node.(type) {
+	case *EqualOp:
+		return prefix + fmt.Sprintf("%s,%s)", condenseIdent(n.Left), n.Right)
+	case *ContainsOp:
+		return prefix + fmt.Sprintf("%s,%s)", condenseIdent(n.Left), n.Right)
+	case *ContainsPrefixOp:
+		return prefix + fmt.Sprintf("%s,%s)", condenseIdent(n.Left), n.Right)
+	case *ContainsSuffixOp:
+		return prefix + fmt.Sprintf("%s,%s)", condenseIdent(n.Left), n.Right)
+	default:
+		// VoidOp and any other value-less op close immediately.
+		return prefix + ")"
+	}
+}
+
+// condenseIdent renders an Identifier with an abbreviated field name,
+// preserving the bracketed key when present.
+func condenseIdent(ident Identifier) string {
+	out := condense(ident.Field.Name)
+	if ident.Key == "" {
+		return out
+	}
+	return out + "[" + ident.Key + "]"
+}
+
+// condense lowercases s and truncates it to at most two characters.
+func condense(s string) string {
+	lowered := lowerCaser.String(s)
+	if len(lowered) <= 2 {
+		return lowered
+	}
+	return lowered[:2]
+}
+
+// Clone deep copies and returns the AST parameter.
+//
+// Implementation: walks the tree with PreOrderTraversal, mirroring group ops
+// (And/Or/Not) with freshly-allocated groups on a stack, and copying each leaf
+// op — including the Identifier's Field, which is copied by value — into the
+// group currently on top of the stack. A leaf (or the outermost group on
+// exit) becomes the result directly when the stack is empty. A nil or
+// unrecognized input yields a *VoidOp.
+func Clone(filter FilterNode) FilterNode {
+	var result FilterNode = &VoidOp{}
+	var currentOps *util.Stack[FilterGroup] = util.NewStack[FilterGroup]()
+
+	PreOrderTraversal(filter, func(fn FilterNode, state TraversalState) {
+		if fn == nil {
+			return
+		}
+		switch n := fn.(type) {
+		// Group ops: push an empty copy on Enter; on Exit either nest the
+		// finished copy in its parent group or, if outermost, make it the
+		// final result.
+		case *AndOp:
+			if state == TraversalStateEnter {
+				currentOps.Push(&AndOp{})
+			} else if state == TraversalStateExit {
+				if currentOps.Length() > 1 {
+					current := currentOps.Pop()
+					currentOps.Top().Add(current)
+				} else {
+					result = currentOps.Pop()
+				}
+			}
+		case *OrOp:
+			if state == TraversalStateEnter {
+				currentOps.Push(&OrOp{})
+			} else if state == TraversalStateExit {
+				if currentOps.Length() > 1 {
+					current := currentOps.Pop()
+					currentOps.Top().Add(current)
+				} else {
+					result = currentOps.Pop()
+				}
+			}
+		case *NotOp:
+			if state == TraversalStateEnter {
+				currentOps.Push(&NotOp{})
+			} else if state == TraversalStateExit {
+				if currentOps.Length() > 1 {
+					current := currentOps.Pop()
+					currentOps.Top().Add(current)
+				} else {
+					result = currentOps.Pop()
+				}
+			}
+		// ContradictionOp carries no data, so a fresh instance is a copy.
+		case *ContradictionOp:
+			if currentOps.Length() == 0 {
+				result = &ContradictionOp{}
+			} else {
+				currentOps.Top().Add(&ContradictionOp{})
+			}
+		// Leaf comparison ops: copy the Field by value (guarding against a
+		// nil Field pointer) so the clone shares no pointers with the input.
+		case *EqualOp:
+			var field Field
+			if n.Left.Field != nil {
+				field = *n.Left.Field
+			}
+			sm := &EqualOp{
+				Left: Identifier{
+					Field: &field,
+					Key:   n.Left.Key,
+				},
+				Right: n.Right,
+			}
+
+			if currentOps.Length() == 0 {
+				result = sm
+			} else {
+				currentOps.Top().Add(sm)
+			}
+
+		case *ContainsOp:
+			var field Field
+			if n.Left.Field != nil {
+				field = *n.Left.Field
+			}
+			sm := &ContainsOp{
+				Left: Identifier{
+					Field: &field,
+					Key:   n.Left.Key,
+				},
+				Right: n.Right,
+			}
+
+			if currentOps.Length() == 0 {
+				result = sm
+			} else {
+				currentOps.Top().Add(sm)
+			}
+
+		case *ContainsPrefixOp:
+			var field Field
+			if n.Left.Field != nil {
+				field = *n.Left.Field
+			}
+			sm := &ContainsPrefixOp{
+				Left: Identifier{
+					Field: &field,
+					Key:   n.Left.Key,
+				},
+				Right: n.Right,
+			}
+
+			if currentOps.Length() == 0 {
+				result = sm
+			} else {
+				currentOps.Top().Add(sm)
+			}
+
+		case *ContainsSuffixOp:
+			var field Field
+			if n.Left.Field != nil {
+				field = *n.Left.Field
+			}
+			sm := &ContainsSuffixOp{
+				Left: Identifier{
+					Field: &field,
+					Key:   n.Left.Key,
+				},
+				Right: n.Right,
+			}
+
+			if currentOps.Length() == 0 {
+				result = sm
+			} else {
+				currentOps.Top().Add(sm)
+			}
+		}
+	})
+
+	return result
+}
+
+// indent returns a 2-space indentation for each level of depth; non-positive
+// depths yield the empty string.
+func indent(depth int) string {
+	if depth > 0 {
+		return strings.Repeat("  ", depth)
+	}
+	return ""
+}

+ 52 - 0
pkg/filter21/ast/walker_test.go

@@ -0,0 +1,52 @@
+package ast
+
+import (
+	"fmt"
+)
+
+// ExampleTransformLeaves demonstrates wrapping every leaf comparison of a
+// filter tree in a Not while leaving the grouping structure intact. The
+// Output comment below is verified by `go test` as part of this example.
+func ExampleTransformLeaves() {
+	// (and (eq field1[foo] bar) (eq field2 bar))
+	originalTree := &AndOp{
+		Operands: []FilterNode{
+			&EqualOp{
+				Left: Identifier{
+					Field: &Field{
+						Name: "field1",
+					},
+					Key: "foo",
+				},
+				Right: "bar",
+			},
+
+			&EqualOp{
+				Left: Identifier{
+					Field: &Field{
+						Name: "field2",
+					},
+				},
+				Right: "bar",
+			},
+		},
+	}
+
+	// This transformer applies "Not" to all leaves; TransformLeaves only
+	// invokes it on leaf nodes, so seeing a group op here is a bug.
+	transformFunc := func(node FilterNode) FilterNode {
+		switch concrete := node.(type) {
+		case *AndOp, *OrOp, *NotOp:
+			panic("Leaf transformer should not be called on non-leaf nodes")
+		default:
+			return &NotOp{Operand: concrete}
+		}
+	}
+
+	newTree := TransformLeaves(originalTree, transformFunc)
+	fmt.Println(ToPreOrderString(newTree))
+	// Output:
+	// And {
+	//   Not {
+	//     Equals { Left: field1[foo], Right: bar }
+	//   }
+	//   Not {
+	//     Equals { Left: field2, Right: bar }
+	//   }
+	// }
+}

+ 7 - 0
pkg/filter21/filter.go

@@ -0,0 +1,7 @@
+package filter
+
+import "github.com/opencost/opencost/pkg/filter21/ast"
+
+// Filter is just the root node of an AST. There are various compiler
+// implementations available to create data-source-specific filtering from
+// the AST.
+type Filter = ast.FilterNode

+ 13 - 0
pkg/filter21/matcher/allcut.go

@@ -0,0 +1,13 @@
+package matcher
+
+// AllCut is a matcher that matches nothing. This is useful
+// for applications like authorization, where a user/group/role may be disallowed
+// from viewing data entirely.
+type AllCut[T any] struct{}
+
+// String returns the string representation of the matcher instance.
+func (ac *AllCut[T]) String() string { return "(AllCut)" }
+
+// Matches is the canonical in-Go function for determining if T
+// matches a specific implementation's rules. AllCut always returns false.
+func (ac *AllCut[T]) Matches(T) bool { return false }

+ 11 - 0
pkg/filter21/matcher/allpass.go

@@ -0,0 +1,11 @@
+package matcher
+
+// AllPass is a filter that matches everything and is the same as no filter. It is implemented here as a guard
+// against universal operations occurring in the absence of filters.
+type AllPass[T any] struct{}
+
+// String returns the string representation of the matcher instance.
+func (n *AllPass[T]) String() string { return "(AllPass)" }
+
+// Matches is the canonical in-Go function for determining if T
+// matches a specific implementation's rules. AllPass always returns true.
+func (n *AllPass[T]) Matches(T) bool { return true }

+ 42 - 0
pkg/filter21/matcher/and.go

@@ -0,0 +1,42 @@
+package matcher
+
+import (
+	"fmt"
+)
+
+// And is a set of matchers that should be evaluated as a logical AND: a value
+// matches only when every member matches. An empty group vacuously matches.
+type And[T any] struct {
+	Matchers []Matcher[T]
+}
+
+// Add appends a matcher to the AND group.
+func (a *And[T]) Add(m Matcher[T]) {
+	a.Matchers = append(a.Matchers, m)
+}
+
+// String returns the string representation of the matcher instance.
+func (a *And[T]) String() string {
+	s := "(and"
+	for _, m := range a.Matchers {
+		s += fmt.Sprintf(" %s", m)
+	}
+	return s + ")"
+}
+
+// Matches is the canonical in-Go function for determining if T
+// matches AND match rules.
+func (a *And[T]) Matches(that T) bool {
+	for _, m := range a.Matchers {
+		if !m.Matches(that) {
+			return false
+		}
+	}
+
+	// Every member matched (or the group was empty).
+	return true
+}

+ 188 - 0
pkg/filter21/matcher/compiler.go

@@ -0,0 +1,188 @@
+package matcher
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/transform"
+	"github.com/opencost/opencost/pkg/filter21/util"
+)
+
+// FieldMapper is the adapter which can fetch actual T instance data of type U
+// leveraging the ast.Identifier definition.
+type FieldMapper[T any, U any] func(T, ast.Identifier) (U, error)
+
+// StringFieldMapper is the adapter which can fetch actual T instance data of type string
+// leveraging the ast.Identifier definition.
+type StringFieldMapper[T any] FieldMapper[T, string]
+
+// SliceFieldMapper is the adapter which can fetch actual T instance data of type []string
+// leveraging the ast.Identifier definition.
+type SliceFieldMapper[T any] FieldMapper[T, []string]
+
+// MapFieldMapper is the adapter which can fetch actual T instance data of type map[string]string
+// leveraging the ast.Identifier definition.
+type MapFieldMapper[T any] FieldMapper[T, map[string]string]
+
+// MatchCompiler compiles an `ast.FilterNode` into a Matcher[T] implementation.
+// It holds one matcher factory per mapped field shape (string, slice, map) and
+// the compiler passes applied to the AST before compilation.
+type MatchCompiler[T any] struct {
+	stringMatcher *StringMatcherFactory[T]
+	sliceMatcher  *StringSliceMatcherFactory[T]
+	mapMatcher    *StringMapMatcherFactory[T]
+	passes        []transform.CompilerPass
+}
+
+// NewMatchCompiler creates a new MatchCompiler for T instances provided the
+// funcs which can map ast.Identifier instances to a specific T field.
+func NewMatchCompiler[T any](
+	stringFieldMapper StringFieldMapper[T],
+	sliceFieldMapper SliceFieldMapper[T],
+	mapFieldMapper MapFieldMapper[T],
+	passes ...transform.CompilerPass,
+) *MatchCompiler[T] {
+	compiler := &MatchCompiler[T]{
+		passes: passes,
+	}
+	compiler.stringMatcher = NewStringMatcherFactory(stringFieldMapper)
+	compiler.sliceMatcher = NewStringSliceMatcherFactory(sliceFieldMapper)
+	compiler.mapMatcher = NewStringMapMatcherFactory(mapFieldMapper)
+	return compiler
+}
+
+// Compile accepts an `ast.FilterNode` tree and compiles it into a `Matcher[T]` implementation
+// which can be used to match T instances dynamically.
+//
+// Compiler passes are applied to the tree first; a resulting VoidOp (or an
+// otherwise empty tree) compiles to AllPass, and a ContradictionOp compiles
+// to AllCut.
+func (mc *MatchCompiler[T]) Compile(filter ast.FilterNode) (Matcher[T], error) {
+	// apply compiler passes on parsed ast
+	var err error
+	filter, err = transform.ApplyAll(filter, mc.passes)
+	if err != nil {
+		return nil, fmt.Errorf("applying compiler passes: %w", err)
+	}
+
+	// if the root node is a void op, return an allpass
+	if _, ok := filter.(*ast.VoidOp); ok {
+		return &AllPass[T]{}, nil
+	}
+
+	var result Matcher[T]
+	var currentOps *util.Stack[MatcherGroup[T]] = util.NewStack[MatcherGroup[T]]()
+
+	// emit attaches a compiled matcher to the innermost open group, or makes
+	// it the final result when no group is open.
+	emit := func(m Matcher[T]) {
+		if currentOps.Length() == 0 {
+			result = m
+		} else {
+			currentOps.Top().Add(m)
+		}
+	}
+
+	// closeGroup pops the innermost group and either nests it in its parent
+	// group or, when it was the outermost group, makes it the final result.
+	closeGroup := func() {
+		if currentOps.Length() > 1 {
+			current := currentOps.Pop()
+			currentOps.Top().Add(current)
+		} else {
+			result = currentOps.Pop()
+		}
+	}
+
+	// containsMatcher picks the matcher implementation for the contains-style
+	// ops: slice fields use the slice matcher, key-less map fields use the map
+	// matcher, and everything else matches as a plain string. (This selection
+	// was previously duplicated verbatim across the three contains cases.)
+	containsMatcher := func(op ast.FilterOp, left ast.Identifier, right string) Matcher[T] {
+		if left.Field.IsSlice() {
+			return mc.sliceMatcher.NewStringSliceMatcher(op, left, right)
+		}
+		if left.Field.IsMap() && left.Key == "" {
+			return mc.mapMatcher.NewStringMapMatcher(op, left, right)
+		}
+		return mc.stringMatcher.NewStringMatcher(op, left, right)
+	}
+
+	// handleLeaf is the ast walker func. Group ops get pushed onto a stack on
+	// the Enter state, and popped on the Exit state. Any ops between Enter and
+	// Exit are added to the group. If there are no more groups on the stack after
+	// an Exit state, we set the result to the final group.
+	handleLeaf := func(leaf ast.FilterNode, state ast.TraversalState) {
+		switch n := leaf.(type) {
+		case *ast.AndOp:
+			if state == ast.TraversalStateEnter {
+				currentOps.Push(&And[T]{})
+			} else if state == ast.TraversalStateExit {
+				closeGroup()
+			}
+		case *ast.OrOp:
+			if state == ast.TraversalStateEnter {
+				currentOps.Push(&Or[T]{})
+			} else if state == ast.TraversalStateExit {
+				closeGroup()
+			}
+		case *ast.NotOp:
+			if state == ast.TraversalStateEnter {
+				currentOps.Push(&Not[T]{})
+			} else if state == ast.TraversalStateExit {
+				closeGroup()
+			}
+		case *ast.ContradictionOp:
+			emit(&AllCut[T]{})
+		case *ast.EqualOp:
+			emit(mc.stringMatcher.NewStringMatcher(n.Op(), n.Left, n.Right))
+		case *ast.ContainsOp:
+			emit(containsMatcher(n.Op(), n.Left, n.Right))
+		case *ast.ContainsPrefixOp:
+			emit(containsMatcher(n.Op(), n.Left, n.Right))
+		case *ast.ContainsSuffixOp:
+			emit(containsMatcher(n.Op(), n.Left, n.Right))
+		}
+	}
+
+	ast.PreOrderTraversal(filter, handleLeaf)
+	if result == nil {
+		return &AllPass[T]{}, nil
+	}
+
+	return result, nil
+}

+ 17 - 0
pkg/filter21/matcher/matcher.go

@@ -0,0 +1,17 @@
+package matcher
+
+// Matcher represents anything that can be used to match against a given
+// generic type T.
+type Matcher[T any] interface {
+	String() string
+
+	// Matches is the canonical in-Go function for determining if T
+	// matches a specific implementation's rules.
+	Matches(T) bool
+}
+
+// MatcherGroup is useful for dynamically creating group-based matchers
+// (e.g. And/Or/Not), which collect member matchers via Add.
+type MatcherGroup[T any] interface {
+	Matcher[T]
+
+	Add(Matcher[T])
+}

+ 467 - 0
pkg/filter21/matcher/matcher_test.go

@@ -0,0 +1,467 @@
+package matcher_test
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/filter21/allocation"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/matcher"
+	"github.com/opencost/opencost/pkg/filter21/transform"
+)
+
+// allocCompiler is a MatchCompiler for Allocation instances providing
+// functions which map identifiers to values within the allocation; the
+// prometheus-key sanitize and __unallocated__ replacement passes run before
+// compilation.
+var allocCompiler = matcher.NewMatchCompiler(
+	AllocFieldMap,
+	AllocSliceFieldMap,
+	AllocMapFieldMap,
+	transform.PrometheusKeySanitizePass(),
+	transform.UnallocatedReplacementPass(),
+)
+
+// allocParser is the AST parser for the allocation filter syntax.
+var allocParser ast.FilterParser = allocation.NewAllocationFilterParser()
+
+// newAlloc constructs a test Allocation from the given properties, deriving
+// its Name from the properties' string form.
+func newAlloc(props *AllocationProperties) *Allocation {
+	alloc := &Allocation{Properties: props}
+	alloc.Name = alloc.Properties.String()
+	return alloc
+}
+
+func TestCompileAndMatch(t *testing.T) {
+	cases := []struct {
+		input          string
+		shouldMatch    []*Allocation
+		shouldNotMatch []*Allocation
+	}{
+		{
+			input: `namespace:"kubecost"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "kubecost"}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "kube-system"}),
+			},
+		},
+		{
+			input: `cluster:"cluster-one"+namespace:"kubecost"+controllerKind:"daemonset"+controllerName:"kubecost-network-costs"+container:"kubecost-network-costs"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Cluster:        "cluster-one",
+					Namespace:      "kubecost",
+					ControllerKind: "daemonset",
+					Controller:     "kubecost-network-costs",
+					Pod:            "kubecost-network-costs-abc123",
+					Container:      "kubecost-network-costs",
+				}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Cluster:        "cluster-one",
+					Namespace:      "default",
+					ControllerKind: "deployment",
+					Controller:     "workload-abc",
+					Pod:            "workload-abc-123abc",
+					Container:      "abc",
+				}),
+			},
+		},
+		{
+			input: `namespace!:"kubecost","kube-system"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "abc"}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "kubecost"}),
+				newAlloc(&AllocationProperties{Namespace: "kube-system"}),
+			},
+		},
+		{
+			input: `namespace:"kubecost","kube-system"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "kubecost"}),
+				newAlloc(&AllocationProperties{Namespace: "kube-system"}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "abc"}),
+			},
+		},
+		{
+			input: `node:"node a b c" , "node 12 3"` + string('\n') + "+" + string('\n') + string('\r') + `namespace : "kubecost"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "kubecost", Node: "node a b c"}),
+				newAlloc(&AllocationProperties{Namespace: "kubecost", Node: "node 12 3"}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "kubecost"}),
+				newAlloc(&AllocationProperties{Namespace: "kubecost", Node: "nodeabc"}),
+			},
+		},
+		{
+			input: `label[app_abc]:"cost_analyzer"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Namespace: "kubecost",
+					Labels: map[string]string{
+						"test":    "test123",
+						"app_abc": "cost_analyzer",
+					},
+				}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Namespace: "kubecost",
+					Labels: map[string]string{
+						"foo": "bar",
+					},
+				}),
+			},
+		},
+		{
+			input: `services~:"123","abc"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Namespace: "kubecost",
+					Services: []string{
+						"foo",
+						"bar",
+						"123",
+					},
+				}),
+				newAlloc(&AllocationProperties{
+					Namespace: "kubecost",
+					Services: []string{
+						"foo",
+						"abc",
+						"test",
+					},
+				}),
+				newAlloc(&AllocationProperties{
+					Namespace: "kubecost",
+					Services: []string{
+						"123",
+						"abc",
+						"test",
+					},
+				}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Namespace: "kubecost",
+					Services: []string{
+						"foo",
+						"bar",
+					},
+				}),
+			},
+		},
+		{
+			input: `services!:"123","abc"`,
+		},
+		{
+			input: `label[app-abc]:"cost_analyzer"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Labels: map[string]string{
+						"app_abc": "cost_analyzer",
+					},
+				}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Labels: map[string]string{
+						"app-abc": "cost_analyzer",
+					},
+				}),
+			},
+		},
+		{
+			input: `label[app_abc]:"cost_analyzer"+label[foo]:"bar"`,
+		},
+		{
+			input: `
+namespace:"kubecost" +
+label[app]:"cost_analyzer" +
+annotation[a1]:"b2" +
+cluster:"cluster-one" +
+node!:
+  "node-123",
+  "node-456" +
+controllerName:
+  "kubecost-cost-analyzer",
+  "kubecost-prometheus-server" +
+controllerKind!:
+  "daemonset",
+  "statefulset",
+  "job" +
+container!:"123-abc_foo" +
+pod!:"aaaaaaaaaaaaaaaaaaaaaaaaa" +
+services!:"abc123"
+`,
+		},
+		{
+			input: `namespace:"__unallocated__"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: ""}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "kube-system"}),
+			},
+		},
+		{
+			input: `namespace!:"__unallocated__"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: "kubecost"}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Namespace: ""}),
+			},
+		},
+		{
+			input: `controllerKind:"__unallocated__"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{ControllerKind: ""}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{ControllerKind: "deployment"}),
+			},
+		},
+		{
+			input: `controllerKind!:"__unallocated__"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{ControllerKind: "deployment"}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{ControllerKind: ""}),
+			},
+		},
+		{
+			input: `label[app]:"__unallocated__"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Labels: map[string]string{"foo": "bar"}}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Labels: map[string]string{"app": "test"}}),
+			},
+		},
+		{
+			input: `label[app]!:"__unallocated__"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Labels: map[string]string{"app": "test"}}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Labels: map[string]string{"foo": "bar"}}),
+			},
+		},
+		{
+			input: `services:"__unallocated__"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Services: []string{}}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Services: []string{"svc1", "svc2"}}),
+			},
+		},
+		{
+			input: `services!:"__unallocated__"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Services: []string{"svc1", "svc2"}}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{Services: []string{}}),
+			},
+		},
+		{
+			input: `label[cloud.google.com/gke-nodepool]:"gke-nodepool-1"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Labels: map[string]string{
+						"cloud_google_com_gke_nodepool": "gke-nodepool-1",
+					},
+				}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Labels: map[string]string{
+						"cloud.google.com/gke-nodepool": "gke-nodepool-1",
+					},
+				}),
+			},
+		},
+		{
+			input: `label:"cloud.google.com/gke-nodepool"`,
+			shouldMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Labels: map[string]string{
+						"cloud_google_com_gke_nodepool": "gke-nodepool-1",
+					},
+				}),
+			},
+			shouldNotMatch: []*Allocation{
+				newAlloc(&AllocationProperties{
+					Labels: map[string]string{
+						"cloud.google.com/gke-nodepool": "gke-nodepool-1",
+					},
+				}),
+			},
+		},
+	}
+
+	for i, c := range cases {
+		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+			t.Logf("Query: %s", c.input)
+			tree, err := allocParser.Parse(c.input)
+			if err != nil {
+				t.Fatalf("Unexpected parse error: %s", err)
+			}
+			t.Logf("%s", ast.ToPreOrderString(tree))
+
+			matcher, err := allocCompiler.Compile(tree)
+			t.Logf("Result: %s", matcher)
+			if err != nil {
+				t.Fatalf("Unexpected parse error: %s", err)
+			}
+			for _, shouldMatch := range c.shouldMatch {
+				if !matcher.Matches(shouldMatch) {
+					t.Errorf("Failed to match %s", shouldMatch.Name)
+				}
+			}
+			for _, shouldNotMatch := range c.shouldNotMatch {
+				if matcher.Matches(shouldNotMatch) {
+					t.Errorf("Incorrectly matched %s", shouldNotMatch.Name)
+				}
+			}
+		})
+	}
+}
+
+// Allocation Mock
+
+// Maps fields from an allocation to a string value based on an identifier
+func AllocFieldMap(a *Allocation, identifier ast.Identifier) (string, error) {
+	switch identifier.Field.Name {
+	case "namespace":
+		return a.Properties.Namespace, nil
+	case "node":
+		return a.Properties.Node, nil
+	case "cluster":
+		return a.Properties.Cluster, nil
+	case "controllerName":
+		return a.Properties.Controller, nil
+	case "controllerKind":
+		return a.Properties.ControllerKind, nil
+	case "pod":
+		return a.Properties.Pod, nil
+	case "container":
+		return a.Properties.Container, nil
+	case "label":
+		return a.Properties.Labels[identifier.Key], nil
+	case "annotation":
+		return a.Properties.Annotations[identifier.Key], nil
+	}
+
+	return "", fmt.Errorf("Failed to find string identifier on Allocation: %s", identifier.Field.Name)
+}
+
+// Maps slice fields from an allocation to a []string value based on an identifier
+func AllocSliceFieldMap(a *Allocation, identifier ast.Identifier) ([]string, error) {
+	switch identifier.Field.Name {
+	case "services":
+		return a.Properties.Services, nil
+	}
+
+	return nil, fmt.Errorf("Failed to find []string identifier on Allocation: %s", identifier.Field.Name)
+}
+
+// Maps map fields from an allocation to a map[string]string value based on an identifier
+func AllocMapFieldMap(a *Allocation, identifier ast.Identifier) (map[string]string, error) {
+	switch identifier.Field.Name {
+	case "label":
+		return a.Properties.Labels, nil
+	case "annotation":
+		return a.Properties.Annotations, nil
+	}
+	return nil, fmt.Errorf("Failed to find map[string]string identifier on Allocation: %s", identifier.Field.Name)
+}
+
// AllocationProperties is a test mock holding the identifying metadata of an
// Allocation that the filter matchers inspect (see AllocFieldMap,
// AllocSliceFieldMap, and AllocMapFieldMap above).
type AllocationProperties struct {
	Cluster        string            `json:"cluster,omitempty"`
	Node           string            `json:"node,omitempty"`
	Container      string            `json:"container,omitempty"`
	Controller     string            `json:"controller,omitempty"`
	ControllerKind string            `json:"controllerKind,omitempty"`
	Namespace      string            `json:"namespace,omitempty"`
	Pod            string            `json:"pod,omitempty"`
	Services       []string          `json:"services,omitempty"`
	ProviderID     string            `json:"providerID,omitempty"`
	Labels         map[string]string `json:"labels,omitempty"`
	Annotations    map[string]string `json:"annotations,omitempty"`
}
+
+func (p *AllocationProperties) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+
+	strs := []string{}
+
+	if p.Cluster != "" {
+		strs = append(strs, "Cluster:"+p.Cluster)
+	}
+
+	if p.Node != "" {
+		strs = append(strs, "Node:"+p.Node)
+	}
+
+	if p.Container != "" {
+		strs = append(strs, "Container:"+p.Container)
+	}
+
+	if p.Controller != "" {
+		strs = append(strs, "Controller:"+p.Controller)
+	}
+
+	if p.ControllerKind != "" {
+		strs = append(strs, "ControllerKind:"+p.ControllerKind)
+	}
+
+	if p.Namespace != "" {
+		strs = append(strs, "Namespace:"+p.Namespace)
+	}
+
+	if p.Pod != "" {
+		strs = append(strs, "Pod:"+p.Pod)
+	}
+
+	if p.ProviderID != "" {
+		strs = append(strs, "ProviderID:"+p.ProviderID)
+	}
+
+	if len(p.Services) > 0 {
+		strs = append(strs, "Services:"+strings.Join(p.Services, ";"))
+	}
+
+	var labelStrs []string
+	for k, prop := range p.Labels {
+		labelStrs = append(labelStrs, fmt.Sprintf("%s:%s", k, prop))
+	}
+	strs = append(strs, fmt.Sprintf("Labels:{%s}", strings.Join(labelStrs, ",")))
+
+	var annotationStrs []string
+	for k, prop := range p.Annotations {
+		annotationStrs = append(annotationStrs, fmt.Sprintf("%s:%s", k, prop))
+	}
+	strs = append(strs, fmt.Sprintf("Annotations:{%s}", strings.Join(annotationStrs, ",")))
+
+	return fmt.Sprintf("{%s}", strings.Join(strs, "; "))
+}
+
// Allocation is a minimal mock of an allocation used by the filter tests: a
// display name plus the properties the matchers are evaluated against.
type Allocation struct {
	Name       string
	Properties *AllocationProperties
}

+ 21 - 0
pkg/filter21/matcher/not.go

@@ -0,0 +1,21 @@
+package matcher
+
+import "fmt"
+
+// Not negates any filter contained within it
+type Not[T any] struct {
+	Matcher Matcher[T]
+}
+
+func (n *Not[T]) Add(m Matcher[T]) {
+	n.Matcher = m
+}
+
+func (n *Not[T]) String() string {
+	return fmt.Sprintf("(not %s)", n.Matcher.String())
+}
+
+// Matches inverts the result of the child matcher
+func (n *Not[T]) Matches(that T) bool {
+	return !n.Matcher.Matches(that)
+}

+ 42 - 0
pkg/filter21/matcher/or.go

@@ -0,0 +1,42 @@
+package matcher
+
+import (
+	"fmt"
+)
+
+// Or is a set of filters that should be evaluated as a logical
+// OR.
+type Or[T any] struct {
+	Matchers []Matcher[T]
+}
+
+func (o *Or[T]) Add(m Matcher[T]) {
+	o.Matchers = append(o.Matchers, m)
+}
+
+func (o *Or[T]) String() string {
+	s := "(or"
+	for _, f := range o.Matchers {
+		s += fmt.Sprintf(" %s", f)
+	}
+
+	s += ")"
+	return s
+}
+
+// Matches is the canonical in-Go function for determining if T
+// matches OR match rules.
+func (o *Or[T]) Matches(that T) bool {
+	filters := o.Matchers
+	if len(filters) == 0 {
+		return true
+	}
+
+	for _, filter := range filters {
+		if filter.Matches(that) {
+			return true
+		}
+	}
+
+	return false
+}

+ 79 - 0
pkg/filter21/matcher/stringmapmatcher.go

@@ -0,0 +1,79 @@
package matcher

import (
	"fmt"
	"strings"

	"github.com/opencost/opencost/pkg/filter21/ast"
	"github.com/opencost/opencost/pkg/log"
)

// StringMapMatcherFactory leverages a single MapFieldMapper[T] to generate instances of
// StringMapMatcher[T].
type StringMapMatcherFactory[T any] struct {
	fieldMapper MapFieldMapper[T]
}

// NewStringMapMatcherFactory creates a new StringMapMatcher factory for a given T type.
func NewStringMapMatcherFactory[T any](fieldMapper MapFieldMapper[T]) *StringMapMatcherFactory[T] {
	return &StringMapMatcherFactory[T]{
		fieldMapper: fieldMapper,
	}
}

// NewStringMapMatcher creates a new StringMapMatcher using the provided op, field ident and key for comparison
func (smmf *StringMapMatcherFactory[T]) NewStringMapMatcher(op ast.FilterOp, ident ast.Identifier, key string) *StringMapMatcher[T] {
	return &StringMapMatcher[T]{
		Op:          op,
		Identifier:  ident,
		Key:         key,
		fieldMapper: smmf.fieldMapper,
	}
}

// StringMapMatcher matches properties of a T instance which are map[string]string.
// Key is compared against the map's KEYS, not its values: Contains checks key
// existence, and ContainsPrefix/ContainsSuffix check whether any key carries
// the given prefix/suffix.
type StringMapMatcher[T any] struct {
	Op         ast.FilterOp
	Identifier ast.Identifier
	Key        string

	fieldMapper MapFieldMapper[T]
}

// String renders the matcher as an s-expression.
func (smm *StringMapMatcher[T]) String() string {
	return fmt.Sprintf(`(%s %s "%s")`, smm.Op, smm.Identifier.String(), smm.Key)
}

// Matches reports whether the map field selected by the Identifier satisfies
// the matcher's op with respect to Key. A field-mapping failure or an
// unsupported op logs an error and returns false rather than propagating it.
func (smm *StringMapMatcher[T]) Matches(that T) bool {
	thatMap, err := smm.fieldMapper(that, smm.Identifier)
	if err != nil {
		log.Errorf("Filter: StringMapMatcher: could not retrieve field %s: %s", smm.Identifier.String(), err.Error())
		return false
	}

	switch smm.Op {
	case ast.FilterOpContains:
		_, exists := thatMap[smm.Key]
		return exists

	case ast.FilterOpContainsPrefix:
		for k := range thatMap {
			if strings.HasPrefix(k, smm.Key) {
				return true
			}
		}
		return false

	case ast.FilterOpContainsSuffix:
		for k := range thatMap {
			if strings.HasSuffix(k, smm.Key) {
				return true
			}
		}
		return false

	default:
		log.Errorf("Filter: StringMapMatcher: Unhandled matcher op. This is a filter implementation error and requires immediate patching. Op: %s", smm.Op)
		return false
	}
}

+ 73 - 0
pkg/filter21/matcher/stringmatcher.go

@@ -0,0 +1,73 @@
+package matcher
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// StringMatcherFactory leverages a single StringFieldMapper[T] to generate instances of
+// StringMatcher[T].
+type StringMatcherFactory[T any] struct {
+	fieldMapper StringFieldMapper[T]
+}
+
+// NewStringMatcherFactory creates a new StringMatcher factory for a given T type.
+func NewStringMatcherFactory[T any](fieldMapper StringFieldMapper[T]) *StringMatcherFactory[T] {
+	return &StringMatcherFactory[T]{
+		fieldMapper: fieldMapper,
+	}
+}
+
+// NewStringMatcher creates a new StringMatcher using the provided op, field ident, and value comparison.
+func (smf *StringMatcherFactory[T]) NewStringMatcher(op ast.FilterOp, ident ast.Identifier, value string) *StringMatcher[T] {
+	return &StringMatcher[T]{
+		Op:          op,
+		Identifier:  ident,
+		Value:       value,
+		fieldMapper: smf.fieldMapper,
+	}
+}
+
+// StringMatcher matches properties of a T instance which are string.
+type StringMatcher[T any] struct {
+	Op         ast.FilterOp
+	Identifier ast.Identifier
+	Value      string
+
+	fieldMapper StringFieldMapper[T]
+}
+
+func (sm *StringMatcher[T]) String() string {
+	return fmt.Sprintf(`(%s %s "%s")`, sm.Op, sm.Identifier.String(), sm.Value)
+}
+
+// Matches is the canonical in-Go function for determining if T
+// matches string property comparison rules.
+func (sm *StringMatcher[T]) Matches(that T) bool {
+	thatString, err := sm.fieldMapper(that, sm.Identifier)
+	if err != nil {
+		log.Errorf("Filter: StringMatcher: could not retrieve field %s: %s", sm.Identifier.String(), err.Error())
+		return false
+	}
+
+	switch sm.Op {
+	case ast.FilterOpEquals:
+		return thatString == sm.Value
+
+	case ast.FilterOpContains:
+		return strings.Contains(thatString, sm.Value)
+
+	case ast.FilterOpContainsPrefix:
+		return strings.HasPrefix(thatString, sm.Value)
+
+	case ast.FilterOpContainsSuffix:
+		return strings.HasSuffix(thatString, sm.Value)
+
+	default:
+		log.Errorf("Filter: StringMatcher: Unhandled filter op. This is a filter implementation error and requires immediate patching. Op: %s", sm.Op)
+		return false
+	}
+}

+ 91 - 0
pkg/filter21/matcher/stringslicematcher.go

@@ -0,0 +1,91 @@
package matcher

import (
	"fmt"
	"strings"

	"github.com/opencost/opencost/pkg/filter21/ast"
	"github.com/opencost/opencost/pkg/log"
)

// StringSliceMatcherFactory leverages a single SliceFieldMapper[T] to generate instances of
// StringSliceMatcher[T].
type StringSliceMatcherFactory[T any] struct {
	fieldMapper SliceFieldMapper[T]
}

// NewStringSliceMatcherFactory creates a new StringSliceMatcher factory for a given T type.
func NewStringSliceMatcherFactory[T any](fieldMapper SliceFieldMapper[T]) *StringSliceMatcherFactory[T] {
	return &StringSliceMatcherFactory[T]{
		fieldMapper: fieldMapper,
	}
}

// NewStringSliceMatcher creates a new StringSliceMatcher using the provided op, field ident, and value comparison.
func (smf *StringSliceMatcherFactory[T]) NewStringSliceMatcher(op ast.FilterOp, ident ast.Identifier, value string) *StringSliceMatcher[T] {
	return &StringSliceMatcher[T]{
		Op:          op,
		Identifier:  ident,
		Value:       value,
		fieldMapper: smf.fieldMapper,
	}
}

// StringSliceMatcher is the lowest-level type of filter. It represents
// a filter operation (equality, inequality, etc.) on a property that contains a string slice.
type StringSliceMatcher[T any] struct {
	Op         ast.FilterOp
	Identifier ast.Identifier
	Value      string

	fieldMapper SliceFieldMapper[T]
}

// String renders the matcher as an s-expression.
func (ssp *StringSliceMatcher[T]) String() string {
	return fmt.Sprintf(`(%s %s "%s")`, ssp.Op, ssp.Identifier.String(), ssp.Value)
}

// Matches reports whether any element of the slice field selected by the
// Identifier satisfies the matcher's op. Note: for Contains, an EMPTY slice
// matches the empty-string value (see the len == 0 branch below); a
// field-mapping failure or unsupported op logs an error and returns false.
func (ssp *StringSliceMatcher[T]) Matches(that T) bool {
	thatSlice, err := ssp.fieldMapper(that, ssp.Identifier)
	if err != nil {
		log.Errorf("Filter: StringSliceMatcher: could not retrieve field %s: %s", ssp.Identifier.String(), err.Error())
		return false
	}

	switch ssp.Op {

	case ast.FilterOpContains:
		if len(thatSlice) == 0 {
			return ssp.Value == ""
		}

		for _, s := range thatSlice {
			if s == ssp.Value {
				return true
			}
		}

	case ast.FilterOpContainsPrefix:
		for _, s := range thatSlice {
			if strings.HasPrefix(s, ssp.Value) {
				return true
			}
		}

		return false

	case ast.FilterOpContainsSuffix:
		for _, s := range thatSlice {
			if strings.HasSuffix(s, ssp.Value) {
				return true
			}
		}
		return false

	default:
		log.Errorf("Filter: StringSliceMatcher: Unhandled filter op. This is a filter implementation error and requires immediate patching. Op: %s", ssp.Op)
		return false
	}

	// FilterOpContains falls through here when no element matched.
	return false
}

+ 226 - 0
pkg/filter21/ops/ops.go

@@ -0,0 +1,226 @@
// Package ops provides a set of functions that can be used to
// build a filter AST programmatically using basic functions, versus
// building a filter AST leveraging all structural components of the
// tree.
package ops

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/opencost/opencost/pkg/filter21/allocation"
	"github.com/opencost/opencost/pkg/filter21/asset"
	"github.com/opencost/opencost/pkg/filter21/ast"
	"github.com/opencost/opencost/pkg/util/typeutil"
)

// keyFieldType is used to extract field, key, and field type
type keyFieldType interface {
	Field() string
	Key() string
	Type() string
}

// This is somewhat of a fancy solution, but allows us to "register"
// DefaultFieldByName funcs by Field type (keyed on the type's name).
var defaultFieldByType = map[string]any{
	// typeutil.TypeOf[cloud.CloudAggregationField]():        cloud.DefaultFieldByName,
	typeutil.TypeOf[allocation.AllocationField](): allocation.DefaultFieldByName,
	typeutil.TypeOf[asset.AssetField]():           asset.DefaultFieldByName,
	// typeutil.TypeOf[containerstats.ContainerStatsField](): containerstats.DefaultFieldByName,
}
+
+// asField looks up a specific T field instance by name and returns the default
+// ast.Field value for that type.
+func asField[T ~string](field T) *ast.Field {
+	lookup, ok := defaultFieldByType[typeutil.TypeOf[T]()]
+	if !ok {
+		return nil
+	}
+
+	defaultLookup, ok := lookup.(func(T) *ast.Field)
+	if !ok {
+		return nil
+	}
+
+	return defaultLookup(field)
+}
+
// asFieldWithType allows for a field to be looked up by name and type.
// Returns nil when the type is unregistered or when the registered lookup
// does not have the expected func(T) *ast.Field shape.
func asFieldWithType(field string, typ string) *ast.Field {
	lookup, ok := defaultFieldByType[typ]
	if !ok {
		return nil
	}

	// This is the sacrifice being made to allow a simple filter
	// builder style API. In the cases where we have keys, the typical
	// field type gets wrapped in a KeyedFieldType, which is a string
	// that holds all the parameterized data, but no way to get back from
	// string to T-instance.

	// Since we have the type name, we can use that to lookup the specific
	// func(T) *ast.Field function to be used.
	funcType := reflect.TypeOf(lookup)

	// Assert that the function has a single parameter (type T)
	if funcType.NumIn() != 1 {
		return nil
	}

	// Get a reference to the first parameter's type (T)
	inType := funcType.In(0)

	// Create a reflect.Value for the string field, then convert it to
	// the T type from the function's parameter list. (This has to be
	// done to ensure we're executing the call with the correct types)
	fieldParam := reflect.ValueOf(field).Convert(inType)

	// Create a reflect.Value for the lookup function
	callable := reflect.ValueOf(lookup)

	// Call the function with the fieldParam value, and get the result
	result := callable.Call([]reflect.Value{fieldParam})
	if len(result) == 0 {
		return nil
	}

	// Lastly, extract the value from the reflect.Value and ensure we can
	// cast it to *ast.Field
	resultValue := result[0].Interface()
	if f, ok := resultValue.(*ast.Field); ok {
		return f
	}

	return nil
}
+
+// KeyedFieldType is a type alias for field is a special field type that can
+// be deconstructed into multiple components.
+type KeyedFieldType string
+
+func (k KeyedFieldType) Field() string {
+	str := string(k)
+	idx := strings.Index(str, "$")
+	if idx == -1 {
+		return ""
+	}
+
+	return str[0:idx]
+}
+
+func (k KeyedFieldType) Key() string {
+	str := string(k)
+	idx := strings.Index(str, "$")
+	if idx == -1 {
+		return ""
+	}
+
+	lastIndex := strings.LastIndex(str, "$")
+	if lastIndex == -1 {
+		return ""
+	}
+
+	return str[idx+1 : lastIndex]
+}
+
+func (k KeyedFieldType) Type() string {
+	str := string(k)
+	lastIndex := strings.LastIndex(str, "$")
+	if lastIndex == -1 {
+		return ""
+	}
+
+	return str[lastIndex+1:]
+}
+
+func WithKey[T ~string](field T, key string) KeyedFieldType {
+	k := fmt.Sprintf("%s$%s$%s", field, key, typeutil.TypeOf[T]())
+
+	return KeyedFieldType(k)
+}
+
+func toFieldAndKey[T ~string](field T) (*ast.Field, string) {
+	var inner any = field
+	if kft, ok := inner.(keyFieldType); ok {
+		return asFieldWithType(kft.Field(), kft.Type()), kft.Key()
+	}
+
+	return asField(field), ""
+}
+
+func identifier[T ~string](field T) ast.Identifier {
+	f, key := toFieldAndKey(field)
+
+	return ast.Identifier{
+		Field: f,
+		Key:   key,
+	}
+}
+
+func And(node, next ast.FilterNode, others ...ast.FilterNode) ast.FilterNode {
+	operands := append([]ast.FilterNode{node, next}, others...)
+
+	return &ast.AndOp{
+		Operands: operands,
+	}
+}
+
+func Or(node, next ast.FilterNode, others ...ast.FilterNode) ast.FilterNode {
+	operands := append([]ast.FilterNode{node, next}, others...)
+
+	return &ast.OrOp{
+		Operands: operands,
+	}
+}
+
+func Not(node ast.FilterNode) ast.FilterNode {
+	return &ast.NotOp{
+		Operand: node,
+	}
+}
+
+func Eq[T ~string](field T, value string) ast.FilterNode {
+	return &ast.EqualOp{
+		Left:  identifier(field),
+		Right: value,
+	}
+}
+
+func NotEq[T ~string](field T, value string) ast.FilterNode {
+	return Not(Eq(field, value))
+}
+
+func Contains[T ~string](field T, value string) ast.FilterNode {
+	return &ast.ContainsOp{
+		Left:  identifier(field),
+		Right: value,
+	}
+}
+
+func NotContains[T ~string](field T, value string) ast.FilterNode {
+	return Not(Contains(field, value))
+}
+
+func ContainsPrefix[T ~string](field T, value string) ast.FilterNode {
+	return &ast.ContainsPrefixOp{
+		Left:  identifier(field),
+		Right: value,
+	}
+}
+
+func NotContainsPrefix[T ~string](field T, value string) ast.FilterNode {
+	return Not(ContainsPrefix(field, value))
+}
+
+func ContainsSuffix[T ~string](field T, value string) ast.FilterNode {
+	return &ast.ContainsSuffixOp{
+		Left:  identifier(field),
+		Right: value,
+	}
+}
+
+func NotContainsSuffix[T ~string](field T, value string) ast.FilterNode {
+	return Not(ContainsSuffix(field, value))
+}

+ 103 - 0
pkg/filter21/ops/ops_test.go

@@ -0,0 +1,103 @@
+package ops_test
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/opencost/opencost/pkg/filter21/allocation"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/ops"
+)
+
+func TestBasicOpsBuilder(t *testing.T) {
+	parser := allocation.NewAllocationFilterParser()
+
+	filterTree := ops.And(
+		ops.Or(
+			ops.Eq(allocation.FieldNamespace, "kubecost"),
+			ops.Eq(allocation.FieldClusterID, "cluster-one"),
+		),
+		ops.NotContains(allocation.FieldServices, "service-a"),
+		ops.NotEq(ops.WithKey(allocation.FieldLabel, "app"), "cost-analyzer"),
+		ops.Contains(allocation.FieldLabel, "foo"),
+	)
+
+	otherTree, err := parser.Parse(`
+		(namespace: "kubecost" | cluster: "cluster-one") +
+		services!~:"service-a" +
+		label[app]!: "cost-analyzer" +
+		label~:"foo"
+	`)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !cmp.Equal(filterTree, otherTree) {
+		t.Fatalf("Filter Trees are not equal: %s", cmp.Diff(filterTree, otherTree))
+	}
+}
+
// TestLongFormComparison verifies that the terse ops builders produce exactly
// the same AST as the fully spelled-out node literals, including the correct
// Field constructor (plain/slice/map) and key for each identifier.
func TestLongFormComparison(t *testing.T) {
	filterTree := ops.And(
		ops.Or(
			ops.Eq(allocation.FieldNamespace, "kubecost"),
			ops.Eq(allocation.FieldClusterID, "cluster-one"),
		),
		ops.NotContains(allocation.FieldServices, "service-a"),
		ops.NotEq(ops.WithKey(allocation.FieldLabel, "app"), "cost-analyzer"),
		ops.Contains(allocation.FieldLabel, "foo"),
	)

	// Hand-built expected tree: NotEq/NotContains expand to Not-wrapped ops.
	comparisonTree := &ast.AndOp{
		Operands: []ast.FilterNode{
			&ast.OrOp{
				Operands: []ast.FilterNode{
					&ast.EqualOp{
						Left: ast.Identifier{
							Field: ast.NewField(allocation.FieldNamespace),
							Key:   "",
						},
						Right: "kubecost",
					},
					&ast.EqualOp{
						Left: ast.Identifier{
							Field: ast.NewField(allocation.FieldClusterID),
							Key:   "",
						},
						Right: "cluster-one",
					},
				},
			},
			&ast.NotOp{
				Operand: &ast.ContainsOp{
					Left: ast.Identifier{
						Field: ast.NewSliceField(allocation.FieldServices),
						Key:   "",
					},
					Right: "service-a",
				},
			},
			&ast.NotOp{
				Operand: &ast.EqualOp{
					Left: ast.Identifier{
						Field: ast.NewMapField(allocation.FieldLabel),
						Key:   "app",
					},
					Right: "cost-analyzer",
				},
			},
			&ast.ContainsOp{
				Left: ast.Identifier{
					Field: ast.NewMapField(allocation.FieldLabel),
					Key:   "",
				},
				Right: "foo",
			},
		},
	}

	if !cmp.Equal(filterTree, comparisonTree) {
		t.Fatalf("Filter Trees are not equal: %s", cmp.Diff(filterTree, comparisonTree))
	}
}

+ 40 - 0
pkg/filter21/transform/pass.go

@@ -0,0 +1,40 @@
+package transform
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/filter21/ast"
+)
+
+// CompilerPass is an interface which defines an implementation capable of
+// accepting an input AST and making optimizations or changes, and returning
+// a new (or the existing) AST.
+type CompilerPass interface {
+	// Exec executes the pass on the provided AST. This method may either return
+	// a new AST or the existing modified AST. Note that the parameter to this
+	// method may be changed directly.
+	Exec(filter ast.FilterNode) (ast.FilterNode, error)
+}
+
+// func CompilerPass(transformFunc func(ast.FilterNode) (ast.FilterNode, error)) (ast.FilterNode, error) {
+// }
+
+// ApplyAll applies all the compiler passes serially and returns the resulting
+// tree. This method copies the passes AST before executing the compiler passes.
+func ApplyAll(filter ast.FilterNode, passes []CompilerPass) (ast.FilterNode, error) {
+	// return the input filter if there are no passes to run
+	if len(passes) == 0 {
+		return filter, nil
+	}
+
+	// Clone the filter first, then apply the passes
+	var f ast.FilterNode = ast.Clone(filter)
+	for i, pass := range passes {
+		var err error
+		f, err = pass.Exec(f)
+		if err != nil {
+			return nil, fmt.Errorf("compiler pass %d (%+v) failed: %w", i, pass, err)
+		}
+	}
+	return f, nil
+}

+ 67 - 0
pkg/filter21/transform/promlabels.go

@@ -0,0 +1,67 @@
+package transform
+
+import (
+	"regexp"
+
+	"github.com/opencost/opencost/pkg/filter21/ast"
+)
+
+// regex for invalid prometheus label characters
+var invalidKey = regexp.MustCompile(`[^a-zA-Z0-9_]`)
+var promKeyPass CompilerPass = new(promKeySanitizePass)
+
+// PrometheusKeySanitizePass returns a
+func PrometheusKeySanitizePass() CompilerPass {
+	return promKeyPass
+}
+
+type promKeySanitizePass struct{}
+
+// Exec executes the pass on the provided AST. This method may either return
+// a new AST or modify and return the AST parameter. The parameter into this
+// method may be changed directly.
+func (pks *promKeySanitizePass) Exec(filter ast.FilterNode) (ast.FilterNode, error) {
+	ast.PreOrderTraversal(filter, func(fn ast.FilterNode, ts ast.TraversalState) {
+		switch n := fn.(type) {
+		case *ast.EqualOp:
+			sanitize(&n.Left)
+		case *ast.ContainsOp:
+			left := &n.Left
+			// if we use a contains operator on a map, we sanitize the value
+			if left.Field.IsMap() && left.Key == "" {
+				n.Right = sanitizeKey(n.Right)
+			} else {
+				sanitize(left)
+			}
+		case *ast.ContainsPrefixOp:
+			left := &n.Left
+			// if we use a contains operator on a map, we sanitize the value
+			if left.Field.IsMap() && left.Key == "" {
+				n.Right = sanitizeKey(n.Right)
+			} else {
+				sanitize(left)
+			}
+		case *ast.ContainsSuffixOp:
+			left := &n.Left
+			// if we use a contains operator on a map, we sanitize the value
+			if left.Field.IsMap() && left.Key == "" {
+				n.Right = sanitizeKey(n.Right)
+			} else {
+				sanitize(left)
+			}
+		}
+	})
+	return filter, nil
+}
+
+// sanitizes the identifier
+func sanitize(left *ast.Identifier) {
+	if left.Key != "" {
+		left.Key = sanitizeKey(left.Key)
+	}
+}
+
+// replaces all invalid characters with underscore
+func sanitizeKey(s string) string {
+	return invalidKey.ReplaceAllString(s, "_")
+}

+ 42 - 0
pkg/filter21/transform/unallocated.go

@@ -0,0 +1,42 @@
+package transform
+
+import "github.com/opencost/opencost/pkg/filter21/ast"
+
+const unallocatedSuffix = "__unallocated__"
+
+var unallocPass CompilerPass = new(unallocReplacePass)
+
+// UnallocatedReplacementPass returns a CompilerPass implementation which replaces all
+// __unallocated__ with empty string
+func UnallocatedReplacementPass() CompilerPass {
+	return unallocPass
+}
+
+type unallocReplacePass struct{}
+
+// Exec executes the pass on the provided AST. This method may either return
+// a new AST or modify and return the AST parameter. The parameter into this
+// method may be changed directly.
+func (pks *unallocReplacePass) Exec(filter ast.FilterNode) (ast.FilterNode, error) {
+	ast.PreOrderTraversal(filter, func(fn ast.FilterNode, ts ast.TraversalState) {
+		switch n := fn.(type) {
+		case *ast.EqualOp:
+			n.Right = replaceUnallocated(n.Right)
+		case *ast.ContainsOp:
+			n.Right = replaceUnallocated(n.Right)
+		case *ast.ContainsPrefixOp:
+			n.Right = replaceUnallocated(n.Right)
+		case *ast.ContainsSuffixOp:
+			n.Right = replaceUnallocated(n.Right)
+		}
+	})
+	return filter, nil
+}
+
+// replaces unallocated with empty string if valid
+func replaceUnallocated(s string) string {
+	if s == unallocatedSuffix {
+		return ""
+	}
+	return s
+}

+ 63 - 0
pkg/filter21/util/stack.go

@@ -0,0 +1,63 @@
+package util
+
+type stackNode[T any] struct {
+	value    T
+	previous *stackNode[T]
+}
+
+type Stack[T any] struct {
+	top *stackNode[T]
+
+	length int
+}
+
+// NewStack creates a new Stack[T]
+func NewStack[T any]() *Stack[T] {
+	return &Stack[T]{
+		top:    nil,
+		length: 0,
+	}
+}
+
+// Push adds a value to the top of the stack.
+func (s *Stack[T]) Push(value T) {
+	n := &stackNode[T]{
+		value:    value,
+		previous: s.top,
+	}
+
+	s.top = n
+	s.length++
+}
+
+// Pop the top item of the stack and return it
+func (s *Stack[T]) Pop() T {
+	if s.length == 0 {
+		return defaultFor[T]()
+	}
+
+	n := s.top
+	s.top = n.previous
+	s.length--
+
+	return n.value
+}
+
+// Top returns the item on the top of the stack
+func (s *Stack[T]) Top() T {
+	if s.length == 0 {
+		return defaultFor[T]()
+	}
+
+	return s.top.value
+}
+
+// Length returns the total number of elements on the stack.
+func (s *Stack[T]) Length() int {
+	return s.length
+}
+
+func defaultFor[T any]() T {
+	var t T
+	return t
+}

+ 299 - 95
pkg/kubecost/allocation.go

@@ -6,9 +6,13 @@ import (
 	"strings"
 	"time"
 
+	filter21 "github.com/opencost/opencost/pkg/filter21"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/matcher"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util"
 	"github.com/opencost/opencost/pkg/util/timeutil"
+	"golang.org/x/exp/slices"
 )
 
 // TODO Clean-up use of IsEmpty; nil checks should be separated for safety.
@@ -88,6 +92,32 @@ type Allocation struct {
 	// and appended to an Allocation, and so by default is is nil.
 	ProportionalAssetResourceCosts ProportionalAssetResourceCosts `json:"proportionalAssetResourceCosts"` //@bingen:field[ignore]
 	SharedCostBreakdown            SharedCostBreakdowns           `json:"sharedCostBreakdown"`            //@bingen:field[ignore]
+	LoadBalancers                  LbAllocations                  `json:"LoadBalancers"`                  // @bingen:field[version=18]
+
+}
+
// LbAllocations maps a service key to its load balancer allocation.
type LbAllocations map[string]*LbAllocation

// Clone returns a deep copy of the LbAllocations map, or nil for a nil
// receiver. Every LbAllocation field is copied, including Private, which the
// previous implementation silently dropped on clone.
func (orig LbAllocations) Clone() LbAllocations {
	if orig == nil {
		return nil
	}

	newAllocs := make(LbAllocations, len(orig))

	for key, lbAlloc := range orig {
		newAllocs[key] = &LbAllocation{
			Service: lbAlloc.Service,
			Cost:    lbAlloc.Cost,
			Private: lbAlloc.Private, // BUG FIX: was omitted, losing private-LB status on clone
		}
	}
	return newAllocs
}

// LbAllocation describes the cost attributed to a single load balancer.
type LbAllocation struct {
	Service string  `json:"service"`
	Cost    float64 `json:"cost"`
	Private bool    `json:"private"`
}
 
 // RawAllocationOnlyData is information that only belong in "raw" Allocations,
@@ -248,24 +278,29 @@ func (pva *PVAllocation) Equal(that *PVAllocation) bool {
 }
 
 type ProportionalAssetResourceCost struct {
-	Cluster                    string  `json:"cluster"`
-	Node                       string  `json:"node,omitempty"`
-	ProviderID                 string  `json:"providerID,omitempty"`
-	CPUPercentage              float64 `json:"cpuPercentage"`
-	GPUPercentage              float64 `json:"gpuPercentage"`
-	RAMPercentage              float64 `json:"ramPercentage"`
-	NodeResourceCostPercentage float64 `json:"nodeResourceCostPercentage"`
-	GPUTotalCost               float64 `json:"-"`
-	GPUProportionalCost        float64 `json:"-"`
-	CPUTotalCost               float64 `json:"-"`
-	CPUProportionalCost        float64 `json:"-"`
-	RAMTotalCost               float64 `json:"-"`
-	RAMProportionalCost        float64 `json:"-"`
-}
-
-func (parc ProportionalAssetResourceCost) Key(insertByNode bool) string {
-	if insertByNode {
-		return parc.Cluster + "," + parc.Node
+	Cluster                string  `json:"cluster"`
+	Name                   string  `json:"name,omitempty"`
+	Type                   string  `json:"type,omitempty"`
+	ProviderID             string  `json:"providerID,omitempty"`
+	CPUPercentage          float64 `json:"cpuPercentage"`
+	GPUPercentage          float64 `json:"gpuPercentage"`
+	RAMPercentage          float64 `json:"ramPercentage"`
+	LoadBalancerPercentage float64 `json:"loadBalancerPercentage"`
+
+	NodeResourceCostPercentage   float64 `json:"nodeResourceCostPercentage"`
+	GPUTotalCost                 float64 `json:"-"`
+	GPUProportionalCost          float64 `json:"-"`
+	CPUTotalCost                 float64 `json:"-"`
+	CPUProportionalCost          float64 `json:"-"`
+	RAMTotalCost                 float64 `json:"-"`
+	RAMProportionalCost          float64 `json:"-"`
+	LoadBalancerProportionalCost float64 `json:"-"`
+	LoadBalancerTotalCost        float64 `json:"-"`
+}
+
+func (parc ProportionalAssetResourceCost) Key(insertByName bool) string {
+	if insertByName {
+		return parc.Cluster + "," + parc.Name
 	} else {
 		return parc.Cluster
 	}
@@ -283,36 +318,36 @@ func (parcs ProportionalAssetResourceCosts) Clone() ProportionalAssetResourceCos
 	return cloned
 }
 
-func (parcs ProportionalAssetResourceCosts) Insert(parc ProportionalAssetResourceCost, insertByNode bool) {
-	if !insertByNode {
-		parc.Node = ""
+func (parcs ProportionalAssetResourceCosts) Insert(parc ProportionalAssetResourceCost, insertByName bool) {
+	if !insertByName {
+		parc.Name = ""
+		parc.Type = ""
 		parc.ProviderID = ""
 	}
-	if curr, ok := parcs[parc.Key(insertByNode)]; ok {
+	if curr, ok := parcs[parc.Key(insertByName)]; ok {
 
 		toInsert := ProportionalAssetResourceCost{
-			Node:                curr.Node,
-			Cluster:             curr.Cluster,
-			ProviderID:          curr.ProviderID,
-			CPUTotalCost:        curr.CPUTotalCost + parc.CPUTotalCost,
-			CPUProportionalCost: curr.CPUProportionalCost + parc.CPUProportionalCost,
-			RAMTotalCost:        curr.RAMTotalCost + parc.RAMTotalCost,
-			RAMProportionalCost: curr.RAMProportionalCost + parc.RAMProportionalCost,
-			GPUProportionalCost: curr.GPUProportionalCost + parc.GPUProportionalCost,
-			GPUTotalCost:        curr.GPUTotalCost + parc.GPUTotalCost,
-		}
-
-		computePercentages(&toInsert)
-		parcs[parc.Key(insertByNode)] = toInsert
+			Name:                         curr.Name,
+			Type:                         curr.Type,
+			Cluster:                      curr.Cluster,
+			ProviderID:                   curr.ProviderID,
+			CPUProportionalCost:          curr.CPUProportionalCost + parc.CPUProportionalCost,
+			RAMProportionalCost:          curr.RAMProportionalCost + parc.RAMProportionalCost,
+			GPUProportionalCost:          curr.GPUProportionalCost + parc.GPUProportionalCost,
+			LoadBalancerProportionalCost: curr.LoadBalancerProportionalCost + parc.LoadBalancerProportionalCost,
+		}
+
+		ComputePercentages(&toInsert)
+		parcs[parc.Key(insertByName)] = toInsert
 	} else {
-		computePercentages(&parc)
-		parcs[parc.Key(insertByNode)] = parc
+		ComputePercentages(&parc)
+		parcs[parc.Key(insertByName)] = parc
 	}
 }
 
-func computePercentages(toInsert *ProportionalAssetResourceCost) {
+func ComputePercentages(toInsert *ProportionalAssetResourceCost) {
 	// compute percentages
-	totalCost := toInsert.RAMTotalCost + toInsert.CPUTotalCost + toInsert.GPUTotalCost
+	totalNodeCost := toInsert.RAMTotalCost + toInsert.CPUTotalCost + toInsert.GPUTotalCost
 
 	if toInsert.CPUTotalCost > 0 {
 		toInsert.CPUPercentage = toInsert.CPUProportionalCost / toInsert.CPUTotalCost
@@ -322,21 +357,25 @@ func computePercentages(toInsert *ProportionalAssetResourceCost) {
 		toInsert.GPUPercentage = toInsert.GPUProportionalCost / toInsert.GPUTotalCost
 	}
 
+	if toInsert.LoadBalancerTotalCost > 0 {
+		toInsert.LoadBalancerPercentage = toInsert.LoadBalancerProportionalCost / toInsert.LoadBalancerTotalCost
+	}
+
 	if toInsert.RAMTotalCost > 0 {
 		toInsert.RAMPercentage = toInsert.RAMProportionalCost / toInsert.RAMTotalCost
 	}
 
-	ramFraction := toInsert.RAMTotalCost / totalCost
+	ramFraction := toInsert.RAMTotalCost / totalNodeCost
 	if ramFraction != ramFraction || ramFraction < 0 {
 		ramFraction = 0
 	}
 
-	cpuFraction := toInsert.CPUTotalCost / totalCost
+	cpuFraction := toInsert.CPUTotalCost / totalNodeCost
 	if cpuFraction != cpuFraction || cpuFraction < 0 {
 		cpuFraction = 0
 	}
 
-	gpuFraction := toInsert.GPUTotalCost / totalCost
+	gpuFraction := toInsert.GPUTotalCost / totalNodeCost
 	if gpuFraction != gpuFraction || gpuFraction < 0 {
 		gpuFraction = 0
 	}
@@ -348,12 +387,12 @@ func computePercentages(toInsert *ProportionalAssetResourceCost) {
 func (parcs ProportionalAssetResourceCosts) Add(that ProportionalAssetResourceCosts) {
 
 	for _, parc := range that {
-		// if node field is empty, we know this is a cluster level PARC aggregation
-		insertByNode := true
-		if parc.Node == "" {
-			insertByNode = false
+		// if name field is empty, we know this is a cluster level PARC aggregation
+		insertByName := true
+		if parc.Name == "" {
+			insertByName = false
 		}
-		parcs.Insert(parc, insertByNode)
+		parcs.Insert(parc, insertByName)
 	}
 }
 
@@ -473,6 +512,7 @@ func (a *Allocation) Clone() *Allocation {
 		RawAllocationOnly:              a.RawAllocationOnly.Clone(),
 		ProportionalAssetResourceCosts: a.ProportionalAssetResourceCosts.Clone(),
 		SharedCostBreakdown:            a.SharedCostBreakdown.Clone(),
+		LoadBalancers:                  a.LoadBalancers.Clone(),
 	}
 }
 
@@ -818,12 +858,21 @@ func (a *Allocation) IsUnallocated() bool {
 }
 
 // IsUnmounted is true if the given Allocation represents unmounted volume costs.
+// Note: Due to change in https://github.com/opencost/opencost/pull/1477 made to include Unmounted
+// PVC cost inside namespace we need to check unmounted suffix across all the three major properties
+// to actually classify it as unmounted.
 func (a *Allocation) IsUnmounted() bool {
 	if a == nil {
 		return false
 	}
 
-	return strings.Contains(a.Name, UnmountedSuffix)
+	props := a.Properties
+	if props != nil {
+		if props.Container == UnmountedSuffix && props.Namespace == UnmountedSuffix && props.Pod == UnmountedSuffix {
+			return true
+		}
+	}
+	return false
 }
 
 // Minutes returns the number of minutes the Allocation represents, as defined
@@ -995,11 +1044,38 @@ func (a *Allocation) add(that *Allocation) {
 	a.NetworkCostAdjustment += that.NetworkCostAdjustment
 	a.LoadBalancerCostAdjustment += that.LoadBalancerCostAdjustment
 
+	// Sum LoadBalancer Allocations
+	a.LoadBalancers = a.LoadBalancers.Add(that.LoadBalancers)
+
 	// Any data that is in a "raw allocation only" is not valid in any
 	// sort of cumulative Allocation (like one that is added).
 	a.RawAllocationOnly = nil
 }
 
+func (thisLbAllocs LbAllocations) Add(thatLbAllocs LbAllocations) LbAllocations {
+	// loop through both sets of LB allocations, building a new LBAllocations that has the summed set
+	mergedLbAllocs := thisLbAllocs.Clone()
+	if thatLbAllocs != nil {
+		if mergedLbAllocs == nil {
+			mergedLbAllocs = LbAllocations{}
+		}
+		for lbKey, thatlbAlloc := range thatLbAllocs {
+			thisLbAlloc, ok := mergedLbAllocs[lbKey]
+			if !ok {
+				thisLbAlloc = &LbAllocation{
+					Service: thatlbAlloc.Service,
+					Cost:    thatlbAlloc.Cost,
+				}
+				mergedLbAllocs[lbKey] = thisLbAlloc
+			} else {
+				thisLbAlloc.Cost += thatlbAlloc.Cost
+			}
+
+		}
+	}
+	return mergedLbAllocs
+}
+
 // AllocationSet stores a set of Allocations, each with a unique name, that share
 // a window. An AllocationSet is mutable, so treat it like a threadsafe map.
 type AllocationSet struct {
@@ -1037,7 +1113,7 @@ func NewAllocationSet(start, end time.Time, allocs ...*Allocation) *AllocationSe
 // simple flag for sharing idle resources.
 type AllocationAggregationOptions struct {
 	AllocationTotalsStore                 AllocationTotalsStore
-	Filter                                AllocationFilter
+	Filter                                filter21.Filter
 	IdleByNode                            bool
 	IncludeProportionalAssetResourceCosts bool
 	LabelConfig                           *LabelConfig
@@ -1045,6 +1121,8 @@ type AllocationAggregationOptions struct {
 	Reconcile                             bool
 	ReconcileNetwork                      bool
 	ShareFuncs                            []AllocationMatchFunc
+	SharedNamespaces                      []string
+	SharedLabels                          map[string][]string
 	ShareIdle                             string
 	ShareSplit                            string
 	SharedHourlyCosts                     map[string]float64
@@ -1053,6 +1131,13 @@ type AllocationAggregationOptions struct {
 	IncludeAggregatedMetadata             bool
 }
 
+func isFilterEmpty(filter AllocationMatcher) bool {
+	if _, isAllPass := filter.(*matcher.AllPass[*Allocation]); isAllPass {
+		return true
+	}
+	return false
+}
+
 // AggregateBy aggregates the Allocations in the given AllocationSet by the given
 // AllocationProperty. This will only be legal if the AllocationSet is divisible by the
 // given AllocationProperty; e.g. Containers can be divided by Namespace, but not vice-a-versa.
@@ -1120,10 +1205,19 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		options.ShareIdle = ShareNone
 	}
 
-	// Pre-flatten the filter so we can just check == nil to see if there are
-	// filters.
-	if options.Filter != nil {
-		options.Filter = options.Filter.Flattened()
+	var filter AllocationMatcher
+	if options.Filter == nil {
+		filter = &matcher.AllPass[*Allocation]{}
+	} else {
+		compiler := NewAllocationMatchCompiler(options.LabelConfig)
+		var err error
+		filter, err = compiler.Compile(options.Filter)
+		if err != nil {
+			return fmt.Errorf("compiling filter '%s': %w", ast.ToPreOrderShortString(options.Filter), err)
+		}
+	}
+	if filter == nil {
+		return fmt.Errorf("unexpected nil filter")
 	}
 
 	var allocatedTotalsMap map[string]map[string]float64
@@ -1132,10 +1226,16 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// an empty slice implies that we should aggregate everything. See
 	// generateKey for why that makes sense.
 	shouldAggregate := aggregateBy != nil
-	shouldFilter := options.Filter != nil
+	shouldFilter := !isFilterEmpty(filter)
 	shouldShare := len(options.SharedHourlyCosts) > 0 || len(options.ShareFuncs) > 0
 	if !shouldAggregate && !shouldFilter && !shouldShare && options.ShareIdle == ShareNone && !options.IncludeProportionalAssetResourceCosts {
 		// There is nothing for AggregateBy to do, so simply return nil
+		// before returning, set aggregated metadata inclusion in properties
+		if options.IncludeAggregatedMetadata {
+			for index := range as.Allocations {
+				as.Allocations[index].Properties.AggregatedMetadata = true
+			}
+		}
 		return nil
 	}
 
@@ -1171,9 +1271,13 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// them to their respective sets, removing them from the set of allocations
 	// to aggregate.
 	for _, alloc := range as.Allocations {
-		// if the user does not want any aggregated labels/annotations returned
-		// set the properties accordingly
+
 		alloc.Properties.AggregatedMetadata = options.IncludeAggregatedMetadata
+		// build a parallel set of allocations to only be used
+		// for computing PARCs
+		if options.IncludeProportionalAssetResourceCosts {
+			parcSet.Insert(alloc.Clone())
+		}
 
 		// External allocations get aggregated post-hoc (see step 6) and do
 		// not necessarily contain complete sets of properties, so they are
@@ -1198,12 +1302,6 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 				aggSet.Insert(alloc)
 			}
 
-			// build a parallel set of allocations to only be used
-			// for computing PARCs
-			if options.IncludeProportionalAssetResourceCosts {
-				parcSet.Insert(alloc.Clone())
-			}
-
 			continue
 		}
 
@@ -1274,7 +1372,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// (2b) If proportional asset resource costs are to be included, compute them
 	// and add them to the allocations.
 	if options.IncludeProportionalAssetResourceCosts {
-		err := deriveProportionalAssetResourceCosts(options, as, shareSet)
+		err := deriveProportionalAssetResourceCosts(options, as, shareSet, parcSet)
 		if err != nil {
 			log.Debugf("AggregateBy: failed to derive proportional asset resource costs from idle coefficients: %s", err)
 			return fmt.Errorf("AggregateBy: failed to derive proportional asset resource costs from idle coefficients: %s", err)
@@ -1356,14 +1454,9 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 			log.DedupedWarningf(3, "AllocationSet.AggregateBy: missing idleId for allocation: %s", alloc.Name)
 		}
 
-		skip := false
-
 		// (3) If the allocation does not match the filter, immediately skip the
 		// allocation.
-		if options.Filter != nil {
-			skip = !options.Filter.Matches(alloc)
-		}
-		if skip {
+		if !filter.Matches(alloc) {
 			// If we are tracking idle filtration coefficients, delete the
 			// entry corresponding to the filtered allocation. (Deleting the
 			// entry will result in that proportional amount being removed
@@ -1539,7 +1632,12 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 					if alloc.SharedCostBreakdown == nil {
 						alloc.SharedCostBreakdown = map[string]SharedCostBreakdown{}
 					}
-					sharedCostName := sharedAlloc.generateKey(aggregateBy, options.LabelConfig)
+
+					sharedCostName, err := sharedAlloc.determineSharingName(options)
+					if err != nil {
+						return fmt.Errorf("failed to group shared costs: %s", err)
+					}
+
 					// check if current allocation is a shared flat overhead cost
 					if strings.Contains(sharedAlloc.Name, SharedSuffix) {
 						sharedCostName = "overheadCost"
@@ -1570,11 +1668,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// exact key match, given each external allocation's proerties, and
 	// aggregate if an exact match is found.
 	for _, alloc := range externalSet.Allocations {
-		skip := false
-		if options.Filter != nil {
-			skip = !options.Filter.Matches(alloc)
-		}
-		if !skip {
+		if filter.Matches(alloc) {
 			key := alloc.generateKey(aggregateBy, options.LabelConfig)
 
 			alloc.Name = key
@@ -1604,11 +1698,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	if idleSet.Length() > 0 {
 		for _, idleAlloc := range idleSet.Allocations {
 			// if the idle does not apply to the non-filtered values, skip it
-			skip := false
-			if options.Filter != nil {
-				skip = !options.Filter.Matches(idleAlloc)
-			}
-			if skip {
+			if !filter.Matches(idleAlloc) {
 				continue
 			}
 
@@ -1719,6 +1809,21 @@ func computeShareCoeffs(aggregateBy []string, options *AllocationAggregationOpti
 	// counts each aggregation proportionally to its respective costs
 	shareType := options.ShareSplit
 
+	var filter AllocationMatcher
+	if options.Filter == nil {
+		filter = &matcher.AllPass[*Allocation]{}
+	} else {
+		compiler := NewAllocationMatchCompiler(options.LabelConfig)
+		var err error
+		filter, err = compiler.Compile(options.Filter)
+		if err != nil {
+			return nil, fmt.Errorf("compiling filter '%s': %w", ast.ToPreOrderShortString(options.Filter), err)
+		}
+	}
+	if filter == nil {
+		return nil, fmt.Errorf("unexpected nil filter")
+	}
+
 	// Record allocation values first, then normalize by totals to get percentages
 	for _, alloc := range as.Allocations {
 		if alloc.IsIdle() {
@@ -1740,11 +1845,7 @@ func computeShareCoeffs(aggregateBy []string, options *AllocationAggregationOpti
 		// of a non-filtered allocation will be conserved even when the filter
 		// is removed. (Otherwise, all the shared cost will get redistributed
 		// over the unfiltered results, inflating their shared costs.)
-		filtered := false
-		if options.Filter != nil {
-			filtered = !options.Filter.Matches(alloc)
-		}
-		if filtered {
+		if !filter.Matches(alloc) {
 			name = "__filtered__"
 		}
 
@@ -1872,7 +1973,7 @@ func computeIdleCoeffs(options *AllocationAggregationOptions, as *AllocationSet,
 	return coeffs, totals, nil
 }
 
-func deriveProportionalAssetResourceCosts(options *AllocationAggregationOptions, as *AllocationSet, shareSet *AllocationSet) error {
+func deriveProportionalAssetResourceCosts(options *AllocationAggregationOptions, as *AllocationSet, shareSet, parcsSet *AllocationSet) error {
 
 	// Compute idle coefficients, then save them in AllocationAggregationOptions
 	// [idle_id][allocation name][resource] = [coeff]
@@ -1883,11 +1984,7 @@ func deriveProportionalAssetResourceCosts(options *AllocationAggregationOptions,
 	totals := map[string]map[string]float64{}
 
 	// Record allocation values first, then normalize by totals to get percentages
-	for _, alloc := range as.Allocations {
-		if alloc.IsIdle() {
-			// Skip idle allocations in coefficient calculation
-			continue
-		}
+	for _, alloc := range parcsSet.Allocations {
 
 		idleId, err := alloc.getIdleId(options)
 		if err != nil {
@@ -1908,6 +2005,22 @@ func deriveProportionalAssetResourceCosts(options *AllocationAggregationOptions,
 		if _, ok := coeffs[idleId][name]; !ok {
 			coeffs[idleId][name] = map[string]float64{}
 		}
+		// idle IDs for load balancers are their services
+		for key := range alloc.LoadBalancers {
+			if _, ok := totals[key]; !ok {
+				totals[key] = map[string]float64{}
+			}
+
+			if _, ok := coeffs[key]; !ok {
+				coeffs[key] = map[string]map[string]float64{}
+			}
+			if _, ok := coeffs[key][name]; !ok {
+				coeffs[key][name] = map[string]float64{}
+			}
+
+			coeffs[key][name]["loadbalancer"] += alloc.LoadBalancerTotalCost()
+			totals[key]["loadbalancer"] += alloc.LoadBalancerTotalCost()
+		}
 
 		coeffs[idleId][name]["cpu"] += alloc.CPUTotalCost()
 		coeffs[idleId][name]["gpu"] += alloc.GPUTotalCost()
@@ -1953,6 +2066,23 @@ func deriveProportionalAssetResourceCosts(options *AllocationAggregationOptions,
 		totals[idleId]["cpu"] += alloc.CPUTotalCost()
 		totals[idleId]["gpu"] += alloc.GPUTotalCost()
 		totals[idleId]["ram"] += alloc.RAMTotalCost()
+
+		// idle IDs for load balancers are their services
+		for key := range alloc.LoadBalancers {
+			if _, ok := totals[key]; !ok {
+				totals[key] = map[string]float64{}
+			}
+
+			if _, ok := coeffs[key]; !ok {
+				coeffs[key] = map[string]map[string]float64{}
+			}
+			if _, ok := coeffs[key][name]; !ok {
+				coeffs[key][name] = map[string]float64{}
+			}
+			coeffs[key][name]["loadbalancer"] += alloc.LoadBalancerTotalCost()
+			totals[key]["loadbalancer"] += alloc.LoadBalancerTotalCost()
+		}
+
 	}
 
 	// after totals are computed, loop through and set parcs on allocations
@@ -1965,20 +2095,94 @@ func deriveProportionalAssetResourceCosts(options *AllocationAggregationOptions,
 		alloc.ProportionalAssetResourceCosts = ProportionalAssetResourceCosts{}
 		alloc.ProportionalAssetResourceCosts.Insert(ProportionalAssetResourceCost{
 			Cluster:             alloc.Properties.Cluster,
-			Node:                alloc.Properties.Node,
+			Name:                alloc.Properties.Node,
+			Type:                "Node",
 			ProviderID:          alloc.Properties.ProviderID,
-			GPUTotalCost:        totals[idleId]["gpu"],
-			CPUTotalCost:        totals[idleId]["cpu"],
-			RAMTotalCost:        totals[idleId]["ram"],
 			GPUProportionalCost: coeffs[idleId][alloc.Name]["gpu"],
 			CPUProportionalCost: coeffs[idleId][alloc.Name]["cpu"],
 			RAMProportionalCost: coeffs[idleId][alloc.Name]["ram"],
 		}, options.IdleByNode)
+		// insert a separate PARC for the load balancer
+		if alloc.LoadBalancerCost != 0 {
+			for key, svc := range alloc.LoadBalancers {
+
+				alloc.ProportionalAssetResourceCosts.Insert(ProportionalAssetResourceCost{
+					Cluster:                      alloc.Properties.Cluster,
+					Name:                         svc.Service,
+					Type:                         "LoadBalancer",
+					LoadBalancerProportionalCost: coeffs[key][alloc.Name]["loadbalancer"],
+				}, options.IdleByNode)
+			}
+		}
+
 	}
 
 	return nil
 }
 
+func (a *Allocation) determineSharingName(options *AllocationAggregationOptions) (string, error) {
+	if a == nil {
+		return "", fmt.Errorf("determineSharingName called on nil Allocation")
+	} else if options == nil {
+		return "unknown", nil
+	}
+
+	// grab SharedLabels keys and sort them, to keep this function deterministic
+	var labelKeys []string
+	for labelKey, _ := range options.SharedLabels {
+		labelKeys = append(labelKeys, labelKey)
+	}
+	slices.Sort(labelKeys)
+
+	var sharedAggregateBy []string
+	var sharedLabels [][]string
+	for _, labelKey := range labelKeys {
+		sharedAgg := fmt.Sprintf("label:%s", labelKey)
+		if !slices.Contains(sharedAggregateBy, sharedAgg) {
+			sharedAggregateBy = append(sharedAggregateBy, sharedAgg)
+		}
+		sharedLabels = append(sharedLabels, options.SharedLabels[labelKey])
+	}
+	if len(options.SharedNamespaces) > 0 {
+		sharedAggregateBy = append(sharedAggregateBy, "namespace")
+	}
+	sharedCostName := a.generateKey(sharedAggregateBy, options.LabelConfig)
+
+	// get each value in the generated key, then reset the name
+	sharedCostNameValues := strings.Split(sharedCostName, "/")
+	sharedCostName = ""
+
+	// if we don't have as many values as aggregateBys, something went wrong in generateKey
+	if len(sharedCostNameValues) != len(sharedAggregateBy) {
+		log.Warnf("Unable to determine share cost group for allocation \"%s\"", a.Name)
+	} else {
+		// try to match to the first label
+		for i, sharedLabelValues := range sharedLabels {
+			allocLabel := sharedCostNameValues[i]
+			if slices.Contains(sharedLabelValues, allocLabel) {
+				return allocLabel, nil
+			}
+		}
+
+		// if we didn't match to a label, try to match to a namespace
+		if len(options.SharedNamespaces) > 0 {
+			// namespace will always be the last value, if SharedNamespaces is set
+			allocNamespace := sharedCostNameValues[len(sharedCostNameValues)-1]
+			if slices.Contains(options.SharedNamespaces, allocNamespace) {
+				return allocNamespace, nil
+			}
+		}
+
+		// if neither the labels nor the namespaces matched, we log a warning and mark this allocation
+		// as unknown
+		if len(sharedCostName) == 0 {
+			log.Warnf("Failed to determine shared cost grouping for allocation \"%s\"", a.Name)
+		}
+	}
+
+	return "unknown", nil
+}
+
 // getIdleId returns the providerId or cluster of an Allocation depending on the IdleByNode
 // option in the AllocationAggregationOptions and an error if the respective field is missing
 func (a *Allocation) getIdleId(options *AllocationAggregationOptions) (string, error) {

+ 2 - 1
pkg/kubecost/allocation_json.go

@@ -55,6 +55,7 @@ type AllocationJSON struct {
 	TotalEfficiency                *float64                        `json:"totalEfficiency"`
 	RawAllocationOnly              *RawAllocationOnlyData          `json:"rawAllocationOnly,omitempty"`
 	ProportionalAssetResourceCosts *ProportionalAssetResourceCosts `json:"proportionalAssetResourceCosts,omitempty"`
+	LoadBalancers                  LbAllocations                   `json:"lbAllocations"`
 	SharedCostBreakdown            *SharedCostBreakdowns           `json:"sharedCostBreakdown,omitempty"`
 }
 
@@ -106,8 +107,8 @@ func (aj *AllocationJSON) BuildFromAllocation(a *Allocation) {
 	aj.TotalEfficiency = formatFloat64ForResponse(a.TotalEfficiency())
 	aj.RawAllocationOnly = a.RawAllocationOnly
 	aj.ProportionalAssetResourceCosts = &a.ProportionalAssetResourceCosts
+	aj.LoadBalancers = a.LoadBalancers
 	aj.SharedCostBreakdown = &a.SharedCostBreakdown
-
 }
 
 // formatFloat64ForResponse - take an existing float64, round it to 6 decimal places and return is possible, or return nil if invalid

+ 377 - 155
pkg/kubecost/allocation_test.go

@@ -9,12 +9,39 @@ import (
 	"time"
 
 	"github.com/davecgh/go-spew/spew"
+	filter21 "github.com/opencost/opencost/pkg/filter21"
+	afilter "github.com/opencost/opencost/pkg/filter21/allocation"
+	"github.com/opencost/opencost/pkg/filter21/ops"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util"
 	"github.com/opencost/opencost/pkg/util/json"
 	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
+var filterParser = afilter.NewAllocationFilterParser()
+var matcherCompiler = NewAllocationMatchCompiler(nil)
+
+// useful for creating filters on the fly when testing. panics
+// on parse errors!
+func mustParseFilter(s string) filter21.Filter {
+	filter, err := filterParser.Parse(s)
+	if err != nil {
+		panic(err)
+	}
+	return filter
+}
+
+// useful for creating filters on the fly when testing. panics
+// on parse or compile errors!
+func mustCompileFilter(s string) AllocationMatcher {
+	filter := mustParseFilter(s)
+	m, err := matcherCompiler.Compile(filter)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
 func TestAllocation_Add(t *testing.T) {
 	var nilAlloc *Allocation
 	zeroAlloc := &Allocation{}
@@ -1087,51 +1114,33 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			expectedParcResults: map[string]ProportionalAssetResourceCosts{
 				"namespace1": {
 					"cluster1": ProportionalAssetResourceCost{
-						Cluster:                    "cluster1",
-						Node:                       "",
-						ProviderID:                 "",
-						CPUPercentage:              0.16667,
-						GPUPercentage:              0.16667,
-						RAMPercentage:              0.27083,
-						NodeResourceCostPercentage: 0.22619,
-						GPUTotalCost:               18,
-						GPUProportionalCost:        3,
-						CPUTotalCost:               18,
-						CPUProportionalCost:        3,
-						RAMTotalCost:               48,
-						RAMProportionalCost:        13,
+						Cluster:             "cluster1",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 3,
+						CPUProportionalCost: 3,
+						RAMProportionalCost: 13,
 					},
 				},
 				"namespace2": {
 					"cluster1": ProportionalAssetResourceCost{
-						Cluster:                    "cluster1",
-						Node:                       "",
-						ProviderID:                 "",
-						CPUPercentage:              0.16667,
-						GPUPercentage:              0.16667,
-						RAMPercentage:              0.0625,
-						NodeResourceCostPercentage: 0.10714,
-						GPUTotalCost:               18,
-						GPUProportionalCost:        3,
-						CPUTotalCost:               18,
-						CPUProportionalCost:        3,
-						RAMTotalCost:               48,
-						RAMProportionalCost:        3,
+						Cluster:             "cluster1",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 3,
+						CPUProportionalCost: 3,
+						RAMProportionalCost: 3,
 					},
 					"cluster2": ProportionalAssetResourceCost{
-						Cluster:                    "cluster2",
-						Node:                       "",
-						ProviderID:                 "",
-						CPUPercentage:              0.16667,
-						GPUPercentage:              0.16667,
-						RAMPercentage:              0.16667,
-						NodeResourceCostPercentage: 0.16667,
-						GPUTotalCost:               18,
-						GPUProportionalCost:        3,
-						CPUTotalCost:               18,
-						CPUProportionalCost:        3,
-						RAMTotalCost:               18,
-						RAMProportionalCost:        3,
+						Cluster:             "cluster2",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 3,
+						CPUProportionalCost: 3,
+						RAMProportionalCost: 3,
 					},
 				},
 			},
@@ -1191,11 +1200,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationClusterProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter: AllocationFilterCondition{
-					Field: FilterClusterID,
-					Op:    FilterEquals,
-					Value: "cluster1",
-				},
+				Filter:    mustParseFilter(`cluster:"cluster1"`),
 				ShareIdle: ShareNone,
 			},
 			numResults: 1 + numIdle,
@@ -1213,7 +1218,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationClusterProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:    AllocationFilterCondition{Field: FilterClusterID, Op: FilterEquals, Value: "cluster1"},
+				Filter:    mustParseFilter(`cluster:"cluster1"`),
 				ShareIdle: ShareWeighted,
 			},
 			numResults: 1,
@@ -1230,7 +1235,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:    AllocationFilterCondition{Field: FilterClusterID, Op: FilterEquals, Value: "cluster1"},
+				Filter:    mustParseFilter(`cluster:"cluster1"`),
 				ShareIdle: ShareNone,
 			},
 			numResults: 2 + numIdle,
@@ -1249,7 +1254,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationClusterProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:    AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				Filter:    mustParseFilter(`namespace:"namespace2"`),
 				ShareIdle: ShareNone,
 			},
 			numResults: numClusters + numIdle,
@@ -1292,7 +1297,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:    AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				Filter:    mustParseFilter(`namespace:"namespace2"`),
 				ShareIdle: ShareWeighted,
 			},
 			numResults: 1,
@@ -1317,7 +1322,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:            AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				Filter:            mustParseFilter(`namespace:"namespace2"`),
 				SharedHourlyCosts: map[string]float64{"total": sharedOverheadHourlyCost},
 				ShareSplit:        ShareWeighted,
 			},
@@ -1336,7 +1341,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:     AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				Filter:     mustParseFilter(`namespace:"namespace2"`),
 				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
 				ShareSplit: ShareWeighted,
 			},
@@ -1355,7 +1360,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:     AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				Filter:     mustParseFilter(`namespace:"namespace2"`),
 				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
 				ShareSplit: ShareWeighted,
 				ShareIdle:  ShareWeighted,
@@ -1461,7 +1466,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:     AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				Filter:     mustParseFilter(`namespace:"namespace2"`),
 				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
 				ShareSplit: ShareWeighted,
 				ShareIdle:  ShareWeighted,
@@ -1507,7 +1512,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:            AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				Filter:            mustParseFilter(`namespace:"namespace2"`),
 				ShareSplit:        ShareWeighted,
 				ShareIdle:         ShareWeighted,
 				SharedHourlyCosts: map[string]float64{"total": sharedOverheadHourlyCost},
@@ -1543,113 +1548,71 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			expectedParcResults: map[string]ProportionalAssetResourceCosts{
 				"namespace1": {
 					"cluster1,c1nodes": ProportionalAssetResourceCost{
-						Cluster:                    "cluster1",
-						Node:                       "c1nodes",
-						ProviderID:                 "c1nodes",
-						CPUPercentage:              0.16667,
-						GPUPercentage:              0.16667,
-						RAMPercentage:              0.27083,
-						NodeResourceCostPercentage: 0.22619,
-						GPUTotalCost:               18,
-						GPUProportionalCost:        3,
-						CPUTotalCost:               18,
-						CPUProportionalCost:        3,
-						RAMTotalCost:               48,
-						RAMProportionalCost:        13,
+						Cluster:             "cluster1",
+						Name:                "c1nodes",
+						Type:                "Node",
+						ProviderID:          "c1nodes",
+						GPUProportionalCost: 3,
+						CPUProportionalCost: 3,
+						RAMProportionalCost: 13,
 					},
 					"cluster2,node2": ProportionalAssetResourceCost{
-						Cluster:                    "cluster2",
-						Node:                       "node2",
-						ProviderID:                 "node2",
-						CPUPercentage:              0.16667,
-						GPUPercentage:              0.16667,
-						RAMPercentage:              0.0625,
-						NodeResourceCostPercentage: 0.10714,
-						GPUTotalCost:               18,
-						GPUProportionalCost:        3,
-						CPUTotalCost:               18,
-						CPUProportionalCost:        3,
-						RAMTotalCost:               48,
-						RAMProportionalCost:        3,
+						Cluster:             "cluster2",
+						Name:                "node2",
+						Type:                "Node",
+						ProviderID:          "node2",
+						GPUProportionalCost: 3,
+						CPUProportionalCost: 3,
+						RAMProportionalCost: 3,
 					},
 				},
 				"namespace2": {
 					"cluster1,c1nodes": ProportionalAssetResourceCost{
-						Cluster:                    "cluster1",
-						Node:                       "c1nodes",
-						ProviderID:                 "c1nodes",
-						CPUPercentage:              0.16667,
-						GPUPercentage:              0.16667,
-						RAMPercentage:              0.0625,
-						NodeResourceCostPercentage: 0.10714,
-						GPUTotalCost:               18,
-						GPUProportionalCost:        3,
-						CPUTotalCost:               18,
-						CPUProportionalCost:        3,
-						RAMTotalCost:               48,
-						RAMProportionalCost:        3,
+						Cluster:             "cluster1",
+						Name:                "c1nodes",
+						Type:                "Node",
+						ProviderID:          "c1nodes",
+						GPUProportionalCost: 3,
+						CPUProportionalCost: 3,
+						RAMProportionalCost: 3,
 					},
 					"cluster2,node1": ProportionalAssetResourceCost{
-						Cluster:                    "cluster2",
-						Node:                       "node1",
-						ProviderID:                 "node1",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.5,
-						NodeResourceCostPercentage: 0.5,
-						GPUTotalCost:               4,
-						GPUProportionalCost:        2,
-						CPUTotalCost:               4,
-						CPUProportionalCost:        2,
-						RAMTotalCost:               4,
-						RAMProportionalCost:        2,
+						Cluster:             "cluster2",
+						Name:                "node1",
+						Type:                "Node",
+						ProviderID:          "node1",
+						GPUProportionalCost: 2,
+						CPUProportionalCost: 2,
+						RAMProportionalCost: 2,
 					},
 					"cluster2,node2": ProportionalAssetResourceCost{
-						Cluster:                    "cluster2",
-						Node:                       "node2",
-						ProviderID:                 "node2",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.5,
-						NodeResourceCostPercentage: 0.5,
-						GPUTotalCost:               2,
-						GPUProportionalCost:        1,
-						CPUTotalCost:               2,
-						CPUProportionalCost:        1,
-						RAMTotalCost:               2,
-						RAMProportionalCost:        1,
+						Cluster:             "cluster2",
+						Name:                "node2",
+						Type:                "Node",
+						ProviderID:          "node2",
+						GPUProportionalCost: 1,
+						CPUProportionalCost: 1,
+						RAMProportionalCost: 1,
 					},
 				},
 				"namespace3": {
 					"cluster2,node3": ProportionalAssetResourceCost{
-						Cluster:                    "cluster2",
-						Node:                       "node3",
-						ProviderID:                 "node3",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.5,
-						NodeResourceCostPercentage: 0.5,
-						GPUTotalCost:               4,
-						GPUProportionalCost:        2,
-						CPUTotalCost:               4,
-						CPUProportionalCost:        2,
-						RAMTotalCost:               4,
-						RAMProportionalCost:        2,
+						Cluster:             "cluster2",
+						Name:                "node3",
+						Type:                "Node",
+						ProviderID:          "node3",
+						GPUProportionalCost: 2,
+						CPUProportionalCost: 2,
+						RAMProportionalCost: 2,
 					},
 					"cluster2,node2": ProportionalAssetResourceCost{
-						Cluster:                    "cluster2",
-						Node:                       "node2",
-						ProviderID:                 "node2",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.5,
-						NodeResourceCostPercentage: 0.5,
-						GPUTotalCost:               2,
-						GPUProportionalCost:        1,
-						CPUTotalCost:               2,
-						CPUProportionalCost:        1,
-						RAMTotalCost:               2,
-						RAMProportionalCost:        1,
+						Cluster:             "cluster2",
+						Name:                "node2",
+						Type:                "Node",
+						ProviderID:          "node2",
+						GPUProportionalCost: 1,
+						CPUProportionalCost: 1,
+						RAMProportionalCost: 1,
 					},
 				},
 			},
@@ -1687,7 +1650,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				Filter:     AllocationFilterCondition{Field: FilterNamespace, Op: FilterEquals, Value: "namespace2"},
+				Filter:     mustParseFilter(`namespace:"namespace2"`),
 				ShareIdle:  ShareWeighted,
 				IdleByNode: true,
 			},
@@ -1928,7 +1891,8 @@ func TestAllocationSet_insertMatchingWindow(t *testing.T) {
 func TestParcInsert(t *testing.T) {
 	pod1_hour1 := ProportionalAssetResourceCost{
 		Cluster:                    "cluster1",
-		Node:                       "node1",
+		Name:                       "node1",
+		Type:                       "Node",
 		ProviderID:                 "i-1234",
 		CPUPercentage:              0.125,
 		GPUPercentage:              0,
@@ -1940,7 +1904,8 @@ func TestParcInsert(t *testing.T) {
 
 	pod1_hour2 := ProportionalAssetResourceCost{
 		Cluster:                    "cluster1",
-		Node:                       "node1",
+		Name:                       "node1",
+		Type:                       "Node",
 		ProviderID:                 "i-1234",
 		CPUPercentage:              0.0,
 		GPUPercentage:              0,
@@ -1951,7 +1916,8 @@ func TestParcInsert(t *testing.T) {
 
 	pod1_hour3 := ProportionalAssetResourceCost{
 		Cluster:                    "cluster1",
-		Node:                       "node1",
+		Name:                       "node1",
+		Type:                       "Node",
 		ProviderID:                 "i-1234",
 		CPUPercentage:              0.0,
 		GPUPercentage:              0,
@@ -1962,7 +1928,8 @@ func TestParcInsert(t *testing.T) {
 
 	pod2_hour1 := ProportionalAssetResourceCost{
 		Cluster:                    "cluster1",
-		Node:                       "node2",
+		Name:                       "node2",
+		Type:                       "Node",
 		ProviderID:                 "i-1234",
 		CPUPercentage:              0.0,
 		GPUPercentage:              0,
@@ -1974,7 +1941,8 @@ func TestParcInsert(t *testing.T) {
 
 	pod2_hour2 := ProportionalAssetResourceCost{
 		Cluster:                    "cluster1",
-		Node:                       "node2",
+		Name:                       "node2",
+		Type:                       "Node",
 		ProviderID:                 "i-1234",
 		CPUPercentage:              0.0,
 		GPUPercentage:              0,
@@ -1986,7 +1954,8 @@ func TestParcInsert(t *testing.T) {
 
 	pod2_hour3 := ProportionalAssetResourceCost{
 		Cluster:                    "cluster1",
-		Node:                       "node2",
+		Name:                       "node2",
+		Type:                       "Node",
 		ProviderID:                 "i-1234",
 		CPUPercentage:              0.0,
 		GPUPercentage:              0,
@@ -1998,7 +1967,8 @@ func TestParcInsert(t *testing.T) {
 
 	pod3_hour1 := ProportionalAssetResourceCost{
 		Cluster:                    "cluster1",
-		Node:                       "node3",
+		Name:                       "node3",
+		Type:                       "Node",
 		ProviderID:                 "i-1234",
 		CPUPercentage:              0.0,
 		GPUPercentage:              0,
@@ -2010,7 +1980,8 @@ func TestParcInsert(t *testing.T) {
 
 	pod3_hour2 := ProportionalAssetResourceCost{
 		Cluster:                    "cluster1",
-		Node:                       "node3",
+		Name:                       "node3",
+		Type:                       "Node",
 		ProviderID:                 "i-1234",
 		CPUPercentage:              0.0,
 		GPUPercentage:              0,
@@ -2022,7 +1993,8 @@ func TestParcInsert(t *testing.T) {
 
 	pod3_hour3 := ProportionalAssetResourceCost{
 		Cluster:                    "cluster1",
-		Node:                       "node3",
+		Name:                       "node3",
+		Type:                       "Node",
 		ProviderID:                 "i-1234",
 		CPUPercentage:              0.0,
 		GPUPercentage:              0,
@@ -2044,6 +2016,32 @@ func TestParcInsert(t *testing.T) {
 	parcs.Insert(pod3_hour3, true)
 	log.Debug("added all parcs")
 
+	// set totals, compute percentages
+	parc1, ok := parcs["cluster1,node1"]
+	if !ok {
+		t.Fatalf("parc1 not found")
+	}
+	parc1.CPUTotalCost = 12
+
+	parc2, ok := parcs["cluster1,node2"]
+	if !ok {
+		t.Fatalf("parc2 not found")
+	}
+	parc2.CPUTotalCost = 12
+
+	parc3, ok := parcs["cluster1,node3"]
+	if !ok {
+		t.Fatalf("parc1 not found")
+	}
+	parc3.CPUTotalCost = 12
+
+	ComputePercentages(&parc1)
+	ComputePercentages(&parc2)
+	ComputePercentages(&parc3)
+	parcs["cluster1,node1"] = parc1
+	parcs["cluster1,node2"] = parc2
+	parcs["cluster1,node3"] = parc3
+
 	expectedParcs := ProportionalAssetResourceCosts{
 		"cluster1,node1": ProportionalAssetResourceCost{
 			CPUPercentage:              0.041666666666666664,
@@ -3187,7 +3185,7 @@ func Test_AggregateByService_UnmountedLBs(t *testing.T) {
 	set.Insert(idle)
 
 	set.AggregateBy([]string{AllocationServiceProp}, &AllocationAggregationOptions{
-		Filter: AllocationFilterCondition{Field: FilterServices, Op: FilterContains, Value: "nginx-plus-nginx-ingress"},
+		Filter: ops.Contains(afilter.FieldServices, "nginx-plus-nginx-ingress"),
 	})
 
 	for _, alloc := range set.Allocations {
@@ -3214,3 +3212,227 @@ func Test_AggregateByService_UnmountedLBs(t *testing.T) {
 	spew.Config.DisableMethods = true
 	t.Logf("%s", spew.Sdump(set.Allocations))
 }
+
+func Test_DetermineSharingName(t *testing.T) {
+	var alloc *Allocation
+	var name string
+	var err error
+
+	// test nil allocation with nil options
+	name, err = alloc.determineSharingName(nil)
+	if err == nil {
+		t.Fatalf("determineSharingName: expected error; actual nil")
+	}
+
+	// test nil with non-nil options
+	name, err = alloc.determineSharingName(&AllocationAggregationOptions{})
+	if err == nil {
+		t.Fatalf("determineSharingName: expected error; actual nil")
+	}
+
+	alloc = &Allocation{}
+	alloc.Properties = &AllocationProperties{
+		Cluster: "cluster1",
+		Labels: map[string]string{
+			"app": "app1",
+			"env": "env1",
+		},
+		Namespace: "namespace1",
+	}
+
+	// test non-nil allocation with nil options
+	name, err = alloc.determineSharingName(nil)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "unknown" {
+		t.Fatalf("determineSharingName: expected \"unknown\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with empty options
+	options := &AllocationAggregationOptions{}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "unknown" {
+		t.Fatalf("determineSharingName: expected \"unknown\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with matching namespace options
+	options.SharedNamespaces = []string{"namespace1"}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "namespace1" {
+		t.Fatalf("determineSharingName: expected \"namespace1\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with non-matching namespace options
+	options.SharedNamespaces = []string{"namespace2"}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "unknown" {
+		t.Fatalf("determineSharingName: expected \"unknown\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with matching label options
+	options.SharedNamespaces = nil
+	options.SharedLabels = map[string][]string{
+		"app": {"app1"},
+	}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "app1" {
+		t.Fatalf("determineSharingName: expected \"app1\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with partial-matching label options
+	options.SharedLabels = map[string][]string{
+		"app": {"app1", "app2"},
+	}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "app1" {
+		t.Fatalf("determineSharingName: expected \"app1\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with non-matching label options
+	options.SharedLabels = map[string][]string{
+		"app": {"app2"},
+	}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "unknown" {
+		t.Fatalf("determineSharingName: expected \"unknown\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with matching namespace and label options
+	options.SharedNamespaces = []string{"namespace1"}
+	options.SharedLabels = map[string][]string{
+		"app": {"app1"},
+	}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "app1" {
+		t.Fatalf("determineSharingName: expected \"app1\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with non-matching namespace and matching label options
+	options.SharedNamespaces = []string{"namespace2"}
+	options.SharedLabels = map[string][]string{
+		"app": {"app1"},
+	}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "app1" {
+		t.Fatalf("determineSharingName: expected \"app1\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with non-matching namespace and non-matching label options
+	options.SharedNamespaces = []string{"namespace2"}
+	options.SharedLabels = map[string][]string{
+		"app": {"app2"},
+	}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "unknown" {
+		t.Fatalf("determineSharingName: expected \"unknown\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with multiple matching label options
+	alloc.Properties.Labels = map[string]string{
+		"app": "app1",
+		"env": "env1",
+	}
+	options.SharedNamespaces = nil
+	options.SharedLabels = map[string][]string{
+		"app": {"app1"},
+		"env": {"env1"},
+	}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "app1" {
+		t.Fatalf("determineSharingName: expected \"app1\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with one matching label option
+	alloc.Properties.Labels = map[string]string{
+		"app": "app2",
+		"env": "env1",
+	}
+	options.SharedNamespaces = nil
+	options.SharedLabels = map[string][]string{
+		"app": {"app1"},
+		"env": {"env1"},
+	}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "env1" {
+		t.Fatalf("determineSharingName: expected \"env1\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with one matching namespace option
+	alloc.Properties.Namespace = "namespace1"
+	options.SharedNamespaces = []string{"namespace1", "namespace2"}
+	options.SharedLabels = nil
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "namespace1" {
+		t.Fatalf("determineSharingName: expected \"namespace1\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with another one matching namespace option
+	alloc.Properties.Namespace = "namespace2"
+	options.SharedNamespaces = []string{"namespace1", "namespace2"}
+	options.SharedLabels = nil
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "namespace2" {
+		t.Fatalf("determineSharingName: expected \"namespace2\"; actual \"%s\"", name)
+	}
+
+	// test non-nil allocation with non-matching namespace options
+	alloc.Properties.Namespace = "namespace3"
+	options.SharedNamespaces = []string{"namespace1", "namespace2"}
+	name, err = alloc.determineSharingName(options)
+	if err != nil {
+		t.Fatalf("determineSharingName: expected no error; actual \"%s\"", err)
+	} else if err != nil || name != "unknown" {
+		t.Fatalf("determineSharingName: expected \"unknown\"; actual \"%s\"", name)
+	}
+}
+
+func TestIsFilterEmptyTrue(t *testing.T) {
+	compiler := NewAllocationMatchCompiler(nil)
+	matcher, err := compiler.Compile(nil)
+	if err != nil {
+		t.Fatalf("compiling nil filter: %s", err)
+	}
+
+	result := isFilterEmpty(matcher)
+	if !result {
+		t.Errorf("matcher '%+v' should be reported empty but wasn't", matcher)
+	}
+}
+
+func TestIsFilterEmptyFalse(t *testing.T) {
+	compiler := NewAllocationMatchCompiler(nil)
+	matcher, err := compiler.Compile(ops.Eq(afilter.FieldClusterID, "test"))
+	if err != nil {
+		t.Fatalf("compiling nil filter: %s", err)
+	}
+	result := isFilterEmpty(matcher)
+	if result {
+		t.Errorf("matcher '%+v' should be not be reported empty but was", matcher)
+	}
+}

+ 0 - 534
pkg/kubecost/allocationfilter.go

@@ -1,534 +0,0 @@
-package kubecost
-
-import (
-	"fmt"
-	"sort"
-	"strings"
-
-	"github.com/opencost/opencost/pkg/log"
-)
-
-// FilterField is an enum that represents Allocation-specific fields that can be
-// filtered on (namespace, label, etc.)
-type FilterField string
-
-// If you add a FilterField, MAKE SURE TO UPDATE ALL FILTER IMPLEMENTATIONS! Go
-// does not enforce exhaustive pattern matching on "enum" types.
-const (
-	FilterClusterID      FilterField = "clusterid"
-	FilterNode                       = "node"
-	FilterNamespace                  = "namespace"
-	FilterControllerKind             = "controllerkind"
-	FilterControllerName             = "controllername"
-	FilterPod                        = "pod"
-	FilterContainer                  = "container"
-
-	// Filtering based on label aliases (team, department, etc.) should be a
-	// responsibility of the query handler. By the time it reaches this
-	// structured representation, we shouldn't have to be aware of what is
-	// aliased to what.
-
-	FilterLabel      = "label"
-	FilterAnnotation = "annotation"
-	FilterAlias      = "alias"
-
-	FilterServices = "services"
-)
-
-// FilterOp is an enum that represents operations that can be performed
-// when filtering (equality, inequality, etc.)
-type FilterOp string
-
-// If you add a FilterOp, MAKE SURE TO UPDATE ALL FILTER IMPLEMENTATIONS! Go
-// does not enforce exhaustive pattern matching on "enum" types.
-const (
-	// FilterEquals is the equality operator
-	// "kube-system" FilterEquals "kube-system" = true
-	// "kube-syste" FilterEquals "kube-system" = false
-	FilterEquals FilterOp = "equals"
-
-	// FilterNotEquals is the inequality operator
-	FilterNotEquals = "notequals"
-
-	// FilterContains is an array/slice membership operator
-	// ["a", "b", "c"] FilterContains "a" = true
-	FilterContains = "contains"
-
-	// FilterNotContains is an array/slice non-membership operator
-	// ["a", "b", "c"] FilterNotContains "d" = true
-	FilterNotContains = "notcontains"
-
-	// FilterStartsWith matches strings with the given prefix.
-	// "kube-system" StartsWith "kube" = true
-	//
-	// When comparing with a field represented by an array/slice, this is like
-	// applying FilterContains to every element of the slice.
-	FilterStartsWith = "startswith"
-
-	// FilterContainsPrefix is like FilterContains, but using StartsWith instead
-	// of Equals.
-	// ["kube-system", "abc123"] ContainsPrefix ["kube"] = true
-	FilterContainsPrefix = "containsprefix"
-)
-
-// AllocationFilter represents anything that can be used to filter an
-// Allocation.
-//
-// Implement this interface with caution. While it is generic, it
-// is intended to be introspectable so query handlers can perform various
-// optimizations. These optimizations include:
-// - Routing a query to the most optimal cache
-// - Querying backing data stores efficiently (e.g. translation to SQL)
-//
-// Custom implementations of this interface outside of this package should not
-// expect to receive these benefits. Passing a custom implementation to a
-// handler may in errors.
-type AllocationFilter interface {
-	// Matches is the canonical in-Go function for determining if an Allocation
-	// matches a filter.
-	Matches(a *Allocation) bool
-
-	// Flattened converts a filter into a minimal form, removing unnecessary
-	// intermediate objects, like single-element or zero-element AND and OR
-	// conditions.
-	//
-	// It returns nil if the filter is filtering nothing.
-	//
-	// Example:
-	// (and (or (namespaceequals "kubecost")) (or)) ->
-	// (namespaceequals "kubecost")
-	//
-	// (and (or)) -> nil
-	Flattened() AllocationFilter
-
-	String() string
-
-	// Equals returns true if the two AllocationFilters are logically
-	// equivalent.
-	Equals(AllocationFilter) bool
-}
-
-// AllocationFilterCondition is the lowest-level type of filter. It represents
-// the a filter operation (equality, inequality, etc.) on a field (namespace,
-// label, etc.).
-type AllocationFilterCondition struct {
-	Field FilterField
-	Op    FilterOp
-
-	// Key is for filters that require key-value pairs, like labels or
-	// annotations.
-	//
-	// A filter of 'label[app]:"foo"' has Key="app" and Value="foo"
-	Key string
-
-	// Value is for _all_ filters. A filter of 'namespace:"kubecost"' has
-	// Value="kubecost"
-	Value string
-}
-
-func (afc AllocationFilterCondition) String() string {
-	if afc.Key == "" {
-		return fmt.Sprintf(`(%s %s "%s")`, afc.Op, afc.Field, afc.Value)
-	}
-
-	return fmt.Sprintf(`(%s %s[%s] "%s")`, afc.Op, afc.Field, afc.Key, afc.Value)
-}
-
-// Flattened returns itself because you cannot flatten a base condition further
-func (filter AllocationFilterCondition) Flattened() AllocationFilter {
-
-	return filter
-}
-
-func (left AllocationFilterCondition) Equals(right AllocationFilter) bool {
-	if rightAFC, ok := right.(AllocationFilterCondition); ok {
-		return left == rightAFC
-	}
-	return false
-}
-
-// AllocationFilterOr is a set of filters that should be evaluated as a logical
-// OR.
-type AllocationFilterOr struct {
-	Filters []AllocationFilter
-}
-
-func (af AllocationFilterOr) String() string {
-	s := "(or"
-	for _, f := range af.Filters {
-		s += fmt.Sprintf(" %s", f)
-	}
-
-	s += ")"
-	return s
-}
-
-// flattened returns a new slice of filters after flattening.
-func flattened(filters []AllocationFilter) []AllocationFilter {
-	var flattenedFilters []AllocationFilter
-	for _, innerFilter := range filters {
-		if innerFilter == nil {
-			continue
-		}
-		flattenedInner := innerFilter.Flattened()
-		if flattenedInner != nil {
-			flattenedFilters = append(flattenedFilters, flattenedInner)
-		}
-	}
-
-	return flattenedFilters
-}
-
-// Flattened converts a filter into a minimal form, removing unnecessary
-// intermediate objects
-//
-// Flattened returns:
-// - nil if filter contains no filters
-// - the inner filter if filter contains one filter
-// - an equivalent AllocationFilterOr if filter contains more than one filter
-func (filter AllocationFilterOr) Flattened() AllocationFilter {
-	flattenedFilters := flattened(filter.Filters)
-	if len(flattenedFilters) == 0 {
-		return nil
-	}
-
-	if len(flattenedFilters) == 1 {
-		return flattenedFilters[0]
-	}
-
-	return AllocationFilterOr{Filters: flattenedFilters}
-}
-
-func (filter AllocationFilterOr) sort() {
-	for _, inner := range filter.Filters {
-		if and, ok := inner.(AllocationFilterAnd); ok {
-			and.sort()
-		} else if or, ok := inner.(AllocationFilterOr); ok {
-			or.sort()
-		}
-	}
-
-	// While a slight hack, we can rely on the string serialization of the
-	// inner filters to get a sortable representation.
-	sort.SliceStable(filter.Filters, func(i, j int) bool {
-		return filter.Filters[i].String() < filter.Filters[j].String()
-	})
-}
-
-func (left AllocationFilterOr) Equals(right AllocationFilter) bool {
-	// The type cast takes care of right == nil as well
-	rightOr, ok := right.(AllocationFilterOr)
-	if !ok {
-		return false
-	}
-
-	if len(left.Filters) != len(rightOr.Filters) {
-		return false
-	}
-
-	left.sort()
-	rightOr.sort()
-
-	for i := range left.Filters {
-		if !left.Filters[i].Equals(rightOr.Filters[i]) {
-			return false
-		}
-	}
-	return true
-}
-
-// AllocationFilterOr is a set of filters that should be evaluated as a logical
-// AND.
-type AllocationFilterAnd struct {
-	Filters []AllocationFilter
-}
-
-func (af AllocationFilterAnd) String() string {
-	s := "(and"
-	for _, f := range af.Filters {
-		s += fmt.Sprintf(" %s", f)
-	}
-
-	s += ")"
-	return s
-}
-
-// Flattened converts a filter into a minimal form, removing unnecessary
-// intermediate objects
-//
-// Flattened returns:
-// - nil if filter contains no filters
-// - the inner filter if filter contains one filter
-// - an equivalent AllocationFilterAnd if filter contains more than one filter
-func (filter AllocationFilterAnd) Flattened() AllocationFilter {
-	flattenedFilters := flattened(filter.Filters)
-	if len(flattenedFilters) == 0 {
-		return nil
-	}
-
-	if len(flattenedFilters) == 1 {
-		return flattenedFilters[0]
-	}
-
-	return AllocationFilterAnd{Filters: flattenedFilters}
-}
-
-func (filter AllocationFilterAnd) sort() {
-	for _, inner := range filter.Filters {
-		if and, ok := inner.(AllocationFilterAnd); ok {
-			and.sort()
-		} else if or, ok := inner.(AllocationFilterOr); ok {
-			or.sort()
-		}
-	}
-
-	// While a slight hack, we can rely on the string serialization of the
-	// inner filters.
-	sort.SliceStable(filter.Filters, func(i, j int) bool {
-		return filter.Filters[i].String() < filter.Filters[j].String()
-	})
-}
-
-func (left AllocationFilterAnd) Equals(right AllocationFilter) bool {
-	// The type cast takes care of right == nil as well
-	rightAnd, ok := right.(AllocationFilterAnd)
-	if !ok {
-		return false
-	}
-
-	if len(left.Filters) != len(rightAnd.Filters) {
-		return false
-	}
-
-	left.sort()
-	rightAnd.sort()
-
-	for i := range left.Filters {
-		if !left.Filters[i].Equals(rightAnd.Filters[i]) {
-			return false
-		}
-	}
-	return true
-}
-
-func (filter AllocationFilterCondition) Matches(a *Allocation) bool {
-	if a == nil {
-		return false
-	}
-	if a.Properties == nil {
-		return false
-	}
-
-	// The Allocation's value for the field to compare
-	// We use an interface{} so this can contain the services []string slice
-	var valueToCompare interface{}
-
-	// toCompareMissing will be true if the value to be compared is missing in
-	// the Allocation. For example, if we're filtering based on the value of
-	// the "app" label, but the Allocation doesn't have an "app" label, this
-	// will become true. This lets us deal with != gracefully.
-	toCompareMissing := false
-
-	// This switch maps the filter.Field to the field to be compared in
-	// a.Properties and sets valueToCompare from the value in a.Properties.
-	switch filter.Field {
-	case FilterClusterID:
-		valueToCompare = a.Properties.Cluster
-	case FilterNode:
-		valueToCompare = a.Properties.Node
-	case FilterNamespace:
-		valueToCompare = a.Properties.Namespace
-	case FilterControllerKind:
-		valueToCompare = a.Properties.ControllerKind
-	case FilterControllerName:
-		valueToCompare = a.Properties.Controller
-	case FilterPod:
-		valueToCompare = a.Properties.Pod
-	case FilterContainer:
-		valueToCompare = a.Properties.Container
-	// Comes from GetAnnotation/LabelFilterFunc in KCM
-	case FilterLabel:
-		val, ok := a.Properties.Labels[filter.Key]
-
-		if !ok {
-			toCompareMissing = true
-		} else {
-			valueToCompare = val
-		}
-	case FilterAnnotation:
-		val, ok := a.Properties.Annotations[filter.Key]
-
-		if !ok {
-			toCompareMissing = true
-		} else {
-			valueToCompare = val
-		}
-	case FilterAlias:
-		var ok bool
-		valueToCompare, ok = a.Properties.Labels[filter.Key]
-		if !ok {
-			valueToCompare, ok = a.Properties.Annotations[filter.Key]
-			if !ok {
-				toCompareMissing = true
-			}
-		}
-	case FilterServices:
-		valueToCompare = a.Properties.Services
-	default:
-		log.Errorf("Allocation Filter: Unhandled filter field. This is a filter implementation error and requires immediate patching. Field: %s", filter.Field)
-		return false
-	}
-
-	switch filter.Op {
-	case FilterEquals:
-		// namespace:"__unallocated__" should match a.Properties.Namespace = ""
-		// label[app]:"__unallocated__" should match _, ok := Labels[app]; !ok
-		if toCompareMissing || valueToCompare == "" {
-			return filter.Value == UnallocatedSuffix
-		}
-
-		if valueToCompare == filter.Value {
-			return true
-		}
-	case FilterNotEquals:
-		// namespace!:"__unallocated__" should match
-		// a.Properties.Namespace != ""
-		// label[app]!:"__unallocated__" should match _, ok := Labels[app]; ok
-		if filter.Value == UnallocatedSuffix {
-			if toCompareMissing {
-				return false
-			}
-			return valueToCompare != ""
-		}
-
-		if toCompareMissing {
-			return true
-		}
-
-		if valueToCompare != filter.Value {
-			return true
-		}
-	case FilterContains:
-		if stringSlice, ok := valueToCompare.([]string); ok {
-			if len(stringSlice) == 0 {
-				return filter.Value == UnallocatedSuffix
-			}
-
-			for _, s := range stringSlice {
-				if s == filter.Value {
-					return true
-				}
-			}
-		} else {
-			log.Warnf("Allocation Filter: invalid 'contains' call for non-list filter value")
-		}
-	case FilterNotContains:
-		if stringSlice, ok := valueToCompare.([]string); ok {
-			// services!:"__unallocated__" should match
-			// len(a.Properties.Services) > 0
-			//
-			// TODO: is this true?
-			if filter.Value == UnallocatedSuffix {
-				return len(stringSlice) > 0
-			}
-
-			for _, s := range stringSlice {
-				if s == filter.Value {
-					return false
-				}
-			}
-
-			return true
-		} else {
-			log.Warnf("Allocation Filter: invalid 'notcontains' call for non-list filter value")
-		}
-	case FilterStartsWith:
-		if toCompareMissing {
-			return false
-		}
-
-		// We don't need special __unallocated__ logic here because a query
-		// asking for "__unallocated__" won't have a wildcard and unallocated
-		// properties are the empty string.
-
-		s, ok := valueToCompare.(string)
-		if !ok {
-			log.Warnf("Allocation Filter: invalid 'startswith' call for field with unsupported type")
-			return false
-		}
-		return strings.HasPrefix(s, filter.Value)
-	case FilterContainsPrefix:
-		if toCompareMissing {
-			return false
-		}
-
-		// We don't need special __unallocated__ logic here because a query
-		// asking for "__unallocated__" won't have a wildcard and unallocated
-		// properties are the empty string.
-
-		values, ok := valueToCompare.([]string)
-		if !ok {
-			log.Warnf("Allocation Filter: invalid '%s' call for field with unsupported type", FilterContainsPrefix)
-			return false
-		}
-
-		for _, s := range values {
-			if strings.HasPrefix(s, filter.Value) {
-				return true
-			}
-		}
-
-		return false
-	default:
-		log.Errorf("Allocation Filter: Unhandled filter op. This is a filter implementation error and requires immediate patching. Op: %s", filter.Op)
-		return false
-	}
-
-	return false
-}
-
-func (and AllocationFilterAnd) Matches(a *Allocation) bool {
-	filters := and.Filters
-	if len(filters) == 0 {
-		return true
-	}
-
-	for _, filter := range filters {
-		if !filter.Matches(a) {
-			return false
-		}
-	}
-
-	return true
-}
-
-func (or AllocationFilterOr) Matches(a *Allocation) bool {
-	filters := or.Filters
-	if len(filters) == 0 {
-		return true
-	}
-
-	for _, filter := range filters {
-		if filter.Matches(a) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// AllocationFilterNone is a filter that matches no allocations. This is useful
-// for applications like authorization, where a user/group/role may be disallowed
-// from viewing Allocation data entirely.
-type AllocationFilterNone struct{}
-
-func (afn AllocationFilterNone) String() string { return "(none)" }
-
-func (afn AllocationFilterNone) Flattened() AllocationFilter { return afn }
-
-func (afn AllocationFilterNone) Matches(a *Allocation) bool { return false }
-
-func (left AllocationFilterNone) Equals(right AllocationFilter) bool {
-	_, ok := right.(AllocationFilterNone)
-	return ok
-}

+ 153 - 1148
pkg/kubecost/allocationfilter_test.go

@@ -1,16 +1,27 @@
 package kubecost
 
 import (
-	"fmt"
-	"reflect"
 	"testing"
+
+	filter21 "github.com/opencost/opencost/pkg/filter21"
+	afilter "github.com/opencost/opencost/pkg/filter21/allocation"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/ops"
 )
 
 func Test_AllocationFilterCondition_Matches(t *testing.T) {
+	labelConfig := &LabelConfig{
+		DepartmentLabel:  "keydepartment",
+		EnvironmentLabel: "keyenvironment",
+		OwnerLabel:       "keyowner",
+		ProductLabel:     "keyproduct",
+		TeamLabel:        "keyteam",
+	}
+
 	cases := []struct {
 		name   string
 		a      *Allocation
-		filter AllocationFilter
+		filter filter21.Filter
 
 		expected bool
 	}{
@@ -21,12 +32,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Cluster: "cluster-one",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterClusterID,
-				Op:    FilterEquals,
-				Value: "cluster-one",
-			},
-
+			filter:   ops.Eq(afilter.FieldClusterID, "cluster-one"),
 			expected: true,
 		},
 		{
@@ -36,12 +42,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Cluster: "cluster-one",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterClusterID,
-				Op:    FilterStartsWith,
-				Value: "cluster",
-			},
-
+			filter:   ops.ContainsPrefix(afilter.FieldClusterID, "cluster"),
 			expected: true,
 		},
 		{
@@ -51,11 +52,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Cluster: "k8s-one",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterClusterID,
-				Op:    FilterStartsWith,
-				Value: "cluster",
-			},
+			filter: ops.ContainsPrefix(afilter.FieldClusterID, "cluster"),
 
 			expected: false,
 		},
@@ -66,12 +63,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Cluster: "",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterClusterID,
-				Op:    FilterStartsWith,
-				Value: "",
-			},
-
+			filter:   ops.ContainsPrefix(afilter.FieldClusterID, ""),
 			expected: true,
 		},
 		{
@@ -81,12 +73,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Cluster: "abc",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterClusterID,
-				Op:    FilterStartsWith,
-				Value: "",
-			},
-
+			filter:   ops.ContainsPrefix(afilter.FieldClusterID, ""),
 			expected: true,
 		},
 		{
@@ -96,12 +83,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Node: "node123",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterNode,
-				Op:    FilterEquals,
-				Value: "node123",
-			},
-
+			filter:   ops.Eq(afilter.FieldNode, "node123"),
 			expected: true,
 		},
 		{
@@ -111,12 +93,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Namespace: "kube-system",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterNotEquals,
-				Value: "kube-system",
-			},
-
+			filter:   ops.NotEq(afilter.FieldNamespace, "kube-system"),
 			expected: false,
 		},
 		{
@@ -126,12 +103,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Namespace: "kube-system",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterNotEquals,
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.NotEq(afilter.FieldNamespace, UnallocatedSuffix),
 			expected: true,
 		},
 		{
@@ -141,12 +113,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Namespace: "",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterNotEquals,
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.NotEq(afilter.FieldNamespace, UnallocatedSuffix),
 			expected: false,
 		},
 		{
@@ -156,12 +123,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Namespace: "",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterEquals,
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.Eq(afilter.FieldNamespace, UnallocatedSuffix),
 			expected: true,
 		},
 		{
@@ -171,12 +133,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					ControllerKind: "deployment", // We generally store controller kinds as all lowercase
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterControllerKind,
-				Op:    FilterEquals,
-				Value: "deployment",
-			},
-
+			filter:   ops.Eq(afilter.FieldControllerKind, "deployment"),
 			expected: true,
 		},
 		{
@@ -186,12 +143,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Controller: "kc-cost-analyzer",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterControllerName,
-				Op:    FilterEquals,
-				Value: "kc-cost-analyzer",
-			},
-
+			filter:   ops.Eq(afilter.FieldControllerName, "kc-cost-analyzer"),
 			expected: true,
 		},
 		{
@@ -201,12 +153,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Pod: "pod-123 UID-ABC",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterPod,
-				Op:    FilterEquals,
-				Value: "pod-123 UID-ABC",
-			},
-
+			filter:   ops.Eq(afilter.FieldPod, "pod-123 UID-ABC"),
 			expected: true,
 		},
 		{
@@ -216,12 +163,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Container: "cost-model",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterContainer,
-				Op:    FilterEquals,
-				Value: "cost-model",
-			},
-
+			filter:   ops.Eq(afilter.FieldContainer, "cost-model"),
 			expected: true,
 		},
 		{
@@ -233,13 +175,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: "foo",
-			},
-
+			filter:   ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
 			expected: true,
 		},
 		{
@@ -251,13 +187,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: "foo",
-			},
-
+			filter:   ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
 			expected: false,
 		},
 		{
@@ -269,13 +199,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: "foo",
-			},
-
+			filter:   ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
 			expected: false,
 		},
 		{
@@ -287,13 +211,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), UnallocatedSuffix),
 			expected: true,
 		},
 		{
@@ -305,13 +223,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), UnallocatedSuffix),
 			expected: false,
 		},
 		{
@@ -323,13 +235,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterNotEquals,
-				Key:   "app",
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.NotEq(ops.WithKey(afilter.FieldLabel, "app"), UnallocatedSuffix),
 			expected: false,
 		},
 		{
@@ -341,13 +247,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterNotEquals,
-				Key:   "app",
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.NotEq(ops.WithKey(afilter.FieldLabel, "app"), UnallocatedSuffix),
 			expected: true,
 		},
 		{
@@ -359,13 +259,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterNotEquals,
-				Key:   "app",
-				Value: "foo",
-			},
-
+			filter:   ops.NotEq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
 			expected: true,
 		},
 		{
@@ -377,13 +271,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterAnnotation,
-				Op:    FilterEquals,
-				Key:   "prom_modified_name",
-				Value: "testing123",
-			},
-
+			filter:   ops.Eq(ops.WithKey(afilter.FieldAnnotation, "prom_modified_name"), "testing123"),
 			expected: true,
 		},
 		{
@@ -395,13 +283,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterAnnotation,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: "foo",
-			},
-
+			filter:   ops.Eq(ops.WithKey(afilter.FieldAnnotation, "app"), "foo"),
 			expected: false,
 		},
 		{
@@ -413,13 +295,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterAnnotation,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: "foo",
-			},
-
+			filter:   ops.Eq(ops.WithKey(afilter.FieldAnnotation, "app"), "foo"),
 			expected: false,
 		},
 		{
@@ -431,13 +307,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterAnnotation,
-				Op:    FilterNotEquals,
-				Key:   "app",
-				Value: "foo",
-			},
-
+			filter:   ops.NotEq(ops.WithKey(afilter.FieldAnnotation, "app"), "foo"),
 			expected: true,
 		},
 		{
@@ -447,12 +317,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Namespace: "",
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterEquals,
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.Eq(afilter.FieldNamespace, UnallocatedSuffix),
 			expected: true,
 		},
 		{
@@ -462,12 +327,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{"serv1", "serv2"},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterContains,
-				Value: "serv2",
-			},
-
+			filter:   ops.Contains(afilter.FieldServices, "serv2"),
 			expected: true,
 		},
 		{
@@ -477,12 +337,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{"serv1", "serv2"},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterContains,
-				Value: "serv3",
-			},
-
+			filter:   ops.Contains(afilter.FieldServices, "serv3"),
 			expected: false,
 		},
 		{
@@ -492,12 +347,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{"serv1", "serv2"},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterNotContains,
-				Value: "serv3",
-			},
-
+			filter:   ops.NotContains(afilter.FieldServices, "serv3"),
 			expected: true,
 		},
 		{
@@ -507,12 +357,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{"serv1", "serv2"},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterNotContains,
-				Value: "serv2",
-			},
-
+			filter:   ops.NotContains(afilter.FieldServices, "serv2"),
 			expected: false,
 		},
 		{
@@ -522,12 +367,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{"serv1", "serv2"},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterNotContains,
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.NotContains(afilter.FieldServices, UnallocatedSuffix),
 			expected: true,
 		},
 		{
@@ -537,12 +377,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterNotContains,
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.NotContains(afilter.FieldServices, UnallocatedSuffix),
 			expected: false,
 		},
 		{
@@ -552,12 +387,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{"serv1", "serv2"},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterContainsPrefix,
-				Value: "serv",
-			},
-
+			filter:   ops.ContainsPrefix(afilter.FieldServices, "serv"),
 			expected: true,
 		},
 		{
@@ -567,12 +397,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{"foo", "bar"},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterContainsPrefix,
-				Value: "serv",
-			},
-
+			filter:   ops.ContainsPrefix(afilter.FieldServices, "serv"),
 			expected: false,
 		},
 		{
@@ -582,12 +407,7 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{"serv1", "serv2"},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterContains,
-				Value: UnallocatedSuffix,
-			},
-
+			filter:   ops.Contains(afilter.FieldServices, UnallocatedSuffix),
 			expected: false,
 		},
 		{
@@ -597,26 +417,66 @@ func Test_AllocationFilterCondition_Matches(t *testing.T) {
 					Services: []string{},
 				},
 			},
-			filter: AllocationFilterCondition{
-				Field: FilterServices,
-				Op:    FilterContains,
-				Value: UnallocatedSuffix,
+			filter:   ops.Contains(afilter.FieldServices, UnallocatedSuffix),
+			expected: true,
+		},
+		{
+			name: `department equals -> true`,
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Labels: AllocationLabels{
+						"keydepartment": "foo",
+					},
+				},
+			},
+			// The ops package doesn't handle alias construction quite right,
+			// so we construct it more manually here
+			filter: &ast.EqualOp{
+				Left: ast.Identifier{
+					Field: ast.NewAliasField(afilter.AliasDepartment),
+				},
+				Right: "foo",
+			},
+			expected: true,
+		},
+		{
+			name: `product != unallocated -> true`,
+			a: &Allocation{
+				Properties: &AllocationProperties{
+					Annotations: AllocationAnnotations{
+						"keyproduct": "foo",
+					},
+				},
+			},
+			// The ops package doesn't handle alias construction quite right,
+			// so we construct it more manually here
+			filter: &ast.NotOp{
+				Operand: &ast.EqualOp{
+					Left: ast.Identifier{
+						Field: ast.NewAliasField(afilter.AliasDepartment),
+					},
+					Right: UnallocatedSuffix,
+				},
 			},
-
 			expected: true,
 		},
 	}
 
 	for _, c := range cases {
-		result := c.filter.Matches(c.a)
+		compiler := NewAllocationMatchCompiler(labelConfig)
+		compiled, err := compiler.Compile(c.filter)
+		if err != nil {
+			t.Fatalf("err compiling filter '%s': %s", ast.ToPreOrderShortString(c.filter), err)
+		}
 
+		result := compiled.Matches(c.a)
 		if result != c.expected {
 			t.Errorf("%s: expected %t, got %t", c.name, c.expected, result)
 		}
 	}
 }
 
-func Test_AllocationFilterNone_Matches(t *testing.T) {
+func Test_AllocationFilterContradiction_Matches(t *testing.T) {
 	cases := []struct {
 		name string
 		a    *Allocation
@@ -724,18 +584,25 @@ func Test_AllocationFilterNone_Matches(t *testing.T) {
 	}
 
 	for _, c := range cases {
-		result := AllocationFilterNone{}.Matches(c.a)
+		filter := &ast.ContradictionOp{}
+		compiler := NewAllocationMatchCompiler(nil)
+		compiled, err := compiler.Compile(filter)
+		if err != nil {
+			t.Fatalf("err compiling filter '%s': %s", ast.ToPreOrderShortString(filter), err)
+		}
 
+		result := compiled.Matches(c.a)
 		if result {
 			t.Errorf("%s: should have been rejected", c.name)
 		}
 	}
 }
+
 func Test_AllocationFilterAnd_Matches(t *testing.T) {
 	cases := []struct {
 		name   string
 		a      *Allocation
-		filter AllocationFilter
+		filter filter21.Filter
 
 		expected bool
 	}{
@@ -749,19 +616,10 @@ func Test_AllocationFilterAnd_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterAnd{[]AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterEquals,
-					Key:   "app",
-					Value: "foo",
-				},
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-					Value: "kubecost",
-				},
-			}},
+			filter: ops.And(
+				ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
+				ops.Eq(afilter.FieldNamespace, "kubecost"),
+			),
 			expected: true,
 		},
 		{
@@ -774,19 +632,10 @@ func Test_AllocationFilterAnd_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterAnd{[]AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterEquals,
-					Key:   "app",
-					Value: "foo",
-				},
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-					Value: "kubecost",
-				},
-			}},
+			filter: ops.And(
+				ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
+				ops.Eq(afilter.FieldNamespace, "kubecost"),
+			),
 			expected: false,
 		},
 		{
@@ -799,19 +648,10 @@ func Test_AllocationFilterAnd_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterAnd{[]AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterEquals,
-					Key:   "app",
-					Value: "foo",
-				},
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-					Value: "kubecost",
-				},
-			}},
+			filter: ops.And(
+				ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
+				ops.Eq(afilter.FieldNamespace, "kubecost"),
+			),
 			expected: false,
 		},
 		{
@@ -824,23 +664,14 @@ func Test_AllocationFilterAnd_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterAnd{[]AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterEquals,
-					Key:   "app",
-					Value: "foo",
-				},
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-					Value: "kubecost",
-				},
-			}},
+			filter: ops.And(
+				ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
+				ops.Eq(afilter.FieldNamespace, "kubecost"),
+			),
 			expected: false,
 		},
 		{
-			name: `(and none) matches nothing`,
+			name: `contradiction matches nothing`,
 			a: &Allocation{
 				Properties: &AllocationProperties{
 					Namespace: "kube-system",
@@ -849,16 +680,19 @@ func Test_AllocationFilterAnd_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterAnd{[]AllocationFilter{
-				AllocationFilterNone{},
-			}},
+			filter:   &ast.ContradictionOp{},
 			expected: false,
 		},
 	}
 
 	for _, c := range cases {
-		result := c.filter.Matches(c.a)
+		compiler := NewAllocationMatchCompiler(nil)
+		compiled, err := compiler.Compile(c.filter)
+		if err != nil {
+			t.Fatalf("err compiling filter '%s': %s", ast.ToPreOrderShortString(c.filter), err)
+		}
 
+		result := compiled.Matches(c.a)
 		if result != c.expected {
 			t.Errorf("%s: expected %t, got %t", c.name, c.expected, result)
 		}
@@ -869,7 +703,7 @@ func Test_AllocationFilterOr_Matches(t *testing.T) {
 	cases := []struct {
 		name   string
 		a      *Allocation
-		filter AllocationFilter
+		filter filter21.Filter
 
 		expected bool
 	}{
@@ -883,19 +717,10 @@ func Test_AllocationFilterOr_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterOr{[]AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterEquals,
-					Key:   "app",
-					Value: "foo",
-				},
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-					Value: "kubecost",
-				},
-			}},
+			filter: ops.Or(
+				ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
+				ops.Eq(afilter.FieldNamespace, "kubecost"),
+			),
 			expected: true,
 		},
 		{
@@ -908,19 +733,10 @@ func Test_AllocationFilterOr_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterOr{[]AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterEquals,
-					Key:   "app",
-					Value: "foo",
-				},
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-					Value: "kubecost",
-				},
-			}},
+			filter: ops.Or(
+				ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
+				ops.Eq(afilter.FieldNamespace, "kubecost"),
+			),
 			expected: true,
 		},
 		{
@@ -933,19 +749,10 @@ func Test_AllocationFilterOr_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterOr{[]AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterEquals,
-					Key:   "app",
-					Value: "foo",
-				},
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-					Value: "kubecost",
-				},
-			}},
+			filter: ops.Or(
+				ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
+				ops.Eq(afilter.FieldNamespace, "kubecost"),
+			),
 			expected: true,
 		},
 		{
@@ -958,826 +765,24 @@ func Test_AllocationFilterOr_Matches(t *testing.T) {
 					},
 				},
 			},
-			filter: AllocationFilterOr{[]AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterEquals,
-					Key:   "app",
-					Value: "foo",
-				},
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-					Value: "kubecost",
-				},
-			}},
+			filter: ops.Or(
+				ops.Eq(ops.WithKey(afilter.FieldLabel, "app"), "foo"),
+				ops.Eq(afilter.FieldNamespace, "kubecost"),
+			),
 			expected: false,
 		},
 	}
 
 	for _, c := range cases {
-		result := c.filter.Matches(c.a)
+		compiler := NewAllocationMatchCompiler(nil)
+		compiled, err := compiler.Compile(c.filter)
+		if err != nil {
+			t.Fatalf("err compiling filter '%s': %s", ast.ToPreOrderShortString(c.filter), err)
+		}
 
+		result := compiled.Matches(c.a)
 		if result != c.expected {
 			t.Errorf("%s: expected %t, got %t", c.name, c.expected, result)
 		}
 	}
 }
-
-func Test_AllocationFilter_Flattened(t *testing.T) {
-	cases := []struct {
-		name string
-
-		input    AllocationFilter
-		expected AllocationFilter
-	}{
-		{
-			name: "AllocationFilterCondition",
-			input: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterEquals,
-			},
-			expected: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterEquals,
-			},
-		},
-		{
-			name:     "empty AllocationFilterAnd (nil)",
-			input:    AllocationFilterAnd{},
-			expected: nil,
-		},
-		{
-			name:     "empty AllocationFilterAnd (len 0)",
-			input:    AllocationFilterAnd{Filters: []AllocationFilter{}},
-			expected: nil,
-		},
-		{
-			name:     "empty AllocationFilterOr (nil)",
-			input:    AllocationFilterOr{},
-			expected: nil,
-		},
-		{
-			name:     "empty AllocationFilterOr (len 0)",
-			input:    AllocationFilterOr{Filters: []AllocationFilter{}},
-			expected: nil,
-		},
-		{
-			name: "single-element AllocationFilterAnd",
-			input: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-				},
-			}},
-
-			expected: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterEquals,
-			},
-		},
-		{
-			name: "single-element AllocationFilterOr",
-			input: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-				},
-			}},
-
-			expected: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterEquals,
-			},
-		},
-		{
-			name: "multi-element AllocationFilterAnd",
-			input: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-				},
-				AllocationFilterCondition{
-					Field: FilterClusterID,
-					Op:    FilterNotEquals,
-				},
-				AllocationFilterCondition{
-					Field: FilterServices,
-					Op:    FilterContains,
-				},
-			}},
-
-			expected: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-				},
-				AllocationFilterCondition{
-					Field: FilterClusterID,
-					Op:    FilterNotEquals,
-				},
-				AllocationFilterCondition{
-					Field: FilterServices,
-					Op:    FilterContains,
-				},
-			}},
-		},
-		{
-			name: "multi-element AllocationFilterOr",
-			input: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-				},
-				AllocationFilterCondition{
-					Field: FilterClusterID,
-					Op:    FilterNotEquals,
-				},
-				AllocationFilterCondition{
-					Field: FilterServices,
-					Op:    FilterContains,
-				},
-			}},
-
-			expected: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterNamespace,
-					Op:    FilterEquals,
-				},
-				AllocationFilterCondition{
-					Field: FilterClusterID,
-					Op:    FilterNotEquals,
-				},
-				AllocationFilterCondition{
-					Field: FilterServices,
-					Op:    FilterContains,
-				},
-			}},
-		},
-		{
-			name:     "AllocationFilterNone",
-			input:    AllocationFilterNone{},
-			expected: AllocationFilterNone{},
-		},
-	}
-
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			result := c.input.Flattened()
-
-			if !reflect.DeepEqual(result, c.expected) {
-				t.Errorf("Expected: '%s'. Got '%s'.", c.expected, result)
-			}
-		})
-	}
-}
-
-func Test_AllocationFilter_Equals(t *testing.T) {
-	cases := []struct {
-		left     AllocationFilter
-		right    AllocationFilter
-		expected bool
-	}{
-		// AFC
-		{
-			left:     AllocationFilterCondition{},
-			right:    AllocationFilterCondition{},
-			expected: true,
-		},
-		{
-			left: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterStartsWith,
-				Value: "kubecost-abc",
-			},
-			right: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterStartsWith,
-				Value: "kubecost-abc",
-			},
-			expected: true,
-		},
-		{
-			left: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: "kubecost-abc",
-			},
-			right: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: "kubecost-abc",
-			},
-			expected: true,
-		},
-		{
-			left: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: "kubecost-abc",
-			},
-			right: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Value: "kubecost-abc",
-			},
-			expected: false,
-		},
-		{
-			left: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Value: "kubecost-abc",
-			},
-			right: AllocationFilterCondition{
-				Field: FilterLabel,
-				Op:    FilterEquals,
-				Key:   "app",
-				Value: "kubecost-abc",
-			},
-			expected: false,
-		},
-		{
-			left: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterStartsWith,
-				Value: "kubecost-abc",
-			},
-			right: AllocationFilterCondition{
-				Field: FilterNamespace,
-				Op:    FilterStartsWith,
-				Value: "kubecost-abcd",
-			},
-			expected: false,
-		},
-		// OR
-		// EMPTY
-		{
-			left:     AllocationFilterOr{},
-			right:    nil,
-			expected: false,
-		},
-		{
-			left:     AllocationFilterOr{Filters: []AllocationFilter{}},
-			right:    nil,
-			expected: false,
-		},
-
-		{
-			left:     AllocationFilterOr{},
-			right:    AllocationFilterOr{},
-			expected: true,
-		},
-		{
-			left:     AllocationFilterOr{},
-			right:    AllocationFilterOr{Filters: []AllocationFilter{}},
-			expected: true,
-		},
-
-		{
-			left:     AllocationFilterOr{Filters: []AllocationFilter{}},
-			right:    AllocationFilterOr{},
-			expected: true,
-		},
-		{
-			left:     AllocationFilterOr{Filters: []AllocationFilter{}},
-			right:    AllocationFilterOr{Filters: []AllocationFilter{}},
-			expected: true,
-		},
-		// FILLED
-		{
-			left: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			expected: true,
-		},
-		{
-			left: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterNone{},
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-					},
-				},
-			}},
-			expected: true,
-		},
-		{
-			left: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-			}},
-			expected: true,
-		},
-		{
-			left: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns3",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			expected: false,
-		},
-		{
-			left: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterOr{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterAnd{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			expected: false,
-		},
-		// AND
-		// EMPTY
-		{
-			left:     AllocationFilterAnd{},
-			right:    nil,
-			expected: false,
-		},
-		{
-			left:     AllocationFilterAnd{Filters: []AllocationFilter{}},
-			right:    nil,
-			expected: false,
-		},
-
-		{
-			left:     AllocationFilterAnd{},
-			right:    AllocationFilterAnd{},
-			expected: true,
-		},
-		{
-			left:     AllocationFilterAnd{},
-			right:    AllocationFilterAnd{Filters: []AllocationFilter{}},
-			expected: true,
-		},
-
-		{
-			left:     AllocationFilterAnd{Filters: []AllocationFilter{}},
-			right:    AllocationFilterAnd{},
-			expected: true,
-		},
-		{
-			left:     AllocationFilterAnd{Filters: []AllocationFilter{}},
-			right:    AllocationFilterAnd{Filters: []AllocationFilter{}},
-			expected: true,
-		},
-		// FILLED
-		{
-			left: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			expected: true,
-		},
-		{
-			left: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterNone{},
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-					},
-				},
-			}},
-			expected: true,
-		},
-		{
-			left: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-			}},
-			expected: true,
-		},
-		{
-			left: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns3",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			expected: false,
-		},
-		{
-			left: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterNone{},
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			right: AllocationFilterAnd{Filters: []AllocationFilter{
-				AllocationFilterCondition{
-					Field: FilterLabel,
-					Op:    FilterStartsWith,
-					Key:   "xyz",
-					Value: "kubecost",
-				},
-				AllocationFilterOr{
-					Filters: []AllocationFilter{
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns1",
-						},
-						AllocationFilterCondition{
-							Field: FilterNamespace,
-							Op:    FilterEquals,
-							Value: "ns2",
-						},
-					},
-				},
-			}},
-			expected: false,
-		},
-	}
-
-	for _, c := range cases {
-		t.Run(fmt.Sprintf("'%s' = '%s'", c.left, c.right), func(t *testing.T) {
-			if c.left.Equals(c.right) != c.expected {
-				t.Fatalf("Expected: %t", c.expected)
-			}
-		})
-	}
-}

+ 260 - 0
pkg/kubecost/allocationmatcher.go

@@ -0,0 +1,260 @@
+package kubecost
+
+import (
+	"fmt"
+
+	afilter "github.com/opencost/opencost/pkg/filter21/allocation"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/matcher"
+	"github.com/opencost/opencost/pkg/filter21/ops"
+	"github.com/opencost/opencost/pkg/filter21/transform"
+)
+
+// AllocationMatcher is a matcher implementation for Allocation instances,
+// compiled using the matcher.MatchCompiler for allocations.
+type AllocationMatcher matcher.Matcher[*Allocation]
+
+// NewAllocationMatchCompiler creates a new instance of a
+// matcher.MatchCompiler[*Allocation] which can be used to compile filter.Filter
+// ASTs into matcher.Matcher[*Allocation] implementations.
+//
+// If the label config is nil, the compiler will fail to compile alias filters
+// if any are present in the AST.
+//
+// If storage interfaces ever support querying natively by alias (e.g. if a
+// data store contained a "product" attribute on an Allocation row), that should
+// be handled by a purpose-built AST compiler.
+func NewAllocationMatchCompiler(labelConfig *LabelConfig) *matcher.MatchCompiler[*Allocation] {
+	passes := []transform.CompilerPass{}
+
+	// The label config pass should be the first pass
+	if labelConfig != nil {
+		passes = append(passes, NewAllocationAliasPass(*labelConfig))
+	}
+
+	passes = append(passes,
+		transform.PrometheusKeySanitizePass(),
+		transform.UnallocatedReplacementPass(),
+	)
+	return matcher.NewMatchCompiler(
+		allocationFieldMap,
+		allocationSliceFieldMap,
+		allocationMapFieldMap,
+		passes...,
+	)
+}
+
+// Maps fields from an allocation to a string value based on an identifier
+func allocationFieldMap(a *Allocation, identifier ast.Identifier) (string, error) {
+	if a == nil {
+		return "", fmt.Errorf("cannot map to nil allocation")
+	}
+	if a.Properties == nil {
+		return "", fmt.Errorf("cannot map to nil properties")
+	}
+	if identifier.Field == nil {
+		return "", fmt.Errorf("cannot map field from identifier with nil field")
+	}
+	switch afilter.AllocationField(identifier.Field.Name) {
+	case afilter.FieldNamespace:
+		return a.Properties.Namespace, nil
+	case afilter.FieldNode:
+		return a.Properties.Node, nil
+	case afilter.FieldClusterID:
+		return a.Properties.Cluster, nil
+	case afilter.FieldControllerName:
+		return a.Properties.Controller, nil
+	case afilter.FieldControllerKind:
+		return a.Properties.ControllerKind, nil
+	case afilter.FieldPod:
+		return a.Properties.Pod, nil
+	case afilter.FieldContainer:
+		return a.Properties.Container, nil
+	case afilter.FieldProvider:
+		return a.Properties.ProviderID, nil
+	case afilter.FieldLabel:
+		return a.Properties.Labels[identifier.Key], nil
+	case afilter.FieldAnnotation:
+		return a.Properties.Annotations[identifier.Key], nil
+	}
+
+	return "", fmt.Errorf("Failed to find string identifier on Allocation: %s", identifier.Field.Name)
+}
+
+// Maps slice fields from an allocation to a []string value based on an identifier
+func allocationSliceFieldMap(a *Allocation, identifier ast.Identifier) ([]string, error) {
+	switch afilter.AllocationField(identifier.Field.Name) {
+	case afilter.FieldServices:
+		return a.Properties.Services, nil
+	}
+
+	return nil, fmt.Errorf("Failed to find []string identifier on Allocation: %s", identifier.Field.Name)
+}
+
+// Maps map fields from an allocation to a map[string]string value based on an identifier
+func allocationMapFieldMap(a *Allocation, identifier ast.Identifier) (map[string]string, error) {
+	switch afilter.AllocationField(identifier.Field.Name) {
+	case afilter.FieldLabel:
+		return a.Properties.Labels, nil
+	case afilter.FieldAnnotation:
+		return a.Properties.Annotations, nil
+	}
+	return nil, fmt.Errorf("Failed to find map[string]string identifier on Allocation: %s", identifier.Field.Name)
+}
+
+// allocationAliasPass implements the transform.CompilerPass interface, providing
+// a pass which converts alias nodes to logically-equivalent label/annotation
+// filter nodes based on the label config.
+type allocationAliasPass struct {
+	Config              LabelConfig
+	AliasNameToAliasKey map[afilter.AllocationAlias]string
+}
+
+// NewAllocationAliasPass creates a compiler pass that converts alias nodes to
+// logically-equivalent label/annotation nodes based on the label config.
+//
+// Due to the special alias logic that combines label and annotation behavior
+// when filtering on alias, an alias filter is logically equivalent to the
+// following expression:
+//
+// (or
+//
+//	(and (contains labels <parseraliaskey>)
+//	     (<op> labels[<parseraliaskey>] <filtervalue>))
+//	(and (not (contains labels <parseraliaskey>))
+//	     (and (contains annotations <parseraliaskey>)
+//	          (<op> annotations[<parseraliaskey>] <filtervalue>))))
+func NewAllocationAliasPass(config LabelConfig) transform.CompilerPass {
+	aliasNameToAliasKey := map[afilter.AllocationAlias]string{
+		afilter.AliasDepartment:  config.DepartmentLabel,
+		afilter.AliasEnvironment: config.EnvironmentLabel,
+		afilter.AliasOwner:       config.OwnerLabel,
+		afilter.AliasProduct:     config.ProductLabel,
+		afilter.AliasTeam:        config.TeamLabel,
+	}
+
+	return &allocationAliasPass{
+		Config:              config,
+		AliasNameToAliasKey: aliasNameToAliasKey,
+	}
+}
+
+// Exec implements the transform.CompilerPass interface for an alias pass.
+// See allocationAliasPass struct documentation for an explanation.
+func (p *allocationAliasPass) Exec(filter ast.FilterNode) (ast.FilterNode, error) {
+	if p.AliasNameToAliasKey == nil {
+		return nil, fmt.Errorf("cannot perform alias conversion with nil mapping of alias name -> key")
+	}
+
+	var transformErr error
+	leafTransformerFunc := func(node ast.FilterNode) ast.FilterNode {
+		if transformErr != nil {
+			return node
+		}
+
+		var field *ast.Field
+		var filterValue string
+		var filterOp ast.FilterOp
+
+		switch concrete := node.(type) {
+		// These ops are not alias ops, alias ops can only be base-level ops
+		// like =, !=, etc. No modification required here.
+		case *ast.AndOp, *ast.OrOp, *ast.NotOp, *ast.VoidOp, *ast.ContradictionOp:
+			return node
+
+		case *ast.EqualOp:
+			field = concrete.Left.Field
+			filterValue = concrete.Right
+			filterOp = ast.FilterOpEquals
+		case *ast.ContainsOp:
+			field = concrete.Left.Field
+			filterValue = concrete.Right
+			filterOp = ast.FilterOpContains
+		case *ast.ContainsPrefixOp:
+			field = concrete.Left.Field
+			filterValue = concrete.Right
+			filterOp = ast.FilterOpContainsPrefix
+		case *ast.ContainsSuffixOp:
+			field = concrete.Left.Field
+			filterValue = concrete.Right
+			filterOp = ast.FilterOpContainsSuffix
+		default:
+			transformErr = fmt.Errorf("unknown op '%s' during alias pass", concrete.Op())
+			return node
+		}
+
+		if field == nil {
+			return node
+		}
+		if !field.IsAlias() {
+			return node
+		}
+
+		filterFieldAlias := afilter.AllocationAlias(field.Name)
+		parserAliasKey, ok := p.AliasNameToAliasKey[filterFieldAlias]
+		if !ok {
+			transformErr = fmt.Errorf("unknown alias field '%s'", filterFieldAlias)
+			return node
+		}
+
+		newFilter, err := convertAliasFilterToLabelAnnotationFilter(parserAliasKey, filterValue, filterOp)
+		if err != nil {
+			transformErr = fmt.Errorf("performing alias conversion for node '%+v': %w", node, err)
+			return node
+		}
+
+		return newFilter
+	}
+
+	newFilter := ast.TransformLeaves(filter, leafTransformerFunc)
+
+	if transformErr != nil {
+		return nil, fmt.Errorf("alias pass transform: %w", transformErr)
+	}
+
+	return newFilter, nil
+}
+
+// convertAliasFilterToLabelAnnotationFilter constructs a new filter node using
+// only operations on labels and annotations that is logically equivalent to an
+// alias node from relevant data extracted from the original alias node.
+func convertAliasFilterToLabelAnnotationFilter(aliasKey string, filterValue string, op ast.FilterOp) (ast.FilterNode, error) {
+	labelKey := ops.WithKey(afilter.FieldLabel, aliasKey)
+	annotationKey := ops.WithKey(afilter.FieldAnnotation, aliasKey)
+
+	var labelOp ast.FilterNode
+	var annotationOp ast.FilterNode
+
+	// This should only need to implement conversion for base-level ops like
+	// equals, contains, etc.
+	switch op {
+	case ast.FilterOpEquals:
+		labelOp = ops.Eq(labelKey, filterValue)
+		annotationOp = ops.Eq(annotationKey, filterValue)
+	case ast.FilterOpContains:
+		labelOp = ops.Contains(labelKey, filterValue)
+		annotationOp = ops.Contains(annotationKey, filterValue)
+	case ast.FilterOpContainsPrefix:
+		labelOp = ops.ContainsPrefix(labelKey, filterValue)
+		annotationOp = ops.ContainsPrefix(annotationKey, filterValue)
+	case ast.FilterOpContainsSuffix:
+		labelOp = ops.ContainsSuffix(labelKey, filterValue)
+		annotationOp = ops.ContainsSuffix(annotationKey, filterValue)
+	default:
+		return nil, fmt.Errorf("unsupported op type '%s' for alias conversion", op)
+	}
+
+	return ops.Or(
+		ops.And(
+			ops.Contains(afilter.FieldLabel, aliasKey),
+			labelOp,
+		),
+		ops.And(
+			ops.Not(ops.Contains(afilter.FieldLabel, aliasKey)),
+			ops.And(
+				ops.Contains(afilter.FieldAnnotation, aliasKey),
+				annotationOp,
+			),
+		),
+	), nil
+}

+ 64 - 0
pkg/kubecost/allocationmatcher_test.go

@@ -0,0 +1,64 @@
+package kubecost
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	afilter "github.com/opencost/opencost/pkg/filter21/allocation"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/ops"
+)
+
+func TestAliasPass(t *testing.T) {
+	labelConfig := &LabelConfig{
+		DepartmentLabel:  "keydepartment",
+		EnvironmentLabel: "keyenvironment",
+		OwnerLabel:       "keyowner",
+		ProductLabel:     "keyproduct",
+		TeamLabel:        "keyteam",
+	}
+
+	cases := []struct {
+		name     string
+		input    ast.FilterNode
+		expected ast.FilterNode
+	}{
+		{
+			name: "department equal",
+			input: &ast.EqualOp{
+				Left: ast.Identifier{
+					Field: ast.NewAliasField(afilter.AliasDepartment),
+				},
+				Right: "x",
+			},
+			expected: ops.Or(
+				ops.And(
+					ops.Contains(afilter.FieldLabel, "keydepartment"),
+					ops.Eq(ops.WithKey(afilter.FieldLabel, "keydepartment"), "x"),
+				),
+				ops.And(
+					ops.Not(ops.Contains(afilter.FieldLabel, "keydepartment")),
+					ops.And(
+						ops.Contains(afilter.FieldAnnotation, "keydepartment"),
+						ops.Eq(ops.WithKey(afilter.FieldAnnotation, "keydepartment"), "x"),
+					),
+				),
+			),
+		},
+	}
+
+	for _, c := range cases {
+		pass := NewAllocationAliasPass(*labelConfig)
+
+		t.Run(c.name, func(t *testing.T) {
+			result, err := pass.Exec(c.input)
+			if err != nil {
+				t.Fatalf("unexpected error: %s", err)
+			}
+
+			if diff := cmp.Diff(c.expected, result); len(diff) > 0 {
+				t.Errorf("diff: %s", diff)
+			}
+		})
+	}
+}

+ 89 - 11
pkg/kubecost/allocationprops.go

@@ -92,17 +92,19 @@ func ParseProperty(text string) (string, error) {
 
 // AllocationProperties describes a set of Kubernetes objects.
 type AllocationProperties struct {
-	Cluster        string                `json:"cluster,omitempty"`
-	Node           string                `json:"node,omitempty"`
-	Container      string                `json:"container,omitempty"`
-	Controller     string                `json:"controller,omitempty"`
-	ControllerKind string                `json:"controllerKind,omitempty"`
-	Namespace      string                `json:"namespace,omitempty"`
-	Pod            string                `json:"pod,omitempty"`
-	Services       []string              `json:"services,omitempty"`
-	ProviderID     string                `json:"providerID,omitempty"`
-	Labels         AllocationLabels      `json:"labels,omitempty"`
-	Annotations    AllocationAnnotations `json:"annotations,omitempty"`
+	Cluster              string                `json:"cluster,omitempty"`
+	Node                 string                `json:"node,omitempty"`
+	Container            string                `json:"container,omitempty"`
+	Controller           string                `json:"controller,omitempty"`
+	ControllerKind       string                `json:"controllerKind,omitempty"`
+	Namespace            string                `json:"namespace,omitempty"`
+	Pod                  string                `json:"pod,omitempty"`
+	Services             []string              `json:"services,omitempty"`
+	ProviderID           string                `json:"providerID,omitempty"`
+	Labels               AllocationLabels      `json:"labels,omitempty"`
+	Annotations          AllocationAnnotations `json:"annotations,omitempty"`
+	NamespaceLabels      AllocationLabels      `json:"namespaceLabels,omitempty"`      // @bingen:field[version=17]
+	NamespaceAnnotations AllocationAnnotations `json:"namespaceAnnotations,omitempty"` // @bingen:field[version=17]
 	// When set to true, maintain the intersection of all labels + annotations
 	// in the aggregated AllocationProperties object
 	AggregatedMetadata bool `json:"-"` //@bingen:field[ignore]
@@ -141,12 +143,25 @@ func (p *AllocationProperties) Clone() *AllocationProperties {
 	}
 	clone.Labels = labels
 
+	nsLabels := make(map[string]string, len(p.NamespaceLabels))
+	for k, v := range p.NamespaceLabels {
+		nsLabels[k] = v
+	}
+	clone.NamespaceLabels = nsLabels
+
 	annotations := make(map[string]string, len(p.Annotations))
 	for k, v := range p.Annotations {
 		annotations[k] = v
 	}
 	clone.Annotations = annotations
 
+	nsAnnotations := make(map[string]string, len(p.NamespaceAnnotations))
+	for k, v := range p.NamespaceAnnotations {
+		nsAnnotations[k] = v
+	}
+	clone.NamespaceAnnotations = nsAnnotations
+
+	clone.AggregatedMetadata = p.AggregatedMetadata
 	return clone
 }
 
@@ -200,6 +215,19 @@ func (p *AllocationProperties) Equal(that *AllocationProperties) bool {
 		return false
 	}
 
+	pNamespaceLabels := p.NamespaceLabels
+	thatNamespaceLabels := that.NamespaceLabels
+	if len(pNamespaceLabels) == len(thatNamespaceLabels) {
+		for k, pv := range pNamespaceLabels {
+			tv, ok := thatNamespaceLabels[k]
+			if !ok || tv != pv {
+				return false
+			}
+		}
+	} else {
+		return false
+	}
+
 	pAnnotations := p.Annotations
 	thatAnnotations := that.Annotations
 	if len(pAnnotations) == len(thatAnnotations) {
@@ -213,6 +241,19 @@ func (p *AllocationProperties) Equal(that *AllocationProperties) bool {
 		return false
 	}
 
+	pNamespaceAnnotations := p.NamespaceAnnotations
+	thatNamespaceAnnotations := that.NamespaceAnnotations
+	if len(pNamespaceAnnotations) == len(thatNamespaceAnnotations) {
+		for k, pv := range pNamespaceAnnotations {
+			tv, ok := thatNamespaceAnnotations[k]
+			if !ok || tv != pv {
+				return false
+			}
+		}
+	} else {
+		return false
+	}
+
 	pServices := p.Services
 	thatServices := that.Services
 	if len(pServices) == len(thatServices) {
@@ -452,6 +493,20 @@ func (p *AllocationProperties) Intersection(that *AllocationProperties) *Allocat
 	if p.Namespace == that.Namespace {
 
 		intersectionProps.Namespace = p.Namespace
+
+		// CORE-140: In the case that the namespace is the same, also copy over the namespaceLabels and annotations
+		// Note - assume that if the namespace is the same on both, then namespace label/annotation sets
+		// will be the same, so just carry one set over
+		if p.Container == UnmountedSuffix {
+			// This logic is designed to effectively ignore the unmounted/unallocated objects
+			// and just copy over the labels from the other, 'legitimate' allocation.
+			intersectionProps.NamespaceLabels = copyStringMap(that.NamespaceLabels)
+			intersectionProps.NamespaceAnnotations = copyStringMap(that.NamespaceAnnotations)
+		} else {
+			intersectionProps.NamespaceLabels = copyStringMap(p.NamespaceLabels)
+			intersectionProps.NamespaceAnnotations = copyStringMap(p.NamespaceAnnotations)
+		}
+
 		// ignore the incoming labels from unallocated or unmounted special case pods
 		if p.AggregatedMetadata || that.AggregatedMetadata {
 			intersectionProps.AggregatedMetadata = true
@@ -476,15 +531,26 @@ func (p *AllocationProperties) Intersection(that *AllocationProperties) *Allocat
 			}
 		}
 	}
+
 	if p.Pod == that.Pod {
 		intersectionProps.Pod = p.Pod
 	}
 	if p.ProviderID == that.ProviderID {
 		intersectionProps.ProviderID = p.ProviderID
 	}
+
 	return intersectionProps
 }
 
+func copyStringMap(original map[string]string) map[string]string {
+	copy := make(map[string]string)
+	for key, value := range original {
+		copy[key] = value
+	}
+
+	return copy
+}
+
 func mapIntersection(map1, map2 map[string]string) map[string]string {
 	result := make(map[string]string)
 	for key, value := range map1 {
@@ -548,11 +614,23 @@ func (p *AllocationProperties) String() string {
 	}
 	strs = append(strs, fmt.Sprintf("Labels:{%s}", strings.Join(labelStrs, ",")))
 
+	var nsLabelStrs []string
+	for k, prop := range p.NamespaceLabels {
+		nsLabelStrs = append(nsLabelStrs, fmt.Sprintf("%s:%s", k, prop))
+	}
+	strs = append(strs, fmt.Sprintf("NamespaceLabels:{%s}", strings.Join(nsLabelStrs, ",")))
+
 	var annotationStrs []string
 	for k, prop := range p.Annotations {
 		annotationStrs = append(annotationStrs, fmt.Sprintf("%s:%s", k, prop))
 	}
 	strs = append(strs, fmt.Sprintf("Annotations:{%s}", strings.Join(annotationStrs, ",")))
 
+	var nsAnnotationStrs []string
+	for k, prop := range p.NamespaceAnnotations {
+		nsAnnotationStrs = append(nsAnnotationStrs, fmt.Sprintf("%s:%s", k, prop))
+	}
+	strs = append(strs, fmt.Sprintf("NamespaceAnnotations:{%s}", strings.Join(nsAnnotationStrs, ",")))
+
 	return fmt.Sprintf("{%s}", strings.Join(strs, "; "))
 }

+ 151 - 23
pkg/kubecost/allocationprops_test.go

@@ -21,8 +21,10 @@ func TestAllocationPropsIntersection(t *testing.T) {
 				Annotations: map[string]string{},
 			},
 			expected: &AllocationProperties{
-				Labels:      nil,
-				Annotations: nil,
+				Labels:               nil,
+				Annotations:          nil,
+				NamespaceLabels:      map[string]string{},
+				NamespaceAnnotations: map[string]string{},
 			},
 		},
 		"nil intersection": {
@@ -30,7 +32,7 @@ func TestAllocationPropsIntersection(t *testing.T) {
 			allocationProps2: nil,
 			expected:         nil,
 		},
-		"intersection, with labels/annotations, no aggregated metdata": {
+		"intersection, with labels/annotations, no aggregated metadata": {
 			allocationProps1: &AllocationProperties{
 				AggregatedMetadata: false,
 				Node:               "node1",
@@ -44,13 +46,15 @@ func TestAllocationPropsIntersection(t *testing.T) {
 				Annotations:        map[string]string{"key4": "val4"},
 			},
 			expected: &AllocationProperties{
-				AggregatedMetadata: false,
-				Node:               "node1",
-				Labels:             nil,
-				Annotations:        nil,
+				AggregatedMetadata:   false,
+				Node:                 "node1",
+				Labels:               nil,
+				Annotations:          nil,
+				NamespaceLabels:      map[string]string{},
+				NamespaceAnnotations: map[string]string{},
 			},
 		},
-		"intersection, with labels/annotations, with aggregated metdata": {
+		"intersection, with labels/annotations, same values": {
 			allocationProps1: &AllocationProperties{
 				AggregatedMetadata: false,
 				ControllerKind:     "controller1",
@@ -66,11 +70,13 @@ func TestAllocationPropsIntersection(t *testing.T) {
 				Annotations:        map[string]string{"key2": "val2"},
 			},
 			expected: &AllocationProperties{
-				AggregatedMetadata: true,
-				Namespace:          "ns1",
-				ControllerKind:     "",
-				Labels:             map[string]string{"key1": "val1"},
-				Annotations:        map[string]string{"key2": "val2"},
+				AggregatedMetadata:   true,
+				Namespace:            "ns1",
+				ControllerKind:       "",
+				Labels:               map[string]string{"key1": "val1"},
+				Annotations:          map[string]string{"key2": "val2"},
+				NamespaceLabels:      map[string]string{},
+				NamespaceAnnotations: map[string]string{},
 			},
 		},
 		"intersection, with labels/annotations, special case container": {
@@ -89,11 +95,13 @@ func TestAllocationPropsIntersection(t *testing.T) {
 				Annotations:        map[string]string{"key2": "val2"},
 			},
 			expected: &AllocationProperties{
-				AggregatedMetadata: true,
-				Namespace:          "ns1",
-				ControllerKind:     "",
-				Labels:             map[string]string{"key1": "val1"},
-				Annotations:        map[string]string{"key2": "val2"},
+				AggregatedMetadata:   true,
+				Namespace:            "ns1",
+				ControllerKind:       "",
+				Labels:               map[string]string{"key1": "val1"},
+				Annotations:          map[string]string{"key2": "val2"},
+				NamespaceLabels:      map[string]string{},
+				NamespaceAnnotations: map[string]string{},
 			},
 		},
 		"test services are nulled when intersecting": {
@@ -115,11 +123,13 @@ func TestAllocationPropsIntersection(t *testing.T) {
 				Annotations:        map[string]string{"key2": "val2"},
 			},
 			expected: &AllocationProperties{
-				AggregatedMetadata: true,
-				Namespace:          "ns1",
-				ControllerKind:     "",
-				Labels:             map[string]string{"key1": "val1"},
-				Annotations:        map[string]string{"key2": "val2"},
+				AggregatedMetadata:   true,
+				Namespace:            "ns1",
+				ControllerKind:       "",
+				Labels:               map[string]string{"key1": "val1"},
+				Annotations:          map[string]string{"key2": "val2"},
+				NamespaceLabels:      map[string]string{},
+				NamespaceAnnotations: map[string]string{},
 			},
 		},
 	}
@@ -237,3 +247,121 @@ func TestGenerateKey(t *testing.T) {
 		})
 	}
 }
+
+func TestIntersection(t *testing.T) {
+
+	propsEmpty := AllocationProperties{}
+
+	propsMedium := AllocationProperties{
+		Cluster:        "cluster1",
+		Node:           "Node1",
+		Container:      "container1",
+		Controller:     "controller1",
+		ControllerKind: "controllerkind1",
+		Namespace:      "ns1",
+		Pod:            "pod1",
+		Services:       []string{"service1"},
+		ProviderID:     "provider1",
+	}
+
+	propsFull := AllocationProperties{
+		Cluster:              "cluster2",
+		Node:                 "Node2",
+		Container:            "container2",
+		Controller:           "controller2",
+		ControllerKind:       "controllerkind2",
+		Namespace:            "ns2",
+		Pod:                  "pod2",
+		Services:             []string{"service2"},
+		ProviderID:           "provider2",
+		NamespaceLabels:      AllocationLabels{"key1": "value1"},
+		NamespaceAnnotations: AllocationAnnotations{"key2": "value2", "key5": "value5"},
+		Labels:               AllocationLabels{"key3": "value3"},
+		Annotations:          AllocationAnnotations{"key4": "value4"},
+	}
+
+	// Case 1: no intersection
+	// expect empty result object
+	testObj1 := AllocationProperties{}
+
+	result := testObj1.Intersection(&propsEmpty)
+
+	if !result.Equal(&propsEmpty) {
+		t.Fatalf("Case 1: expected empty object, no intersection")
+	}
+
+	// Case 2: Only has labels/annotations
+	// expect empty result object
+	testObj2 := AllocationProperties{
+		Labels:      map[string]string{"app": "product-label-light"},
+		Annotations: map[string]string{"app": "product-annotation-light"},
+	}
+
+	result = testObj2.Intersection(&propsMedium)
+
+	if !result.Equal(&propsEmpty) {
+		t.Fatalf("Case 2: expected empty object, no intersection")
+	}
+
+	// Case 3: Has non-label/annotations set
+	// expect all non label/annotation/service string array fields to be unset
+	// different container names should be omitted
+	testObj3 := AllocationProperties{
+		Cluster:        "cluster1",
+		Node:           "Node1",
+		Container:      "container2",
+		Controller:     "controller1",
+		ControllerKind: "controllerkind1",
+		Namespace:      "ns1",
+		Pod:            "pod1",
+		Services:       []string{"service1"},
+		ProviderID:     "provider1",
+	}
+
+	expectedResult := AllocationProperties{
+		Cluster:        "cluster1",
+		Node:           "Node1",
+		Controller:     "controller1",
+		ControllerKind: "controllerkind1",
+		Namespace:      "ns1",
+		Pod:            "pod1",
+		ProviderID:     "provider1",
+	}
+
+	result = testObj3.Intersection(&propsMedium)
+
+	if !result.Equal(&expectedResult) {
+		t.Fatalf("Case 3: expected output %v does not match actual output %v", expectedResult, result)
+	}
+
+	// Case 4: Copy over NamespaceLabels/Annots when namespace is the same
+	testObj4 := AllocationProperties{
+		Cluster:              "cluster2",
+		Node:                 "NodeX",
+		Container:            "containerX",
+		Controller:           "controllerX",
+		ControllerKind:       "controllerkindX",
+		Namespace:            "ns2",
+		Pod:                  "podX",
+		Services:             []string{"serviceX"},
+		ProviderID:           "providerX",
+		NamespaceLabels:      AllocationLabels{"key1": "value1"},
+		NamespaceAnnotations: AllocationAnnotations{"key2": "value2", "key5": "value5"},
+		Labels:               AllocationLabels{"key3": "value3"},
+		Annotations:          AllocationAnnotations{"key4": "value4"},
+	}
+
+	expectedResult = AllocationProperties{
+		Cluster:              "cluster2",
+		Namespace:            "ns2",
+		NamespaceLabels:      AllocationLabels{"key1": "value1"},
+		NamespaceAnnotations: AllocationAnnotations{"key2": "value2", "key5": "value5"},
+	}
+
+	result = testObj4.Intersection(&propsFull)
+
+	if !result.Equal(&expectedResult) {
+		t.Fatalf("Case 4: expected output %v does not match actual output %v", expectedResult, result)
+	}
+
+}

+ 27 - 20
pkg/kubecost/asset.go

@@ -8,6 +8,9 @@ import (
 	"strings"
 	"time"
 
+	filter21 "github.com/opencost/opencost/pkg/filter21"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/matcher"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 	"github.com/opencost/opencost/pkg/util/timeutil"
@@ -422,10 +425,6 @@ func (al AssetLabels) Append(newLabels map[string]string, overwrite bool) {
 	}
 }
 
-// AssetMatchFunc is a function that can be used to match Assets by
-// returning true for any given Asset if a condition is met.
-type AssetMatchFunc func(Asset) bool
-
 // AssetType identifies a type of Asset
 type AssetType int
 
@@ -2185,10 +2184,11 @@ type LoadBalancer struct {
 	Window     Window
 	Adjustment float64
 	Cost       float64
+	Private    bool // @bingen:field[version=20]
 }
 
 // NewLoadBalancer instantiates and returns a new LoadBalancer
-func NewLoadBalancer(name, cluster, providerID string, start, end time.Time, window Window) *LoadBalancer {
+func NewLoadBalancer(name, cluster, providerID string, start, end time.Time, window Window, private bool) *LoadBalancer {
 	properties := &AssetProperties{
 		Category:   NetworkCategory,
 		Name:       name,
@@ -2203,6 +2203,7 @@ func NewLoadBalancer(name, cluster, providerID string, start, end time.Time, win
 		Start:      start,
 		End:        end,
 		Window:     window,
+		Private:    private,
 	}
 }
 
@@ -2743,6 +2744,21 @@ func (as *AssetSet) AggregateBy(aggregateBy []string, opts *AssetAggregationOpti
 		return nil
 	}
 
+	var filter AssetMatcher
+	if opts.Filter == nil {
+		filter = &matcher.AllPass[Asset]{}
+	} else {
+		compiler := NewAssetMatchCompiler()
+		var err error
+		filter, err = compiler.Compile(opts.Filter)
+		if err != nil {
+			return fmt.Errorf("compiling filter '%s': %w", ast.ToPreOrderShortString(opts.Filter), err)
+		}
+	}
+	if filter == nil {
+		return fmt.Errorf("unexpected nil filter")
+	}
+
 	aggSet := NewAssetSet(as.Start(), as.End())
 	aggSet.AggregationKeys = aggregateBy
 
@@ -2759,15 +2775,8 @@ func (as *AssetSet) AggregateBy(aggregateBy []string, opts *AssetAggregationOpti
 		sa := NewSharedAsset(name, as.Window.Clone())
 		sa.Cost = hourlyCost * hours
 
-		// Insert shared asset if it passes all filters
-		insert := true
-		for _, ff := range opts.FilterFuncs {
-			if !ff(sa) {
-				insert = false
-				break
-			}
-		}
-		if insert {
+		// Insert shared asset if it passes filter
+		if filter.Matches(sa) {
 			err := aggSet.Insert(sa, opts.LabelConfig)
 			if err != nil {
 				return err
@@ -2776,11 +2785,9 @@ func (as *AssetSet) AggregateBy(aggregateBy []string, opts *AssetAggregationOpti
 	}
 
 	// Delete the Assets that don't pass each filter
-	for _, ff := range opts.FilterFuncs {
-		for key, asset := range as.Assets {
-			if !ff(asset) {
-				delete(as.Assets, key)
-			}
+	for key, asset := range as.Assets {
+		if !filter.Matches(asset) {
+			delete(as.Assets, key)
 		}
 	}
 
@@ -3460,7 +3467,7 @@ func (asr *AssetSetRange) newAccumulation() (*AssetSet, error) {
 
 type AssetAggregationOptions struct {
 	SharedHourlyCosts map[string]float64
-	FilterFuncs       []AssetMatchFunc
+	Filter            filter21.Filter
 	LabelConfig       *LabelConfig
 }
 

+ 2 - 2
pkg/kubecost/asset_json_test.go

@@ -419,7 +419,7 @@ func TestNode_Unmarshal(t *testing.T) {
 
 func TestLoadBalancer_Unmarshal(t *testing.T) {
 
-	lb1 := NewLoadBalancer("loadbalancer1", "cluster1", "provider1", *unmarshalWindow.start, *unmarshalWindow.end, unmarshalWindow)
+	lb1 := NewLoadBalancer("loadbalancer1", "cluster1", "provider1", *unmarshalWindow.start, *unmarshalWindow.end, unmarshalWindow, false)
 	lb1.Cost = 12.0
 	lb1.SetAdjustment(4.0)
 
@@ -515,7 +515,7 @@ func TestAssetset_Unmarshal(t *testing.T) {
 	disk := NewDisk("disk1", "cluster1", "disk1", *unmarshalWindow.start, *unmarshalWindow.end, unmarshalWindow)
 	network := NewNetwork("network1", "cluster1", "provider1", *unmarshalWindow.start, *unmarshalWindow.end, unmarshalWindow)
 	node := NewNode("node1", "cluster1", "provider1", *unmarshalWindow.start, *unmarshalWindow.end, unmarshalWindow)
-	lb := NewLoadBalancer("loadbalancer1", "cluster1", "provider1", *unmarshalWindow.start, *unmarshalWindow.end, unmarshalWindow)
+	lb := NewLoadBalancer("loadbalancer1", "cluster1", "provider1", *unmarshalWindow.start, *unmarshalWindow.end, unmarshalWindow, false)
 	sa := NewSharedAsset("sharedasset1", unmarshalWindow)
 
 	assetList := []Asset{any, cloud, cm, disk, network, node, lb, sa}

+ 105 - 0
pkg/kubecost/assetmatcher.go

@@ -0,0 +1,105 @@
+package kubecost
+
+import (
+	"fmt"
+	"strings"
+
+	afilter "github.com/opencost/opencost/pkg/filter21/asset"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/matcher"
+	"github.com/opencost/opencost/pkg/filter21/transform"
+)
+
+// AssetMatcher is a matcher implementation for Asset instances,
+// compiled using the matcher.MatchCompiler.
+type AssetMatcher matcher.Matcher[Asset]
+
+// NewAssetMatchCompiler creates a new instance of a
+// matcher.MatchCompiler[Asset] which can be used to compile filter.Filter
+// ASTs into matcher.Matcher[Asset] implementations.
+//
+// If the label config is nil, the compiler will fail to compile alias filters
+// if any are present in the AST.
+//
+// If storage interfaces every support querying natively by alias (e.g. if a
+// data store contained a "product" attribute on an Asset row), that should
+// be handled by a purpose-built AST compiler.
+func NewAssetMatchCompiler() *matcher.MatchCompiler[Asset] {
+	passes := []transform.CompilerPass{}
+
+	passes = append(passes,
+		transform.PrometheusKeySanitizePass(),
+		transform.UnallocatedReplacementPass(),
+	)
+	return matcher.NewMatchCompiler(
+		assetFieldMap,
+		assetSliceFieldMap,
+		assetMapFieldMap,
+		passes...,
+	)
+}
+
+// Maps fields from an asset to a string value based on an identifier
+func assetFieldMap(a Asset, identifier ast.Identifier) (string, error) {
+	if identifier.Field == nil {
+		return "", fmt.Errorf("cannot map field from identifier with nil field")
+	}
+	if a == nil {
+		return "", fmt.Errorf("cannot map field for nil Asset")
+	}
+
+	// Check special fields before defaulting to properties-based fields
+	switch afilter.AssetField(identifier.Field.Name) {
+	case afilter.FieldType:
+		return strings.ToLower(a.Type().String()), nil
+	case afilter.FieldLabel:
+		labels := a.GetLabels()
+		if labels == nil {
+			return "", nil
+		}
+		return labels[identifier.Key], nil
+	}
+
+	props := a.GetProperties()
+	if props == nil {
+		return "", fmt.Errorf("cannot map field for Asset with nil props")
+	}
+
+	switch afilter.AssetField(identifier.Field.Name) {
+	case afilter.FieldName:
+		return props.Name, nil
+	case afilter.FieldCategory:
+		return props.Category, nil
+	case afilter.FieldClusterID:
+		return props.Cluster, nil
+	case afilter.FieldProject:
+		return props.Project, nil
+	case afilter.FieldProvider:
+		return props.Provider, nil
+	case afilter.FieldProviderID:
+		return props.ProviderID, nil
+	case afilter.FieldAccount:
+		return props.Account, nil
+	case afilter.FieldService:
+		return props.Service, nil
+	}
+
+	return "", fmt.Errorf("Failed to find string identifier on Asset: %s", identifier.Field.Name)
+}
+
+// Maps slice fields from an asset to a []string value based on an identifier
+func assetSliceFieldMap(a Asset, identifier ast.Identifier) ([]string, error) {
+	return nil, fmt.Errorf("Assets have no slice fields")
+}
+
+// Maps map fields from an Asset to a map[string]string value based on an identifier
+func assetMapFieldMap(a Asset, identifier ast.Identifier) (map[string]string, error) {
+	if a == nil {
+		return nil, fmt.Errorf("cannot get map field for nil Asset")
+	}
+	switch afilter.AssetField(identifier.Field.Name) {
+	case afilter.FieldLabel:
+		return a.GetLabels(), nil
+	}
+	return nil, fmt.Errorf("Failed to find map[string]string identifier on Asset: %s", identifier.Field.Name)
+}

+ 4 - 2
pkg/kubecost/bingen.go

@@ -26,7 +26,7 @@ package kubecost
 // @bingen:generate:CoverageSet
 
 // Asset Version Set: Includes Asset pipeline specific resources
-// @bingen:set[name=Assets,version=19]
+// @bingen:set[name=Assets,version=20]
 // @bingen:generate:Any
 // @bingen:generate:Asset
 // @bingen:generate:AssetLabels
@@ -46,7 +46,7 @@ package kubecost
 // @bingen:end
 
 // Allocation Version Set: Includes Allocation pipeline specific resources
-// @bingen:set[name=Allocation,version=16]
+// @bingen:set[name=Allocation,version=18]
 // @bingen:generate:Allocation
 // @bingen:generate[stringtable]:AllocationSet
 // @bingen:generate:AllocationSetRange
@@ -58,6 +58,8 @@ package kubecost
 // @bingen:generate:PVAllocations
 // @bingen:generate:PVKey
 // @bingen:generate:PVAllocation
+// @bingen:generate:LbAllocations
+// @bingen:generate:LbAllocation
 // @bingen:end
 
 // @bingen:set[name=Audit,version=1]

+ 0 - 3
pkg/kubecost/cloudusage.go

@@ -11,6 +11,3 @@ type CloudUsageSetRange = AssetSetRange
 
 // CloudUsageAggregationOptions is temporarily aliased as the AssetAggregationOptions until further infrastructure and pages can be built to support its usage
 type CloudUsageAggregationOptions = AssetAggregationOptions
-
-// CloudUsageMatchFunc is temporarily aliased as the AssetMatchFunc until further infrastructure and pages can be built to support its usage
-type CloudUsageMatchFunc = AssetMatchFunc

+ 8 - 0
pkg/kubecost/coverage.go

@@ -26,10 +26,18 @@ func (c *Coverage) Key() string {
 }
 
 func (c *Coverage) IsEmpty() bool {
+	if c == nil {
+		log.Warnf("calling IsEmpty() on a nil Coverage")
+		return true
+	}
 	return c.Type == "" && c.Count == 0 && len(c.Errors) == 0 && len(c.Warnings) == 0 && c.Updated == time.Time{}
 }
 
 func (c *Coverage) Clone() *Coverage {
+	if c == nil {
+		log.Warnf("calling Clone() on a nil Coverage")
+		return nil
+	}
 	var errors []string
 	if len(c.Errors) > 0 {
 		errors = make([]string, len(c.Errors))

+ 365 - 2
pkg/kubecost/kubecost_codecs.go

@@ -37,10 +37,10 @@ const (
 	DefaultCodecVersion uint8 = 17
 
 	// AssetsCodecVersion is used for any resources listed in the Assets version set
-	AssetsCodecVersion uint8 = 19
+	AssetsCodecVersion uint8 = 20
 
 	// AllocationCodecVersion is used for any resources listed in the Allocation version set
-	AllocationCodecVersion uint8 = 16
+	AllocationCodecVersion uint8 = 18
 
 	// AuditCodecVersion is used for any resources listed in the Audit version set
 	AuditCodecVersion uint8 = 1
@@ -83,6 +83,7 @@ var typeMap map[string]reflect.Type = map[string]reflect.Type{
 	"CoverageSet":                   reflect.TypeOf((*CoverageSet)(nil)).Elem(),
 	"Disk":                          reflect.TypeOf((*Disk)(nil)).Elem(),
 	"EqualityAudit":                 reflect.TypeOf((*EqualityAudit)(nil)).Elem(),
+	"LbAllocation":                  reflect.TypeOf((*LbAllocation)(nil)).Elem(),
 	"LoadBalancer":                  reflect.TypeOf((*LoadBalancer)(nil)).Elem(),
 	"Network":                       reflect.TypeOf((*Network)(nil)).Elem(),
 	"Node":                          reflect.TypeOf((*Node)(nil)).Elem(),
@@ -760,6 +761,41 @@ func (target *Allocation) MarshalBinaryWithContext(ctx *EncodingContext) (err er
 		// --- [end][write][struct](RawAllocationOnlyData) ---
 
 	}
+	// --- [begin][write][alias](LbAllocations) ---
+	if map[string]*LbAllocation(target.LoadBalancers) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]*LbAllocation) ---
+		buff.WriteInt(len(map[string]*LbAllocation(target.LoadBalancers))) // map length
+		for vv, zz := range map[string]*LbAllocation(target.LoadBalancers) {
+			if ctx.IsStringTable() {
+				d := ctx.Table.AddOrGet(vv)
+				buff.WriteInt(d) // write table index
+			} else {
+				buff.WriteString(vv) // write string
+			}
+			if zz == nil {
+				buff.WriteUInt8(uint8(0)) // write nil byte
+			} else {
+				buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+				// --- [begin][write][struct](LbAllocation) ---
+				buff.WriteInt(0) // [compatibility, unused]
+				errH := zz.MarshalBinaryWithContext(ctx)
+				if errH != nil {
+					return errH
+				}
+				// --- [end][write][struct](LbAllocation) ---
+
+			}
+		}
+		// --- [end][write][map](map[string]*LbAllocation) ---
+
+	}
+	// --- [end][write][alias](LbAllocations) ---
+
 	return nil
 }
 
@@ -1023,6 +1059,55 @@ func (target *Allocation) UnmarshalBinaryWithContext(ctx *DecodingContext) (err
 		// --- [end][read][struct](RawAllocationOnlyData) ---
 
 	}
+	// field version check
+	if uint8(18) <= version {
+		// --- [begin][read][alias](LbAllocations) ---
+		var xx map[string]*LbAllocation
+		if buff.ReadUInt8() == uint8(0) {
+			xx = nil
+		} else {
+			// --- [begin][read][map](map[string]*LbAllocation) ---
+			aaa := buff.ReadInt() // map len
+			yy := make(map[string]*LbAllocation, aaa)
+			for j := 0; j < aaa; j++ {
+				var vv string
+				var ccc string
+				if ctx.IsStringTable() {
+					ddd := buff.ReadInt() // read string index
+					ccc = ctx.Table[ddd]
+				} else {
+					ccc = buff.ReadString() // read string
+				}
+				bbb := ccc
+				vv = bbb
+
+				var zz *LbAllocation
+				if buff.ReadUInt8() == uint8(0) {
+					zz = nil
+				} else {
+					// --- [begin][read][struct](LbAllocation) ---
+					eee := &LbAllocation{}
+					buff.ReadInt() // [compatibility, unused]
+					errH := eee.UnmarshalBinaryWithContext(ctx)
+					if errH != nil {
+						return errH
+					}
+					zz = eee
+					// --- [end][read][struct](LbAllocation) ---
+
+				}
+				yy[vv] = zz
+			}
+			xx = yy
+			// --- [end][read][map](map[string]*LbAllocation) ---
+
+		}
+		target.LoadBalancers = LbAllocations(xx)
+		// --- [end][read][alias](LbAllocations) ---
+
+	} else {
+	}
+
 	return nil
 }
 
@@ -1186,6 +1271,60 @@ func (target *AllocationProperties) MarshalBinaryWithContext(ctx *EncodingContex
 	}
 	// --- [end][write][alias](AllocationAnnotations) ---
 
+	// --- [begin][write][alias](AllocationLabels) ---
+	if map[string]string(target.NamespaceLabels) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.NamespaceLabels))) // map length
+		for vvv, zzz := range map[string]string(target.NamespaceLabels) {
+			if ctx.IsStringTable() {
+				p := ctx.Table.AddOrGet(vvv)
+				buff.WriteInt(p) // write table index
+			} else {
+				buff.WriteString(vvv) // write string
+			}
+			if ctx.IsStringTable() {
+				q := ctx.Table.AddOrGet(zzz)
+				buff.WriteInt(q) // write table index
+			} else {
+				buff.WriteString(zzz) // write string
+			}
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AllocationLabels) ---
+
+	// --- [begin][write][alias](AllocationAnnotations) ---
+	if map[string]string(target.NamespaceAnnotations) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.NamespaceAnnotations))) // map length
+		for vvvv, zzzz := range map[string]string(target.NamespaceAnnotations) {
+			if ctx.IsStringTable() {
+				r := ctx.Table.AddOrGet(vvvv)
+				buff.WriteInt(r) // write table index
+			} else {
+				buff.WriteString(vvvv) // write string
+			}
+			if ctx.IsStringTable() {
+				s := ctx.Table.AddOrGet(zzzz)
+				buff.WriteInt(s) // write table index
+			} else {
+				buff.WriteString(zzzz) // write string
+			}
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AllocationAnnotations) ---
+
 	return nil
 }
 
@@ -1427,6 +1566,96 @@ func (target *AllocationProperties) UnmarshalBinaryWithContext(ctx *DecodingCont
 	target.Annotations = AllocationAnnotations(tt)
 	// --- [end][read][alias](AllocationAnnotations) ---
 
+	// field version check
+	if uint8(17) <= version {
+		// --- [begin][read][alias](AllocationLabels) ---
+		var eee map[string]string
+		if buff.ReadUInt8() == uint8(0) {
+			eee = nil
+		} else {
+			// --- [begin][read][map](map[string]string) ---
+			ggg := buff.ReadInt() // map len
+			fff := make(map[string]string, ggg)
+			for jj := 0; jj < ggg; jj++ {
+				var vvv string
+				var kkk string
+				if ctx.IsStringTable() {
+					lll := buff.ReadInt() // read string index
+					kkk = ctx.Table[lll]
+				} else {
+					kkk = buff.ReadString() // read string
+				}
+				hhh := kkk
+				vvv = hhh
+
+				var zzz string
+				var nnn string
+				if ctx.IsStringTable() {
+					ooo := buff.ReadInt() // read string index
+					nnn = ctx.Table[ooo]
+				} else {
+					nnn = buff.ReadString() // read string
+				}
+				mmm := nnn
+				zzz = mmm
+
+				fff[vvv] = zzz
+			}
+			eee = fff
+			// --- [end][read][map](map[string]string) ---
+
+		}
+		target.NamespaceLabels = AllocationLabels(eee)
+		// --- [end][read][alias](AllocationLabels) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(17) <= version {
+		// --- [begin][read][alias](AllocationAnnotations) ---
+		var ppp map[string]string
+		if buff.ReadUInt8() == uint8(0) {
+			ppp = nil
+		} else {
+			// --- [begin][read][map](map[string]string) ---
+			rrr := buff.ReadInt() // map len
+			qqq := make(map[string]string, rrr)
+			for iii := 0; iii < rrr; iii++ {
+				var vvvv string
+				var ttt string
+				if ctx.IsStringTable() {
+					uuu := buff.ReadInt() // read string index
+					ttt = ctx.Table[uuu]
+				} else {
+					ttt = buff.ReadString() // read string
+				}
+				sss := ttt
+				vvvv = sss
+
+				var zzzz string
+				var xxx string
+				if ctx.IsStringTable() {
+					yyy := buff.ReadInt() // read string index
+					xxx = ctx.Table[yyy]
+				} else {
+					xxx = buff.ReadString() // read string
+				}
+				www := xxx
+				zzzz = www
+
+				qqq[vvvv] = zzzz
+			}
+			ppp = qqq
+			// --- [end][read][map](map[string]string) ---
+
+		}
+		target.NamespaceAnnotations = AllocationAnnotations(ppp)
+		// --- [end][read][alias](AllocationAnnotations) ---
+
+	} else {
+	}
+
 	return nil
 }
 
@@ -7058,6 +7287,130 @@ func (target *EqualityAudit) UnmarshalBinaryWithContext(ctx *DecodingContext) (e
 	return nil
 }
 
+//--------------------------------------------------------------------------
+//  LbAllocation
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this LbAllocation instance
+// into a byte array
+func (target *LbAllocation) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this LbAllocation instance
+// into a byte array leveraging a predefined context.
+func (target *LbAllocation) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(AllocationCodecVersion) // version
+
+	if ctx.IsStringTable() {
+		a := ctx.Table.AddOrGet(target.Service)
+		buff.WriteInt(a) // write table index
+	} else {
+		buff.WriteString(target.Service) // write string
+	}
+	buff.WriteFloat64(target.Cost) // write float64
+	buff.WriteBool(target.Private) // write bool
+	return nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the LbAllocation type
+func (target *LbAllocation) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the LbAllocation type
+func (target *LbAllocation) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > AllocationCodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling LbAllocation. Expected %d or less, got %d", AllocationCodecVersion, version)
+	}
+
+	var b string
+	if ctx.IsStringTable() {
+		c := buff.ReadInt() // read string index
+		b = ctx.Table[c]
+	} else {
+		b = buff.ReadString() // read string
+	}
+	a := b
+	target.Service = a
+
+	d := buff.ReadFloat64() // read float64
+	target.Cost = d
+
+	e := buff.ReadBool() // read bool
+	target.Private = e
+
+	return nil
+}
+
 //--------------------------------------------------------------------------
 //  LoadBalancer
 //--------------------------------------------------------------------------
@@ -7167,6 +7520,7 @@ func (target *LoadBalancer) MarshalBinaryWithContext(ctx *EncodingContext) (err
 
 	buff.WriteFloat64(target.Adjustment) // write float64
 	buff.WriteFloat64(target.Cost)       // write float64
+	buff.WriteBool(target.Private)       // write bool
 	return nil
 }
 
@@ -7316,6 +7670,15 @@ func (target *LoadBalancer) UnmarshalBinaryWithContext(ctx *DecodingContext) (er
 	u := buff.ReadFloat64() // read float64
 	target.Cost = u
 
+	// field version check
+	if uint8(20) <= version {
+		w := buff.ReadBool() // read bool
+		target.Private = w
+
+	} else {
+		target.Private = false // default
+	}
+
 	return nil
 }
 

+ 12 - 0
pkg/kubecost/kubecost_codecs_test.go

@@ -480,6 +480,14 @@ func TestProperties_BinaryEncoding(t *testing.T) {
 	p0.Controller = "daemonset-abc"
 	p0.ControllerKind = "daemonset"
 	p0.Namespace = "namespace1"
+	p0.NamespaceLabels = map[string]string{
+		"app":                "cost-analyzer-namespace",
+		"kubernetes.io/name": "cost-analyzer",
+	}
+	p0.NamespaceAnnotations = map[string]string{
+		"com.kubernetes.io/managed-by":             "helm",
+		"kubernetes.io/last-applied-configuration": "cost-analyzer",
+	}
 	p0.Node = "node1"
 	p0.Pod = "daemonset-abc-123"
 	p0.Labels = map[string]string{
@@ -508,6 +516,10 @@ func TestProperties_BinaryEncoding(t *testing.T) {
 	p0.Controller = "daemonset-abc"
 	p0.ControllerKind = "daemonset"
 	p0.Namespace = "namespace1"
+	p0.NamespaceAnnotations = map[string]string{
+		"com.kubernetes.io/managed-by":             "helm",
+		"kubernetes.io/last-applied-configuration": "cost-analyzer",
+	}
 	p0.Services = []string{}
 	bs, err = p0.MarshalBinary()
 	if err != nil {

+ 2 - 2
pkg/kubecost/mock.go

@@ -476,10 +476,10 @@ func GenerateMockAssetSets(start, end time.Time) []*AssetSet {
 	node3Network.Cost = 2.0
 
 	// Add LoadBalancers
-	cluster2LoadBalancer1 := NewLoadBalancer("namespace2/loadBalancer1", "cluster2", "lb1", start, end, NewWindow(&start, &end))
+	cluster2LoadBalancer1 := NewLoadBalancer("namespace2/loadBalancer1", "cluster2", "lb1", start, end, NewWindow(&start, &end), false)
 	cluster2LoadBalancer1.Cost = 10.0
 
-	cluster2LoadBalancer2 := NewLoadBalancer("namespace2/loadBalancer2", "cluster2", "lb2", start, end, NewWindow(&start, &end))
+	cluster2LoadBalancer2 := NewLoadBalancer("namespace2/loadBalancer2", "cluster2", "lb2", start, end, NewWindow(&start, &end), false)
 	cluster2LoadBalancer2.Cost = 15.0
 
 	assetSet1 := NewAssetSet(start, end, cluster1Nodes, cluster2Node1, cluster2Node2, cluster2Node3, cluster2Disk1,

+ 5 - 3
pkg/kubecost/query.go

@@ -2,6 +2,8 @@ package kubecost
 
 import (
 	"time"
+
+	filter21 "github.com/opencost/opencost/pkg/filter21"
 )
 
 // Querier is an aggregate interface which has the ability to query each Kubecost store type
@@ -38,7 +40,7 @@ type AllocationQueryOptions struct {
 	AggregateBy             []string
 	Compute                 bool
 	DisableAggregatedStores bool
-	Filter                  AllocationFilter
+	Filter                  filter21.Filter
 	IdleByNode              bool
 	IncludeExternal         bool
 	IncludeIdle             bool
@@ -73,7 +75,7 @@ type AssetQueryOptions struct {
 	Compute                 bool
 	DisableAdjustments      bool
 	DisableAggregatedStores bool
-	FilterFuncs             []AssetMatchFunc
+	Filter                  filter21.Filter
 	IncludeCloud            bool
 	SharedHourlyCosts       map[string]float64
 	Step                    time.Duration
@@ -85,7 +87,7 @@ type CloudUsageQueryOptions struct {
 	Accumulate   bool
 	AggregateBy  []string
 	Compute      bool
-	FilterFuncs  []CloudUsageMatchFunc
+	Filter       filter21.Filter
 	FilterValues CloudUsageFilter
 	LabelConfig  *LabelConfig
 }

+ 39 - 26
pkg/kubecost/summaryallocation.go

@@ -7,6 +7,8 @@ import (
 	"sync"
 	"time"
 
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	"github.com/opencost/opencost/pkg/filter21/matcher"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/timeutil"
 )
@@ -292,12 +294,20 @@ func (sa *SummaryAllocation) IsUnallocated() bool {
 
 // IsUnmounted is true if the given SummaryAllocation represents unmounted
 // volume costs.
+// Note: Due to change in https://github.com/opencost/opencost/pull/1477 made to include Unmounted
+// PVC cost inside namespace we need to check unmounted suffix across all the three major properties
+// to actually classify it as unmounted.
 func (sa *SummaryAllocation) IsUnmounted() bool {
 	if sa == nil {
 		return false
 	}
-
-	return strings.Contains(sa.Name, UnmountedSuffix)
+	props := sa.Properties
+	if props != nil {
+		if props.Container == UnmountedSuffix && props.Namespace == UnmountedSuffix && props.Pod == UnmountedSuffix {
+			return true
+		}
+	}
+	return false
 }
 
 // Minutes returns the number of minutes the SummaryAllocation represents, as
@@ -370,17 +380,15 @@ type SummaryAllocationSet struct {
 // required for unfortunate reasons to do with performance and legacy order-of-
 // operations details, as well as the fact that reconciliation has been
 // pushed down to the conversion step between Allocation and SummaryAllocation.
-func NewSummaryAllocationSet(as *AllocationSet, filter AllocationFilter, kfs []AllocationMatchFunc, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
+//
+// This filter is an AllocationMatcher, not an AST, because at this point we
+// already have the data and want to make sure that the filter has already
+// gone through a compile step to deal with things like aliases.
+func NewSummaryAllocationSet(as *AllocationSet, filter AllocationMatcher, kfs []AllocationMatchFunc, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
 	if as == nil {
 		return nil
 	}
 
-	// Pre-flatten the filter so we can just check == nil to see if there are
-	// filters.
-	if filter != nil {
-		filter = filter.Flattened()
-	}
-
 	// If we can know the exact size of the map, use it. If filters or sharing
 	// functions are present, we can't know the size, so we make a default map.
 	var sasMap map[string]*SummaryAllocation
@@ -542,10 +550,19 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		options.LabelConfig = NewLabelConfig()
 	}
 
-	// Pre-flatten the filter so we can just check == nil to see if there are
-	// filters.
-	if options.Filter != nil {
-		options.Filter = options.Filter.Flattened()
+	var filter AllocationMatcher
+	if options.Filter == nil {
+		filter = &matcher.AllPass[*Allocation]{}
+	} else {
+		compiler := NewAllocationMatchCompiler(options.LabelConfig)
+		var err error
+		filter, err = compiler.Compile(options.Filter)
+		if err != nil {
+			return fmt.Errorf("compiling filter '%s': %w", ast.ToPreOrderShortString(options.Filter), err)
+		}
+	}
+	if filter == nil {
+		return fmt.Errorf("unexpected nil filter")
 	}
 
 	// Check if we have any work to do; if not, then early return. If
@@ -973,6 +990,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	// 11. Distribute shared resources according to sharing coefficients.
 	// NOTE: ShareEven is not supported
 	if len(shareSet.SummaryAllocations) > 0 {
+
 		sharingCoeffDenominator := 0.0
 		for _, rt := range allocTotals {
 			sharingCoeffDenominator += rt.TotalCost()
@@ -991,9 +1009,14 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		if sharingCoeffDenominator <= 0.0 {
 			log.Warnf("SummaryAllocation: sharing coefficient denominator is %f", sharingCoeffDenominator)
 		} else {
+
 			// Compute sharing coeffs by dividing the thus-far accumulated
 			// numerators by the now-finalized denominator.
 			for key := range sharingCoeffs {
+				// Do not share the value with unmounted suffix since it's not included in the computation.
+				if key == UnmountedSuffix {
+					continue
+				}
 				if sharingCoeffs[key] > 0.0 {
 					sharingCoeffs[key] /= sharingCoeffDenominator
 				} else {
@@ -1027,19 +1050,13 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 
 	// 12. Insert external allocations into the result set.
 	for _, sa := range externalSet.SummaryAllocations {
-		skip := false
-
 		// Make an allocation with the same properties and test that
 		// against the FilterFunc to see if the external allocation should
 		// be filtered or not.
 		// TODO:CLEANUP do something about external cost, this stinks
 		ea := &Allocation{Properties: sa.Properties}
 
-		if options.Filter != nil {
-			skip = !options.Filter.Matches(ea)
-		}
-
-		if !skip {
+		if filter.Matches(ea) {
 			key := sa.generateKey(aggregateBy, options.LabelConfig)
 
 			sa.Name = key
@@ -1051,17 +1068,13 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	// per-resource idle cost for which there can be no idle coefficient
 	// computed because there is zero usage across all allocations.
 	for _, isa := range idleSet.SummaryAllocations {
-		// if the idle does not apply to the non-filtered values, skip it
-		skip := false
 		// Make an allocation with the same properties and test that
 		// against the FilterFunc to see if the external allocation should
 		// be filtered or not.
 		// TODO:CLEANUP do something about external cost, this stinks
 		ia := &Allocation{Properties: isa.Properties}
-		if options.Filter != nil {
-			skip = !options.Filter.Matches(ia)
-		}
-		if skip {
+		// if the idle does not apply to the non-filtered values, skip it
+		if !filter.Matches(ia) {
 			continue
 		}
 

+ 31 - 22
pkg/kubecost/totals.go

@@ -210,6 +210,7 @@ type AssetTotals struct {
 	PersistentVolumeCostAdjustment  float64   `json:"persistentVolumeCostAdjustment"`
 	RAMCost                         float64   `json:"ramCost"`
 	RAMCostAdjustment               float64   `json:"ramCostAdjustment"`
+	PrivateLoadBalancer             bool      `json:"privateLoadBalancer"`
 }
 
 // ClearAdjustments sets all adjustment fields to 0.0
@@ -245,6 +246,7 @@ func (art *AssetTotals) Clone() *AssetTotals {
 		PersistentVolumeCostAdjustment:  art.PersistentVolumeCostAdjustment,
 		RAMCost:                         art.RAMCost,
 		RAMCostAdjustment:               art.RAMCostAdjustment,
+		PrivateLoadBalancer:             art.PrivateLoadBalancer,
 	}
 }
 
@@ -295,7 +297,7 @@ func (art *AssetTotals) TotalCost() float64 {
 // use the fully-qualified (cluster, node) tuple.
 // NOTE: we're not capturing LoadBalancers here yet, but only because we don't
 // yet need them. They could be added.
-func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotals {
+func ComputeAssetTotals(as *AssetSet, byAsset bool) map[string]*AssetTotals {
 	arts := map[string]*AssetTotals{}
 
 	// Attached disks are tracked by matching their name with the name of the
@@ -306,7 +308,7 @@ func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotal
 	for _, node := range as.Nodes {
 		// Default to computing totals by Cluster, but allow override to use Node.
 		key := node.Properties.Cluster
-		if prop == AssetNodeProp {
+		if byAsset {
 			key = fmt.Sprintf("%s/%s", node.Properties.Cluster, node.Properties.Name)
 		}
 
@@ -397,25 +399,30 @@ func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotal
 		arts[key].GPUCostAdjustment += gpuCostAdjustment
 	}
 
-	// Only record LoadBalancer and ClusterManagement when prop
-	// is cluster. We can't breakdown these types by Node.
-	if prop == AssetClusterProp {
-		for _, lb := range as.LoadBalancers {
-			key := lb.Properties.Cluster
+	for _, lb := range as.LoadBalancers {
+		// Default to computing totals by Cluster, but allow override to use LoadBalancer.
+		key := lb.Properties.Cluster
+		if byAsset {
+			key = fmt.Sprintf("%s/%s", lb.Properties.Cluster, lb.Properties.Name)
+		}
 
-			if _, ok := arts[key]; !ok {
-				arts[key] = &AssetTotals{
-					Start:   lb.Start,
-					End:     lb.End,
-					Cluster: lb.Properties.Cluster,
-				}
+		if _, ok := arts[key]; !ok {
+			arts[key] = &AssetTotals{
+				Start:               lb.Start,
+				End:                 lb.End,
+				Cluster:             lb.Properties.Cluster,
+				Node:                lb.Properties.Name,
+				PrivateLoadBalancer: lb.Private,
 			}
-
-			arts[key].Count++
-			arts[key].LoadBalancerCost += lb.Cost
-			arts[key].LoadBalancerCostAdjustment += lb.Adjustment
 		}
 
+		arts[key].LoadBalancerCost += lb.Cost
+		arts[key].LoadBalancerCostAdjustment += lb.Adjustment
+	}
+
+	// Only record ClusterManagement when prop
+	// is cluster. We can't breakdown these types by Node.
+	if !byAsset {
 		for _, cm := range as.ClusterManagement {
 			key := cm.Properties.Cluster
 
@@ -447,7 +454,7 @@ func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotal
 		// cluster/node. But if we're aggregating by cluster only, then
 		// reset the key to just the cluster.
 		key := name
-		if prop == AssetClusterProp {
+		if !byAsset {
 			key = disk.Properties.Cluster
 		}
 
@@ -458,7 +465,7 @@ func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotal
 				Cluster: disk.Properties.Cluster,
 			}
 
-			if prop == AssetNodeProp {
+			if byAsset {
 				arts[key].Node = disk.Properties.Name
 			}
 		}
@@ -471,7 +478,7 @@ func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotal
 			arts[key].Count++
 			arts[key].AttachedVolumeCost += disk.Cost
 			arts[key].AttachedVolumeCostAdjustment += disk.Adjustment
-		} else if prop == AssetClusterProp {
+		} else if !byAsset {
 			// Here, we're looking at a PersistentVolume because we're not
 			// looking at an AttachedVolume. Only record PersistentVolume data
 			// at the cluster level (i.e. prop == AssetClusterProp).
@@ -621,10 +628,10 @@ func UpdateAssetTotalsStore(arts AssetTotalsStore, as *AssetSet) (*AssetTotalsSe
 	start := *as.Window.Start()
 	end := *as.Window.End()
 
-	artsByCluster := ComputeAssetTotals(as, AssetClusterProp)
+	artsByCluster := ComputeAssetTotals(as, false)
 	arts.SetAssetTotalsByCluster(start, end, artsByCluster)
 
-	artsByNode := ComputeAssetTotals(as, AssetNodeProp)
+	artsByNode := ComputeAssetTotals(as, true)
 	arts.SetAssetTotalsByNode(start, end, artsByNode)
 
 	log.Debugf("ETL: Asset: updated resource totals for %s", as.Window)
@@ -730,6 +737,8 @@ func (mts *MemoryTotalsStore) GetAssetTotalsByCluster(start time.Time, end time.
 func (mts *MemoryTotalsStore) GetAssetTotalsByNode(start time.Time, end time.Time) (map[string]*AssetTotals, bool) {
 	k := storeKey(start, end)
 	if raw, ok := mts.assetTotalsByNode.Get(k); !ok {
+		// it's possible that after accumulation, the time chunks stored here
+		// are being queried combined
 		return map[string]*AssetTotals{}, false
 	} else {
 		original := raw.(map[string]*AssetTotals)

+ 8 - 8
pkg/prom/diagnostics.go

@@ -60,14 +60,14 @@ const DocumentationBaseURL = "https://github.com/kubecost/docs/blob/master/diagn
 var diagnosticDefinitions map[string]*diagnosticDefinition = map[string]*diagnosticDefinition{
 	CAdvisorDiagnosticMetricID: {
 		ID:          CAdvisorDiagnosticMetricID,
-		QueryFmt:    `absent_over_time(container_cpu_usage_seconds_total[5m] %s)`,
+		QueryFmt:    `absent_over_time(container_cpu_usage_seconds_total{%s}[5m] %s)`,
 		Label:       "cAdvisor metrics available",
 		Description: "Determine if cAdvisor metrics are available during last 5 minutes.",
 		DocLink:     fmt.Sprintf("%s#cadvisor-metrics-available", DocumentationBaseURL),
 	},
 	KSMDiagnosticMetricID: {
 		ID:          KSMDiagnosticMetricID,
-		QueryFmt:    `absent_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte"}[5m] %s)`,
+		QueryFmt:    `absent_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[5m] %s)`,
 		Label:       "Kube-state-metrics available",
 		Description: "Determine if metrics from kube-state-metrics are available during last 5 minutes.",
 		DocLink:     fmt.Sprintf("%s#kube-state-metrics-metrics-available", DocumentationBaseURL),
@@ -87,7 +87,7 @@ var diagnosticDefinitions map[string]*diagnosticDefinition = map[string]*diagnos
 	},
 	CAdvisorLabelDiagnosticMetricID: {
 		ID:          CAdvisorLabelDiagnosticMetricID,
-		QueryFmt:    `absent_over_time(container_cpu_usage_seconds_total{container!="",pod!="",%s}[5m] %s)`,
+		QueryFmt:    `absent_over_time(container_cpu_usage_seconds_total{container!="",pod!="", %s}[5m] %s)`,
 		Label:       "Expected cAdvisor labels available",
 		Description: "Determine if expected cAdvisor labels are present during last 5 minutes.",
 		DocLink:     fmt.Sprintf("%s#cadvisor-metrics-available", DocumentationBaseURL),
@@ -107,33 +107,33 @@ var diagnosticDefinitions map[string]*diagnosticDefinition = map[string]*diagnos
 	},
 	CPUThrottlingDiagnosticMetricID: {
 		ID: CPUThrottlingDiagnosticMetricID,
-		QueryFmt: `avg(increase(container_cpu_cfs_throttled_periods_total{container="cost-model",%s}[10m] %s)) by (container_name, pod_name, namespace)
+		QueryFmt: `avg(increase(container_cpu_cfs_throttled_periods_total{container="cost-model", %s}[10m] %s)) by (container_name, pod_name, namespace)
 	/ avg(increase(container_cpu_cfs_periods_total{container="cost-model",%s}[10m] %s)) by (container_name, pod_name, namespace) > 0.2`,
 		Label:       "Kubecost is not CPU throttled",
 		Description: "Kubecost loading slowly? A kubecost component might be CPU throttled",
 	},
 	KubecostRecordingRuleCPUUsageID: {
 		ID:          KubecostRecordingRuleCPUUsageID,
-		QueryFmt:    `absent_over_time(kubecost_container_cpu_usage_irate[5m] %s)`,
+		QueryFmt:    `absent_over_time(kubecost_container_cpu_usage_irate{%s}[5m] %s)`,
 		Label:       "Kubecost's CPU usage recording rule is set up",
 		Description: "If the 'kubecost_container_cpu_usage_irate' recording rule is not set up, Allocation pipeline build may put pressure on your Prometheus due to the use of a subquery.",
 		DocLink:     "https://docs.kubecost.com/install-and-configure/install/custom-prom",
 	},
 	CAdvisorWorkingSetBytesMetricID: {
 		ID:          CAdvisorWorkingSetBytesMetricID,
-		QueryFmt:    `absent_over_time(container_memory_working_set_bytes{container="cost-model", container!="POD", instance!=""}[5m] %s)`,
+		QueryFmt:    `absent_over_time(container_memory_working_set_bytes{container="cost-model", container!="POD", instance!="", %s}[5m] %s)`,
 		Label:       "cAdvisor working set bytes metrics available",
 		Description: "Determine if cAdvisor working set bytes metrics are available during last 5 minutes.",
 	},
 	KSMCPUCapacityMetricID: {
 		ID:          KSMCPUCapacityMetricID,
-		QueryFmt:    `absent_over_time(kube_node_status_capacity_cpu_cores[5m] %s)`,
+		QueryFmt:    `absent_over_time(kube_node_status_capacity_cpu_cores{%s}[5m] %s)`,
 		Label:       "KSM had CPU capacity during the last 5 minutes",
 		Description: "Determine if KSM had CPU capacity during the last 5 minutes",
 	},
 	KSMAllocatableCPUCoresMetricID: {
 		ID:          KSMAllocatableCPUCoresMetricID,
-		QueryFmt:    `absent_over_time(kube_node_status_allocatable_cpu_cores[5m] %s)`,
+		QueryFmt:    `absent_over_time(kube_node_status_allocatable_cpu_cores{%s}[5m] %s)`,
 		Label:       "KSM had allocatable CPU cores during the last 5 minutes",
 		Description: "Determine if KSM had allocatable CPU cores during the last 5 minutes",
 	},

+ 2 - 2
pkg/storage/s3storage.go

@@ -16,7 +16,7 @@ import (
 
 	"github.com/opencost/opencost/pkg/log"
 
-	aws "github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws"
 	awsconfig "github.com/aws/aws-sdk-go-v2/config"
 
 	"github.com/minio/minio-go/v7"
@@ -565,6 +565,7 @@ func (s3 *S3Storage) getRange(ctx context.Context, name string, off, length int6
 		}
 	}
 	r, err := s3.client.GetObject(ctx, s3.name, name, *opts)
+	defer r.Close()
 	if err != nil {
 		if s3.isObjNotFound(err) {
 			return nil, DoesNotExistError
@@ -575,7 +576,6 @@ func (s3 *S3Storage) getRange(ctx context.Context, name string, off, length int6
 	// NotFoundObject error is revealed only after first Read. This does the initial GetRequest. Prefetch this here
 	// for convenience.
 	if _, err := r.Read(nil); err != nil {
-		r.Close()
 		if s3.isObjNotFound(err) {
 			return nil, DoesNotExistError
 		}

+ 0 - 499
pkg/util/allocationfilterutil/queryfilters.go

@@ -1,499 +0,0 @@
-package allocationfilterutil
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-
-	"github.com/opencost/opencost/pkg/costmodel/clusters"
-	"github.com/opencost/opencost/pkg/kubecost"
-	"github.com/opencost/opencost/pkg/log"
-	"github.com/opencost/opencost/pkg/prom"
-	"github.com/opencost/opencost/pkg/util/mapper"
-)
-
-const (
-	ParamFilterClusters        = "filterClusters"
-	ParamFilterNodes           = "filterNodes"
-	ParamFilterNamespaces      = "filterNamespaces"
-	ParamFilterControllerKinds = "filterControllerKinds"
-	ParamFilterControllers     = "filterControllers"
-	ParamFilterPods            = "filterPods"
-	ParamFilterContainers      = "filterContainers"
-
-	ParamFilterDepartments  = "filterDepartments"
-	ParamFilterEnvironments = "filterEnvironments"
-	ParamFilterOwners       = "filterOwners"
-	ParamFilterProducts     = "filterProducts"
-	ParamFilterTeams        = "filterTeams"
-
-	ParamFilterAnnotations = "filterAnnotations"
-	ParamFilterLabels      = "filterLabels"
-	ParamFilterServices    = "filterServices"
-)
-
-var allocationFilterFieldMap = map[string]string{
-	kubecost.AllocationClusterProp:        ParamFilterClusters,
-	kubecost.FilterNode:                   ParamFilterNodes,
-	kubecost.AllocationNamespaceProp:      ParamFilterNamespaces,
-	kubecost.AllocationControllerKindProp: ParamFilterControllerKinds,
-	kubecost.AllocationControllerProp:     ParamFilterControllers,
-	kubecost.AllocationPodProp:            ParamFilterPods,
-	kubecost.AllocationContainerProp:      ParamFilterContainers,
-	kubecost.AllocationDepartmentProp:     ParamFilterDepartments,
-	kubecost.AllocationEnvironmentProp:    ParamFilterEnvironments,
-	kubecost.AllocationOwnerProp:          ParamFilterOwners,
-	kubecost.AllocationProductProp:        ParamFilterProducts,
-	kubecost.AllocationTeamProp:           ParamFilterTeams,
-	kubecost.AllocationAnnotationProp:     ParamFilterAnnotations,
-	kubecost.AllocationLabelProp:          ParamFilterLabels,
-	kubecost.AllocationServiceProp:        ParamFilterServices,
-}
-
-func GetAllocationFilterForTheAllocationProperty(allocationProp string) (string, error) {
-	if _, ok := allocationFilterFieldMap[allocationProp]; !ok {
-		return "", fmt.Errorf("unknown allocation property %s", allocationProp)
-	}
-	return allocationFilterFieldMap[allocationProp], nil
-}
-
-// AllHTTPParamKeys returns all HTTP GET parameters used for v1 filters. It is
-// intended to help validate HTTP queries in handlers to help avoid e.g.
-// spelling errors.
-func AllHTTPParamKeys() []string {
-	return []string{
-		ParamFilterClusters,
-		ParamFilterNodes,
-		ParamFilterNamespaces,
-		ParamFilterControllerKinds,
-		ParamFilterControllers,
-		ParamFilterPods,
-		ParamFilterContainers,
-
-		ParamFilterDepartments,
-		ParamFilterEnvironments,
-		ParamFilterOwners,
-		ParamFilterProducts,
-		ParamFilterTeams,
-
-		ParamFilterAnnotations,
-		ParamFilterLabels,
-		ParamFilterServices,
-	}
-}
-
-type FilterV1 struct {
-	Annotations     []string `json:"annotations,omitempty"`
-	Containers      []string `json:"containers,omitempty"`
-	Controllers     []string `json:"controllers,omitempty"`
-	ControllerKinds []string `json:"controllerKinds,omitempty"`
-	Clusters        []string `json:"clusters,omitempty"`
-	Departments     []string `json:"departments,omitempty"`
-	Environments    []string `json:"environments,omitempty"`
-	Labels          []string `json:"labels,omitempty"`
-	Namespaces      []string `json:"namespaces,omitempty"`
-	Nodes           []string `json:"nodes,omitempty"`
-	Owners          []string `json:"owners,omitempty"`
-	Pods            []string `json:"pods,omitempty"`
-	Products        []string `json:"products,omitempty"`
-	Services        []string `json:"services,omitempty"`
-	Teams           []string `json:"teams,omitempty"`
-}
-
-func (f FilterV1) Equals(that FilterV1) bool {
-	return reflect.DeepEqual(f.Annotations, that.Annotations) &&
-		reflect.DeepEqual(f.Containers, that.Containers) &&
-		reflect.DeepEqual(f.Controllers, that.Controllers) &&
-		reflect.DeepEqual(f.ControllerKinds, that.ControllerKinds) &&
-		reflect.DeepEqual(f.Clusters, that.Clusters) &&
-		reflect.DeepEqual(f.Departments, that.Departments) &&
-		reflect.DeepEqual(f.Environments, that.Environments) &&
-		reflect.DeepEqual(f.Labels, that.Labels) &&
-		reflect.DeepEqual(f.Namespaces, that.Namespaces) &&
-		reflect.DeepEqual(f.Nodes, that.Nodes) &&
-		reflect.DeepEqual(f.Owners, that.Owners) &&
-		reflect.DeepEqual(f.Pods, that.Pods) &&
-		reflect.DeepEqual(f.Products, that.Products) &&
-		reflect.DeepEqual(f.Services, that.Services) &&
-		reflect.DeepEqual(f.Teams, that.Teams)
-}
-
-// ============================================================================
-// This file contains:
-// Parsing (HTTP query params -> AllocationFilter) for V1 of filters
-//
-// e.g. "filterNamespaces=ku&filterControllers=deployment:kc"
-// ============================================================================
-
-// parseWildcardEnd checks if the given filter value is wildcarded, meaning
-// it ends in "*". If it does, it removes the suffix and returns the cleaned
-// string and true. Otherwise, it returns the same filter and false.
-//
-// parseWildcardEnd("kube*") = "kube", true
-// parseWildcardEnd("kube") = "kube", false
-func parseWildcardEnd(rawFilterValue string) (string, bool) {
-	return strings.TrimSuffix(rawFilterValue, "*"), strings.HasSuffix(rawFilterValue, "*")
-}
-
-// ParseAllocationFilterV1 takes a FilterV1 struct and
-// converts them to an AllocationFilter, which is a structured in-Go
-// representation of a set of filters.
-//
-// The HTTP query parameters are the "v1" filters attached to the Allocation
-// API: "filterNamespaces=", "filterNodes=", etc.
-//
-// It takes an optional LabelConfig, which if provided enables "label-mapped"
-// filters like "filterDepartments".
-//
-// It takes an optional ClusterMap, which if provided enables cluster name
-// filtering. This turns all `filterClusters=foo` arguments into the equivalent
-// of `clusterID = "foo" OR clusterName = "foo"`.
-func ParseAllocationFilterV1(filters FilterV1, labelConfig *kubecost.LabelConfig, clusterMap clusters.ClusterMap) kubecost.AllocationFilter {
-	filter := kubecost.AllocationFilterAnd{
-		Filters: []kubecost.AllocationFilter{},
-	}
-
-	// ClusterMap does not provide a cluster name -> cluster ID mapping in the
-	// interface, probably because there could be multiple IDs with the same
-	// name. However, V1 filter logic demands that the parameters to
-	// filterClusters= be checked against both cluster ID AND cluster name.
-	//
-	// To support expected filterClusters= behavior, we construct a mapping
-	// of cluster name -> cluster IDs (could be multiple IDs for the same name)
-	// so that we can create AllocationFilters that use only ClusterIDEquals.
-	//
-	//
-	// AllocationFilter intentionally does not support cluster name filters
-	// because those should be considered presentation-layer only.
-	clusterNameToIDs := map[string][]string{}
-	if clusterMap != nil {
-		cMap := clusterMap.AsMap()
-		for _, info := range cMap {
-			if info == nil {
-				continue
-			}
-
-			if _, ok := clusterNameToIDs[info.Name]; ok {
-				clusterNameToIDs[info.Name] = append(clusterNameToIDs[info.Name], info.ID)
-			} else {
-				clusterNameToIDs[info.Name] = []string{info.ID}
-			}
-		}
-	}
-
-	// The proliferation of > 0 guards in the function is to avoid constructing
-	// empty filter structs. While it is functionally equivalent to add empty
-	// filter structs (they evaluate to true always) there could be overhead
-	// when calling Matches() repeatedly for no purpose.
-
-	if len(filters.Clusters) > 0 {
-		clustersOr := kubecost.AllocationFilterOr{
-			Filters: []kubecost.AllocationFilter{},
-		}
-
-		if idFilters := filterV1SingleValueFromList(filters.Clusters, kubecost.FilterClusterID); len(idFilters.Filters) > 0 {
-			clustersOr.Filters = append(clustersOr.Filters, idFilters)
-		}
-		for _, rawFilterValue := range filters.Clusters {
-			clusterNameFilter, wildcard := parseWildcardEnd(rawFilterValue)
-
-			clusterIDsToFilter := []string{}
-			for clusterName := range clusterNameToIDs {
-				if wildcard && strings.HasPrefix(clusterName, clusterNameFilter) {
-					clusterIDsToFilter = append(clusterIDsToFilter, clusterNameToIDs[clusterName]...)
-				} else if !wildcard && clusterName == clusterNameFilter {
-					clusterIDsToFilter = append(clusterIDsToFilter, clusterNameToIDs[clusterName]...)
-				}
-			}
-
-			for _, clusterID := range clusterIDsToFilter {
-				clustersOr.Filters = append(clustersOr.Filters,
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterClusterID,
-						Op:    kubecost.FilterEquals,
-						Value: clusterID,
-					},
-				)
-			}
-		}
-		filter.Filters = append(filter.Filters, clustersOr)
-	}
-
-	if len(filters.Nodes) > 0 {
-		filter.Filters = append(filter.Filters, filterV1SingleValueFromList(filters.Nodes, kubecost.FilterNode))
-	}
-
-	if len(filters.Namespaces) > 0 {
-		filter.Filters = append(filter.Filters, filterV1SingleValueFromList(filters.Namespaces, kubecost.FilterNamespace))
-	}
-
-	if len(filters.ControllerKinds) > 0 {
-		filter.Filters = append(filter.Filters, filterV1SingleValueFromList(filters.ControllerKinds, kubecost.FilterControllerKind))
-	}
-
-	// filterControllers= accepts controllerkind:controllername filters, e.g.
-	// "deployment:kubecost-cost-analyzer"
-	//
-	// Thus, we have to make a custom OR filter for this condition.
-	if len(filters.Controllers) > 0 {
-		controllersOr := kubecost.AllocationFilterOr{
-			Filters: []kubecost.AllocationFilter{},
-		}
-		for _, rawFilterValue := range filters.Controllers {
-			split := strings.Split(rawFilterValue, ":")
-			if len(split) == 1 {
-				filterValue, wildcard := parseWildcardEnd(split[0])
-				subFilter := kubecost.AllocationFilterCondition{
-					Field: kubecost.FilterControllerName,
-					Op:    kubecost.FilterEquals,
-					Value: filterValue,
-				}
-
-				if wildcard {
-					subFilter.Op = kubecost.FilterStartsWith
-				}
-				controllersOr.Filters = append(controllersOr.Filters, subFilter)
-			} else if len(split) == 2 {
-				kindFilterVal := split[0]
-				nameFilterVal, wildcard := parseWildcardEnd(split[1])
-
-				kindFilter := kubecost.AllocationFilterCondition{
-					Field: kubecost.FilterControllerKind,
-					Op:    kubecost.FilterEquals,
-					Value: kindFilterVal,
-				}
-				nameFilter := kubecost.AllocationFilterCondition{
-					Field: kubecost.FilterControllerName,
-					Op:    kubecost.FilterEquals,
-					Value: nameFilterVal,
-				}
-
-				if wildcard {
-					nameFilter.Op = kubecost.FilterStartsWith
-				}
-
-				// The controller name AND the controller kind must match
-				multiFilter := kubecost.AllocationFilterAnd{
-					Filters: []kubecost.AllocationFilter{kindFilter, nameFilter},
-				}
-				controllersOr.Filters = append(controllersOr.Filters, multiFilter)
-			} else {
-				log.Warnf("illegal filter for controller: %s", rawFilterValue)
-			}
-		}
-		if len(controllersOr.Filters) > 0 {
-			filter.Filters = append(filter.Filters, controllersOr)
-		}
-	}
-
-	if len(filters.Pods) > 0 {
-		filter.Filters = append(filter.Filters, filterV1SingleValueFromList(filters.Pods, kubecost.FilterPod))
-	}
-
-	if len(filters.Containers) > 0 {
-		filter.Filters = append(filter.Filters, filterV1SingleValueFromList(filters.Containers, kubecost.FilterContainer))
-	}
-
-	// Label-mapped queries require a label config to be present.
-	if labelConfig != nil {
-		if len(filters.Departments) > 0 {
-			filter.Filters = append(filter.Filters, filterV1LabelAliasMappedFromList(filters.Departments, labelConfig.DepartmentLabel))
-		}
-		if len(filters.Environments) > 0 {
-			filter.Filters = append(filter.Filters, filterV1LabelAliasMappedFromList(filters.Environments, labelConfig.EnvironmentLabel))
-		}
-		if len(filters.Owners) > 0 {
-			filter.Filters = append(filter.Filters, filterV1LabelAliasMappedFromList(filters.Owners, labelConfig.OwnerLabel))
-		}
-		if len(filters.Products) > 0 {
-			filter.Filters = append(filter.Filters, filterV1LabelAliasMappedFromList(filters.Products, labelConfig.ProductLabel))
-		}
-		if len(filters.Teams) > 0 {
-			filter.Filters = append(filter.Filters, filterV1LabelAliasMappedFromList(filters.Teams, labelConfig.TeamLabel))
-		}
-	} else {
-		log.Debugf("No label config is available. Not creating filters for label-mapped 'fields'.")
-	}
-
-	if len(filters.Annotations) > 0 {
-		filter.Filters = append(filter.Filters, filterV1DoubleValueFromList(filters.Annotations, kubecost.FilterAnnotation))
-	}
-
-	if len(filters.Labels) > 0 {
-		filter.Filters = append(filter.Filters, filterV1DoubleValueFromList(filters.Labels, kubecost.FilterLabel))
-	}
-
-	if len(filters.Services) > 0 {
-		// filterServices= is the only filter that uses the "contains" operator.
-		servicesFilter := kubecost.AllocationFilterOr{
-			Filters: []kubecost.AllocationFilter{},
-		}
-		for _, filterValue := range filters.Services {
-			// TODO: wildcard support
-			filterValue, wildcard := parseWildcardEnd(filterValue)
-			subFilter := kubecost.AllocationFilterCondition{
-				Field: kubecost.FilterServices,
-				Op:    kubecost.FilterContains,
-				Value: filterValue,
-			}
-			if wildcard {
-				subFilter.Op = kubecost.FilterContainsPrefix
-			}
-			servicesFilter.Filters = append(servicesFilter.Filters, subFilter)
-		}
-		filter.Filters = append(filter.Filters, servicesFilter)
-	}
-
-	return filter
-}
-
-// AllocationFilterFromParamsV1 takes a set of HTTP query parameters and
-// converts them to an AllocationFilter, which is a structured in-Go
-// representation of a set of filters.
-//
-// The HTTP query parameters are the "v1" filters attached to the Allocation
-// API: "filterNamespaces=", "filterNodes=", etc.
-//
-// It takes an optional LabelConfig, which if provided enables "label-mapped"
-// filters like "filterDepartments".
-//
-// It takes an optional ClusterMap, which if provided enables cluster name
-// filtering. This turns all `filterClusters=foo` arguments into the equivalent
-// of `clusterID = "foo" OR clusterName = "foo"`.
-func AllocationFilterFromParamsV1(
-	qp mapper.PrimitiveMapReader,
-	labelConfig *kubecost.LabelConfig,
-	clusterMap clusters.ClusterMap,
-) kubecost.AllocationFilter {
-	filter := ConvertFilterQueryParams(qp, labelConfig)
-	return ParseAllocationFilterV1(filter, labelConfig, clusterMap)
-}
-
-// filterV1SingleValueFromList creates an OR of equality filters for a given
-// filter field.
-//
-// The v1 query language (e.g. "filterNamespaces=XYZ,ABC") uses OR within
-// a field (e.g. namespace = XYZ OR namespace = ABC)
-func filterV1SingleValueFromList(rawFilterValues []string, filterField kubecost.FilterField) kubecost.AllocationFilterOr {
-	filter := kubecost.AllocationFilterOr{
-		Filters: []kubecost.AllocationFilter{},
-	}
-
-	for _, filterValue := range rawFilterValues {
-		filterValue = strings.TrimSpace(filterValue)
-		filterValue, wildcard := parseWildcardEnd(filterValue)
-
-		subFilter := kubecost.AllocationFilterCondition{
-			Field: filterField,
-			// All v1 filters are equality comparisons
-			Op:    kubecost.FilterEquals,
-			Value: filterValue,
-		}
-
-		if wildcard {
-			subFilter.Op = kubecost.FilterStartsWith
-		}
-
-		filter.Filters = append(filter.Filters, subFilter)
-	}
-
-	return filter
-}
-
-func ConvertFilterQueryParams(qp mapper.PrimitiveMapReader, labelConfig *kubecost.LabelConfig) FilterV1 {
-	filter := FilterV1{
-		Annotations:     qp.GetList(ParamFilterAnnotations, ","),
-		Containers:      qp.GetList(ParamFilterContainers, ","),
-		Controllers:     qp.GetList(ParamFilterControllers, ","),
-		ControllerKinds: qp.GetList(ParamFilterControllerKinds, ","),
-		Clusters:        qp.GetList(ParamFilterClusters, ","),
-		Labels:          qp.GetList(ParamFilterLabels, ","),
-		Namespaces:      qp.GetList(ParamFilterNamespaces, ","),
-		Nodes:           qp.GetList(ParamFilterNodes, ","),
-		Pods:            qp.GetList(ParamFilterPods, ","),
-		Services:        qp.GetList(ParamFilterServices, ","),
-	}
-
-	if labelConfig != nil {
-		filter.Departments = qp.GetList(ParamFilterDepartments, ",")
-		filter.Environments = qp.GetList(ParamFilterEnvironments, ",")
-		filter.Owners = qp.GetList(ParamFilterOwners, ",")
-		filter.Products = qp.GetList(ParamFilterProducts, ",")
-		filter.Teams = qp.GetList(ParamFilterTeams, ",")
-	} else {
-		log.Debugf("No label config is available. Not creating filters for label-mapped 'fields'.")
-	}
-
-	return filter
-}
-
-// filterV1LabelAliasMappedFromList is like filterV1SingleValueFromList but is
-// explicitly for labels and annotations because "label-mapped" filters (like filterTeams=)
-// are actually label filters with a fixed label key.
-func filterV1LabelAliasMappedFromList(rawFilterValues []string, labelName string) kubecost.AllocationFilterOr {
-	filter := kubecost.AllocationFilterOr{
-		Filters: []kubecost.AllocationFilter{},
-	}
-	labelName = prom.SanitizeLabelName(labelName)
-
-	for _, filterValue := range rawFilterValues {
-		filterValue = strings.TrimSpace(filterValue)
-		filterValue, wildcard := parseWildcardEnd(filterValue)
-
-		subFilter := kubecost.AllocationFilterCondition{
-			Field: kubecost.FilterAlias,
-			// All v1 filters are equality comparisons
-			Op:    kubecost.FilterEquals,
-			Key:   labelName,
-			Value: filterValue,
-		}
-
-		if wildcard {
-			subFilter.Op = kubecost.FilterStartsWith
-		}
-
-		filter.Filters = append(filter.Filters, subFilter)
-	}
-
-	return filter
-}
-
-// filterV1DoubleValueFromList creates an OR of key:value equality filters for
-// colon-split filter values.
-//
-// The v1 query language (e.g. "filterLabels=app:foo,l2:bar") uses OR within
-// a field (e.g. label[app] = foo OR label[l2] = bar)
-func filterV1DoubleValueFromList(rawFilterValuesUnsplit []string, filterField kubecost.FilterField) kubecost.AllocationFilterOr {
-	filter := kubecost.AllocationFilterOr{
-		Filters: []kubecost.AllocationFilter{},
-	}
-
-	for _, unsplit := range rawFilterValuesUnsplit {
-		if unsplit != "" {
-			split := strings.Split(unsplit, ":")
-			if len(split) != 2 {
-				log.Warnf("illegal key/value filter (ignoring): %s", unsplit)
-				continue
-			}
-			labelName := prom.SanitizeLabelName(strings.TrimSpace(split[0]))
-			val := strings.TrimSpace(split[1])
-			val, wildcard := parseWildcardEnd(val)
-
-			subFilter := kubecost.AllocationFilterCondition{
-				Field: filterField,
-				// All v1 filters are equality comparisons
-				Op:    kubecost.FilterEquals,
-				Key:   labelName,
-				Value: val,
-			}
-
-			if wildcard {
-				subFilter.Op = kubecost.FilterStartsWith
-			}
-
-			filter.Filters = append(filter.Filters, subFilter)
-		}
-	}
-
-	return filter
-}

+ 0 - 340
pkg/util/allocationfilterutil/v2/parser.go

@@ -1,340 +0,0 @@
-// allocationfilterutil provides functionality for parsing V2 of the Kubecost
-// filter language for Allocation types.
-//
-// e.g. "filter=namespace:kubecost+controllerkind:deployment"
-package allocationfilterutil
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/go-multierror"
-	"github.com/opencost/opencost/pkg/kubecost"
-)
-
-// ParseAllocationFilter converts a string of the V2 Allocation Filter language
-// into a kubecost.AllocationFilter.
-//
-// Example queries:
-//
-//	namespace:"kubecost"
-//	label[app]:"cost-analyzer"
-//	node!:"node1","node2"
-//	cluster:"cluster-one"+namespace!:"kube-system"
-//
-// The grammar is approximately as follows:
-//
-// Original design doc [1] contains first grammar. This is a slight modification
-// of that grammar to help guide the implementation of the parser.
-//
-// [1] https://docs.google.com/document/d/1HKkp2bv3mnvfQoBZlpHjfZwQ0FzDLOHKpnwV9gQ_KgU/edit?pli=1
-//
-// <filter> ::= <comparison> ('+' <comparison>)*
-//
-//	NOTE: Language can be extended to support ORs between
-//	comparisons by adding a '|' operator in between comparisons,
-//	though precedence will have to be carefully defined and it may
-//	require adding support for ()-enclosed statements to deal with
-//	precedence.
-//	This would allow for queries like:
-//	  namespace:"x"|label[app]="foo"
-//
-// <comparison> ::= <filter-key> <filter-op> <filter-value>
-//
-// <filter-key> ::= <filter-field-2> <keyed-access>
-//
-//	| <filter-field-1>
-//
-// <filter-op> ::= ':' | '!:'
-//
-// <filter-value> ::= '"' <identifier> '"' (',' <filter-value>)*
-//
-// <filter-field-2> ::= 'label' | 'annotation'
-//
-// <filter-field-1> ::= 'cluster' | 'node' | 'namespace'
-//
-//	| 'controllerName' | 'controllerKind'
-//	| 'container' | 'pod' | 'services'
-//
-// <keyed-access> ::= '[' <identifier> ']'
-//
-// <identifier> ::= --- valid K8s name or Prom-sanitized K8s name
-func ParseAllocationFilter(filter string) (kubecost.AllocationFilter, error) {
-	tokens, err := lexAllocationFilterV2(filter)
-	if err != nil {
-		return nil, fmt.Errorf("lexing filter: %s", err)
-	}
-
-	p := parser{tokens: tokens}
-
-	parsedFilter, err := p.filter()
-	if err != nil {
-		return nil, fmt.Errorf("parsing filter: %s", err)
-	}
-
-	return parsedFilter, nil
-}
-
-// ============================================================================
-// Parser
-//
-// Based on the Parser class in Chapter 6: Parsing Expressions of Crafting
-// Interpreters by Robert Nystrom
-// ============================================================================
-
-// parseError produces error messages tailored to the needs of the parser
-func parseError(t token, message string) error {
-	if t.kind == eof {
-		return fmt.Errorf("at end: %s", message)
-	}
-
-	return fmt.Errorf("at '%s': %s", t.s, message)
-}
-
-type parser struct {
-	tokens  []token
-	current int
-}
-
-// ----------------------------------------------------------------------------
-// Parser helper methods for token handling
-// ----------------------------------------------------------------------------
-
-func (p *parser) atEnd() bool {
-	return p.peek().kind == eof
-}
-
-func (p *parser) advance() token {
-	if !p.atEnd() {
-		p.current += 1
-	}
-
-	return p.previous()
-}
-
-func (p *parser) previous() token {
-	return p.tokens[p.current-1]
-}
-
-// match return true and advances the parser by one token if the next token has
-// a kind that matches one of the arguments. Otherwise, it returns false and
-// DOES NOT advance the parser.
-func (p *parser) match(tokenKinds ...tokenKind) bool {
-	for _, kind := range tokenKinds {
-		if p.check(kind) {
-			p.advance()
-			return true
-		}
-	}
-	return false
-}
-
-// check returns true iff the next token matches the provided kind.
-func (p *parser) check(tk tokenKind) bool {
-	if p.atEnd() {
-		return false
-	}
-	return p.peek().kind == tk
-}
-
-func (p *parser) peek() token {
-	return p.tokens[p.current]
-}
-
-// consume is a "next token must be this kind" method. If the next token is of
-// the correct kind, the parser is advanced and that token is returned. If it
-// is not of the correct kind, a parse error is returned and the parser is NOT
-// advanced.
-func (p *parser) consume(tk tokenKind, message string) (token, error) {
-	if p.check(tk) {
-		return p.advance(), nil
-	}
-
-	return token{}, parseError(p.peek(), message)
-}
-
-// synchronize attempts to skip forward until the next '+', indicating the
-// start of a new <comparison>. This lets us do best-effort reporting of
-// multiple parse errors.
-func (p *parser) synchronize() {
-	p.advance()
-	for !p.atEnd() {
-		if p.previous().kind == plus {
-			return
-		}
-
-		p.advance()
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Parser grammar rules as recursive descent methods
-// ----------------------------------------------------------------------------
-
-// filter is the main method of the parser. It turns the token stream into an
-// AllocationFilter, reporting parse errors that occurred along the way.
-func (p *parser) filter() (kubecost.AllocationFilter, error) {
-	var errs *multierror.Error
-
-	// Currently, a filter is only a sequence of AND operations
-	f := kubecost.AllocationFilterAnd{}
-	comparison, err := p.comparison()
-	if err != nil {
-		errs = multierror.Append(errs, err)
-		p.synchronize()
-	} else {
-		f.Filters = append(f.Filters, comparison)
-	}
-	for p.match(plus) {
-		right, err := p.comparison()
-		if err != nil {
-			errs = multierror.Append(errs, err)
-			p.synchronize()
-		} else {
-			f.Filters = append(f.Filters, right)
-		}
-	}
-
-	return f, errs.ErrorOrNil()
-}
-
-func (p *parser) comparison() (kubecost.AllocationFilter, error) {
-	field, key, err := p.filterKey()
-	if err != nil {
-		return nil, err
-	}
-
-	opToken, err := p.filterOp()
-	if err != nil {
-		return nil, err
-	}
-
-	var op kubecost.FilterOp
-
-	switch field {
-	case "services":
-		switch opToken.kind {
-		case colon:
-			op = kubecost.FilterContains
-		case bangColon:
-			op = kubecost.FilterNotContains
-		default:
-			return nil, parseError(opToken, "implementation problem: unhandled op token for services filter")
-		}
-	default:
-		switch opToken.kind {
-		case colon:
-			op = kubecost.FilterEquals
-		case bangColon:
-			op = kubecost.FilterNotEquals
-		default:
-			return nil, parseError(opToken, "implementation problem: unhandled op token")
-		}
-
-	}
-
-	values, err := p.filterValues()
-	if err != nil {
-		return nil, err
-	}
-
-	switch opToken.kind {
-	// In the != case, a sequence of filter values is ANDed
-	// Example:
-	// namespace!:"foo","bar" -> (and (notequals namespace foo)
-	//                                (notequals namespace bar))
-	case bangColon:
-		baseFilter := kubecost.AllocationFilterAnd{}
-
-		for _, v := range values {
-			baseFilter.Filters = append(baseFilter.Filters, kubecost.AllocationFilterCondition{
-				Field: field,
-				Key:   key,
-				Op:    op,
-				Value: v,
-			})
-		}
-
-		return baseFilter, nil
-	default:
-		baseFilter := kubecost.AllocationFilterOr{}
-
-		for _, v := range values {
-			baseFilter.Filters = append(baseFilter.Filters, kubecost.AllocationFilterCondition{
-				Field: field,
-				Key:   key,
-				Op:    op,
-				Value: v,
-			})
-		}
-
-		return baseFilter, nil
-	}
-
-}
-
-// filterKey parses a series of tokens that represent a "filter key", returning
-// an error if a filter key cannot be constructed.
-//
-// Examples:
-// tokens = [filterField2:label keyedAccess:app] -> FilterLabel, app, nil
-// tokens = [filterField1:namespace] -> FilterNamespace, "", nil
-func (p *parser) filterKey() (field kubecost.FilterField, key string, err error) {
-
-	if p.match(filterField2) {
-		rawField := p.previous().s
-		mappedField, ok := ff2ToKCFilterField[rawField]
-		if !ok {
-			return "", "", parseError(p.previous(), "expect key-mapped filter field, like 'label' or 'annotation'")
-		}
-
-		_, err := p.consume(keyedAccess, "expect keyed access like '[app]' after a mapped field")
-		if err != nil {
-			return "", "", err
-		}
-
-		key = p.previous().s
-		return mappedField, key, nil
-	}
-
-	_, err = p.consume(filterField1, "expect filter field")
-	if err != nil {
-		return "", "", err
-	}
-
-	rawField := p.previous().s
-	mappedField, ok := ff1ToKCFilterField[rawField]
-	if !ok {
-		return "", "", parseError(p.previous(), "expect known filter field, like 'cluster' or 'namespace'")
-	}
-
-	return mappedField, "", nil
-}
-
-func (p *parser) filterOp() (token, error) {
-	if p.match(bangColon, colon) {
-		return p.previous(), nil
-	}
-
-	return token{}, parseError(p.peek(), "expect filter op like ':' or '!:'")
-}
-
-func (p *parser) filterValues() ([]string, error) {
-	vals := []string{}
-
-	_, err := p.consume(str, "expect string as filter value")
-	if err != nil {
-		return nil, err
-	}
-	vals = append(vals, p.previous().s)
-
-	for p.match(comma) {
-		_, err := p.consume(str, "expect string as filter value")
-		if err != nil {
-			return nil, err
-		}
-
-		vals = append(vals, p.previous().s)
-	}
-
-	return vals, nil
-}

+ 0 - 545
pkg/util/allocationfilterutil/v2/parser_test.go

@@ -1,545 +0,0 @@
-package allocationfilterutil
-
-import (
-	"fmt"
-	"reflect"
-	"testing"
-
-	"github.com/opencost/opencost/pkg/kubecost"
-)
-
-func allocGenerator(props kubecost.AllocationProperties) kubecost.Allocation {
-	a := kubecost.Allocation{
-		Properties: &props,
-	}
-
-	a.Name = a.Properties.String()
-	return a
-}
-
-func TestParse(t *testing.T) {
-	cases := []struct {
-		input          string
-		expected       kubecost.AllocationFilter
-		shouldMatch    []kubecost.Allocation
-		shouldNotMatch []kubecost.Allocation
-	}{
-		{
-			input: `namespace:"kubecost"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterEquals,
-						Value: "kubecost",
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: "kubecost"}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: "kube-system"}),
-			},
-		},
-		{
-			input: `cluster:"cluster-one"+namespace:"kubecost"+controllerKind:"daemonset"+controllerName:"kubecost-network-costs"+container:"kubecost-network-costs"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterClusterID,
-						Op:    kubecost.FilterEquals,
-						Value: "cluster-one",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterEquals,
-						Value: "kubecost",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterControllerKind,
-						Op:    kubecost.FilterEquals,
-						Value: "daemonset",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterControllerName,
-						Op:    kubecost.FilterEquals,
-						Value: "kubecost-network-costs",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterContainer,
-						Op:    kubecost.FilterEquals,
-						Value: "kubecost-network-costs",
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{
-					Cluster:        "cluster-one",
-					Namespace:      "kubecost",
-					ControllerKind: "daemonset",
-					Controller:     "kubecost-network-costs",
-					Pod:            "kubecost-network-costs-abc123",
-					Container:      "kubecost-network-costs",
-				}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{
-					Cluster:        "cluster-one",
-					Namespace:      "default",
-					ControllerKind: "deployment",
-					Controller:     "workload-abc",
-					Pod:            "workload-abc-123abc",
-					Container:      "abc",
-				}),
-			},
-		},
-		{
-			input: `namespace!:"kubecost","kube-system"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterNotEquals,
-						Value: "kubecost",
-					},
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterNotEquals,
-						Value: "kube-system",
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: "abc"}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: "kubecost"}),
-				allocGenerator(kubecost.AllocationProperties{Namespace: "kube-system"}),
-			},
-		},
-		{
-			input: `namespace:"kubecost","kube-system"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterEquals,
-						Value: "kubecost",
-					},
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterEquals,
-						Value: "kube-system",
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: "kubecost"}),
-				allocGenerator(kubecost.AllocationProperties{Namespace: "kube-system"}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: "abc"}),
-			},
-		},
-		{
-			input: `node:"node a b c" , "node 12 3"` + string('\n') + "+" + string('\n') + string('\r') + `namespace : "kubecost"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNode,
-						Op:    kubecost.FilterEquals,
-						Value: "node a b c",
-					},
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNode,
-						Op:    kubecost.FilterEquals,
-						Value: "node 12 3",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterEquals,
-						Value: "kubecost",
-					},
-				}},
-			}},
-		},
-		{
-			input: `label[app_abc]:"cost_analyzer"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterLabel,
-						Key:   "app_abc",
-						Op:    kubecost.FilterEquals,
-						Value: "cost_analyzer",
-					},
-				}},
-			}},
-		},
-		{
-			input: `services:"123","abc"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterServices,
-						Op:    kubecost.FilterContains,
-						Value: "123",
-					},
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterServices,
-						Op:    kubecost.FilterContains,
-						Value: "abc",
-					},
-				}},
-			}},
-		},
-		{
-			input: `services!:"123","abc"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterServices,
-						Op:    kubecost.FilterNotContains,
-						Value: "123",
-					},
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterServices,
-						Op:    kubecost.FilterNotContains,
-						Value: "abc",
-					},
-				}},
-			}},
-		},
-		{
-			input: `label[app_abc]:"cost_analyzer"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterLabel,
-						Key:   "app_abc",
-						Op:    kubecost.FilterEquals,
-						Value: "cost_analyzer",
-					},
-				}},
-			}},
-		},
-		{
-			input: `label[app_abc]:"cost_analyzer"+label[foo]:"bar"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterLabel,
-						Key:   "app_abc",
-						Op:    kubecost.FilterEquals,
-						Value: "cost_analyzer",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterLabel,
-						Key:   "foo",
-						Op:    kubecost.FilterEquals,
-						Value: "bar",
-					},
-				}},
-			}},
-		},
-		{
-			input: `
-namespace:"kubecost" +
-label[app]:"cost_analyzer" +
-annotation[a1]:"b2" +
-cluster:"cluster-one" +
-node!:
-  "node-123",
-  "node-456" +
-controllerName:
-  "kubecost-cost-analyzer",
-  "kubecost-prometheus-server" +
-controllerKind!:
-  "daemonset",
-  "statefulset",
-  "job" +
-container!:"123-abc_foo" +
-pod!:"aaaaaaaaaaaaaaaaaaaaaaaaa" +
-services!:"abc123"
-`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterEquals,
-						Value: "kubecost",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterLabel,
-						Key:   "app",
-						Op:    kubecost.FilterEquals,
-						Value: "cost_analyzer",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterAnnotation,
-						Key:   "a1",
-						Op:    kubecost.FilterEquals,
-						Value: "b2",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterClusterID,
-						Op:    kubecost.FilterEquals,
-						Value: "cluster-one",
-					},
-				}},
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNode,
-						Op:    kubecost.FilterNotEquals,
-						Value: "node-123",
-					},
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNode,
-						Op:    kubecost.FilterNotEquals,
-						Value: "node-456",
-					},
-				}},
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterControllerName,
-						Op:    kubecost.FilterEquals,
-						Value: "kubecost-cost-analyzer",
-					},
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterControllerName,
-						Op:    kubecost.FilterEquals,
-						Value: "kubecost-prometheus-server",
-					},
-				}},
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterControllerKind,
-						Op:    kubecost.FilterNotEquals,
-						Value: "daemonset",
-					},
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterControllerKind,
-						Op:    kubecost.FilterNotEquals,
-						Value: "statefulset",
-					},
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterControllerKind,
-						Op:    kubecost.FilterNotEquals,
-						Value: "job",
-					},
-				}},
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterContainer,
-						Op:    kubecost.FilterNotEquals,
-						Value: "123-abc_foo",
-					},
-				}},
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterPod,
-						Op:    kubecost.FilterNotEquals,
-						Value: "aaaaaaaaaaaaaaaaaaaaaaaaa",
-					},
-				}},
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterServices,
-						Op:    kubecost.FilterNotContains,
-						Value: "abc123",
-					},
-				}},
-			}},
-		},
-		{
-			input: `namespace:"__unallocated__"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterEquals,
-						Value: kubecost.UnallocatedSuffix,
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: ""}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: "kube-system"}),
-			},
-		},
-		{
-			input: `namespace!:"__unallocated__"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterNamespace,
-						Op:    kubecost.FilterNotEquals,
-						Value: kubecost.UnallocatedSuffix,
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: "kubecost"}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Namespace: ""}),
-			},
-		},
-		{
-			input: `controllerKind:"__unallocated__"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterControllerKind,
-						Op:    kubecost.FilterEquals,
-						Value: kubecost.UnallocatedSuffix,
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{ControllerKind: ""}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{ControllerKind: "deployment"}),
-			},
-		},
-		{
-			input: `controllerKind!:"__unallocated__"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterControllerKind,
-						Op:    kubecost.FilterNotEquals,
-						Value: kubecost.UnallocatedSuffix,
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{ControllerKind: "deployment"}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{ControllerKind: ""}),
-			},
-		},
-		{
-			input: `label[app]:"__unallocated__"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterLabel,
-						Key:   "app",
-						Op:    kubecost.FilterEquals,
-						Value: kubecost.UnallocatedSuffix,
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Labels: map[string]string{"foo": "bar"}}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Labels: map[string]string{"app": "test"}}),
-			},
-		},
-		{
-			input: `label[app]!:"__unallocated__"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterLabel,
-						Key:   "app",
-						Op:    kubecost.FilterNotEquals,
-						Value: kubecost.UnallocatedSuffix,
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Labels: map[string]string{"app": "test"}}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Labels: map[string]string{"foo": "bar"}}),
-			},
-		},
-		{
-			input: `services:"__unallocated__"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterOr{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterServices,
-						Op:    kubecost.FilterContains,
-						Value: kubecost.UnallocatedSuffix,
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Services: []string{}}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Services: []string{"svc1", "svc2"}}),
-			},
-		},
-		{
-			input: `services!:"__unallocated__"`,
-			expected: kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-				kubecost.AllocationFilterAnd{[]kubecost.AllocationFilter{
-					kubecost.AllocationFilterCondition{
-						Field: kubecost.FilterServices,
-						Op:    kubecost.FilterNotContains,
-						Value: kubecost.UnallocatedSuffix,
-					},
-				}},
-			}},
-			shouldMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Services: []string{"svc1", "svc2"}}),
-			},
-			shouldNotMatch: []kubecost.Allocation{
-				allocGenerator(kubecost.AllocationProperties{Services: []string{}}),
-			},
-		},
-	}
-
-	for i, c := range cases {
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
-			t.Logf("Query: %s", c.input)
-			result, err := ParseAllocationFilter(c.input)
-			t.Logf("Result: %s", result)
-			if err != nil {
-				t.Fatalf("Unexpected parse error: %s", err)
-			}
-			if !reflect.DeepEqual(result, c.expected) {
-				t.Fatalf("Expected:\n%s\nGot:\n%s", c.expected, result)
-			}
-
-			for _, shouldMatch := range c.shouldMatch {
-				if !result.Matches(&shouldMatch) {
-					t.Errorf("Failed to match %s", shouldMatch.Name)
-				}
-			}
-			for _, shouldNotMatch := range c.shouldNotMatch {
-				if result.Matches(&shouldNotMatch) {
-					t.Errorf("Incorrectly matched %s", shouldNotMatch.Name)
-				}
-			}
-		})
-	}
-}

+ 470 - 0
pkg/util/filterutil/asset_test.go

@@ -0,0 +1,470 @@
+package filterutil
+
+import (
+	"testing"
+
+	"github.com/opencost/opencost/pkg/costmodel/clusters"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/util/mapper"
+)
+
+var assetCompiler = kubecost.NewAssetMatchCompiler()
+
+func TestAssetFiltersFromParamsV1(t *testing.T) {
+	cases := []struct {
+		name           string
+		qp             map[string]string
+		shouldMatch    []kubecost.Asset
+		shouldNotMatch []kubecost.Asset
+	}{
+		{
+			name: "empty",
+			qp:   map[string]string{},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Node{},
+				&kubecost.Any{},
+				&kubecost.Cloud{},
+				&kubecost.LoadBalancer{},
+				&kubecost.ClusterManagement{},
+				&kubecost.Disk{},
+				&kubecost.Network{},
+				&kubecost.SharedAsset{},
+			},
+			shouldNotMatch: []kubecost.Asset{},
+		},
+		{
+			name: "type: node",
+			qp: map[string]string{
+				ParamFilterTypes: "node",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Node{},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Any{},
+				&kubecost.Cloud{},
+				&kubecost.LoadBalancer{},
+				&kubecost.ClusterManagement{},
+				&kubecost.Disk{},
+				&kubecost.Network{},
+				&kubecost.SharedAsset{},
+			},
+		},
+		{
+			name: "type: node capitalized",
+			qp: map[string]string{
+				ParamFilterTypes: "Node",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Node{},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Any{},
+				&kubecost.Cloud{},
+				&kubecost.LoadBalancer{},
+				&kubecost.ClusterManagement{},
+				&kubecost.Disk{},
+				&kubecost.Network{},
+				&kubecost.SharedAsset{},
+			},
+		},
+		{
+			name: "type: disk",
+			qp: map[string]string{
+				ParamFilterTypes: "disk",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Disk{},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Any{},
+				&kubecost.Cloud{},
+				&kubecost.Network{},
+				&kubecost.Node{},
+				&kubecost.LoadBalancer{},
+				&kubecost.ClusterManagement{},
+				&kubecost.SharedAsset{},
+			},
+		},
+		{
+			name: "type: loadbalancer",
+			qp: map[string]string{
+				ParamFilterTypes: "loadbalancer",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.LoadBalancer{},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Any{},
+				&kubecost.Cloud{},
+				&kubecost.Node{},
+				&kubecost.ClusterManagement{},
+				&kubecost.Disk{},
+				&kubecost.Network{},
+				&kubecost.SharedAsset{},
+			},
+		},
+		{
+			name: "type: clustermanagement",
+			qp: map[string]string{
+				ParamFilterTypes: "clustermanagement",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.ClusterManagement{},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Any{},
+				&kubecost.Cloud{},
+				&kubecost.LoadBalancer{},
+				&kubecost.Node{},
+				&kubecost.Disk{},
+				&kubecost.Network{},
+				&kubecost.SharedAsset{},
+			},
+		},
+		{
+			name: "type: network",
+			qp: map[string]string{
+				ParamFilterTypes: "network",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Network{},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Any{},
+				&kubecost.Cloud{},
+				&kubecost.LoadBalancer{},
+				&kubecost.ClusterManagement{},
+				&kubecost.Node{},
+				&kubecost.Disk{},
+				&kubecost.SharedAsset{},
+			},
+		},
+		{
+			name: "account",
+			qp: map[string]string{
+				ParamFilterAccounts: "foo,bar",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						Account: "foo",
+					},
+				},
+				&kubecost.Network{
+					Properties: &kubecost.AssetProperties{
+						Account: "bar",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Network{
+					Properties: &kubecost.AssetProperties{
+						Account: "baz",
+					},
+				},
+			},
+		},
+		{
+			name: "category",
+			qp: map[string]string{
+				ParamFilterCategories: "Network,Compute",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Network{
+					Properties: &kubecost.AssetProperties{
+						Category: kubecost.NetworkCategory,
+					},
+				},
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						Category: kubecost.ComputeCategory,
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.ClusterManagement{
+					Properties: &kubecost.AssetProperties{
+						Category: kubecost.ManagementCategory,
+					},
+				},
+			},
+		},
+		{
+			name: "cluster",
+			qp: map[string]string{
+				ParamFilterClusters: "cluster-one",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.LoadBalancer{
+					Properties: &kubecost.AssetProperties{
+						Cluster: "cluster-one",
+					},
+				},
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						Cluster: "cluster-one",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.ClusterManagement{
+					Properties: &kubecost.AssetProperties{
+						Cluster: "cluster-two",
+					},
+				},
+			},
+		},
+		{
+			name: "project",
+			qp: map[string]string{
+				ParamFilterProjects: "proj1,proj2",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Disk{
+					Properties: &kubecost.AssetProperties{
+						Project: "proj1",
+					},
+				},
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						Project: "proj2",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.ClusterManagement{
+					Properties: &kubecost.AssetProperties{
+						Project: "proj3",
+					},
+				},
+			},
+		},
+		{
+			name: "provider",
+			qp: map[string]string{
+				ParamFilterProviders: "p1,p2",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Disk{
+					Properties: &kubecost.AssetProperties{
+						Provider: "p1",
+					},
+				},
+				&kubecost.Network{
+					Properties: &kubecost.AssetProperties{
+						Provider: "p2",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						Provider: "p3",
+					},
+				},
+			},
+		},
+		{
+			name: "providerID v1",
+			qp: map[string]string{
+				ParamFilterProviderIDs: "p1,p2",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Disk{
+					Properties: &kubecost.AssetProperties{
+						ProviderID: "p1",
+					},
+				},
+				&kubecost.Network{
+					Properties: &kubecost.AssetProperties{
+						ProviderID: "p2",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						ProviderID: "p3",
+					},
+				},
+			},
+		},
+		{
+			name: "providerID v2",
+			qp: map[string]string{
+				ParamFilterProviderIDsV2: "p1,p2",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Disk{
+					Properties: &kubecost.AssetProperties{
+						ProviderID: "p1",
+					},
+				},
+				&kubecost.Network{
+					Properties: &kubecost.AssetProperties{
+						ProviderID: "p2",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						ProviderID: "p3",
+					},
+				},
+			},
+		},
+		{
+			name: "service",
+			qp: map[string]string{
+				ParamFilterServices: "p1,p2",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Disk{
+					Properties: &kubecost.AssetProperties{
+						Service: "p1",
+					},
+				},
+				&kubecost.Network{
+					Properties: &kubecost.AssetProperties{
+						Service: "p2",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						Service: "p3",
+					},
+				},
+			},
+		},
+		{
+			name: "label",
+			qp: map[string]string{
+				ParamFilterLabels: "foo:bar,baz:qux",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Disk{
+					Labels: kubecost.AssetLabels{
+						"foo": "bar",
+						"baz": "other",
+					},
+				},
+				&kubecost.Node{
+					Labels: kubecost.AssetLabels{
+						"baz": "qux",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.ClusterManagement{
+					Labels: kubecost.AssetLabels{
+						"baz": "other",
+					},
+				},
+			},
+		},
+		{
+			name: "region",
+			qp: map[string]string{
+				ParamFilterRegions: "r1,r2",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Node{
+					Labels: kubecost.AssetLabels{
+						"label_topology_kubernetes_io_region": "r1",
+					},
+				},
+				&kubecost.Node{
+					Labels: kubecost.AssetLabels{
+						"label_topology_kubernetes_io_region": "r2",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Node{
+					Labels: kubecost.AssetLabels{
+						"label_topology_kubernetes_io_region": "r3",
+					},
+				},
+			},
+		},
+		{
+			name: "complex",
+			qp: map[string]string{
+				ParamFilterRegions:  "r1,r2",
+				ParamFilterTypes:    "node",
+				ParamFilterAccounts: "a*",
+			},
+			shouldMatch: []kubecost.Asset{
+				&kubecost.Node{
+					Labels: kubecost.AssetLabels{
+						"label_topology_kubernetes_io_region": "r1",
+					},
+					Properties: &kubecost.AssetProperties{
+						Account: "a1",
+					},
+				},
+				&kubecost.Node{
+					Labels: kubecost.AssetLabels{
+						"label_topology_kubernetes_io_region": "r2",
+					},
+					Properties: &kubecost.AssetProperties{
+						Account: "a2",
+					},
+				},
+			},
+			shouldNotMatch: []kubecost.Asset{
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						Account: "b1",
+					},
+				},
+				&kubecost.Node{
+					Properties: &kubecost.AssetProperties{
+						Account: "3a",
+					},
+				},
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			// Convert map[string]string representation to the mapper
+			// library type
+			qpMap := mapper.NewMap()
+			for k, v := range c.qp {
+				qpMap.Set(k, v)
+			}
+			qpMapper := mapper.NewMapper(qpMap)
+
+			clustersMap := mockClusterMap{
+				m: map[string]*clusters.ClusterInfo{
+					"mapped-cluster-ID-1": {
+						ID:   "mapped-cluster-ID-ABC",
+						Name: "cluster ABC",
+					},
+				},
+			}
+
+			filterTree := AssetFilterFromParamsV1(qpMapper, clustersMap)
+			filter, err := assetCompiler.Compile(filterTree)
+			if err != nil {
+				t.Fatalf("compiling filter: %s", err)
+			}
+			for _, asset := range c.shouldMatch {
+				if !filter.Matches(asset) {
+					t.Errorf("should have matched: %s", asset.String())
+				}
+			}
+			for _, asset := range c.shouldNotMatch {
+				if filter.Matches(asset) {
+					t.Errorf("incorrectly matched: %s", asset.String())
+				}
+			}
+		})
+	}
+}

+ 743 - 0
pkg/util/filterutil/filterutil.go

@@ -0,0 +1,743 @@
+package filterutil
+
+import (
+	"strings"
+
+	"github.com/opencost/opencost/pkg/costmodel/clusters"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util/mapper"
+	"github.com/opencost/opencost/pkg/util/typeutil"
+
+	filter "github.com/opencost/opencost/pkg/filter21"
+	afilter "github.com/opencost/opencost/pkg/filter21/allocation"
+	assetfilter "github.com/opencost/opencost/pkg/filter21/asset"
+	"github.com/opencost/opencost/pkg/filter21/ast"
+	// cloudfilter "github.com/opencost/opencost/pkg/filter/cloud"
+)
+
// ============================================================================
// This file contains:
// Parsing (HTTP query params -> v2.1 filter) for V1 of query param filters
//
// e.g. "filterNamespaces=ku&filterControllers=deployment:kc"
// ============================================================================

// defaultFieldByType registers a DefaultFieldByName lookup func per filter
// field type, keyed by the type's name string. Values are stored as `any`
// because each registered func has a different concrete signature
// (func(T) *ast.Field); DefaultFieldByName casts back to the correct one.
var defaultFieldByType = map[string]any{
	// typeutil.TypeOf[cloudfilter.CloudAggregationField](): cloudfilter.DefaultFieldByName,
	typeutil.TypeOf[afilter.AllocationField](): afilter.DefaultFieldByName,
	typeutil.TypeOf[assetfilter.AssetField]():  assetfilter.DefaultFieldByName,
}
+
+// DefaultFieldByName looks up a specific T field instance by name and returns the default
+// ast.Field value for that type.
+func DefaultFieldByName[T ~string](field T) *ast.Field {
+	lookup, ok := defaultFieldByType[typeutil.TypeOf[T]()]
+	if !ok {
+		log.Errorf("Failed to get default field lookup for: %s", typeutil.TypeOf[T]())
+		return nil
+	}
+
+	defaultLookup, ok := lookup.(func(T) *ast.Field)
+	if !ok {
+		log.Errorf("Failed to cast default field lookup for: %s", typeutil.TypeOf[T]())
+		return nil
+	}
+
+	return defaultLookup(field)
+}
+
// HTTP query parameter names accepted by the v1 filter language.
const (
	// Allocation API filters (clusters, labels, and services are also
	// accepted by the Asset API — see ValidAssetFilterParams).
	ParamFilterClusters        = "filterClusters"
	ParamFilterNodes           = "filterNodes"
	ParamFilterNamespaces      = "filterNamespaces"
	ParamFilterControllerKinds = "filterControllerKinds"
	ParamFilterControllers     = "filterControllers"
	ParamFilterPods            = "filterPods"
	ParamFilterContainers      = "filterContainers"

	// "Label-mapped" alias filters; these require a LabelConfig to resolve
	// the alias to a concrete label/annotation key.
	ParamFilterDepartments  = "filterDepartments"
	ParamFilterEnvironments = "filterEnvironments"
	ParamFilterOwners       = "filterOwners"
	ParamFilterProducts     = "filterProducts"
	ParamFilterTeams        = "filterTeams"

	// Key:value and service filters.
	ParamFilterAnnotations = "filterAnnotations"
	ParamFilterLabels      = "filterLabels"
	ParamFilterServices    = "filterServices"

	// Asset API filters. Note the two accepted capitalizations for the
	// provider-ID param ("...IDs" and "...Ids").
	ParamFilterAccounts      = "filterAccounts"
	ParamFilterCategories    = "filterCategories"
	ParamFilterNames         = "filterNames"
	ParamFilterProjects      = "filterProjects"
	ParamFilterProviders     = "filterProviders"
	ParamFilterProviderIDs   = "filterProviderIDs"
	ParamFilterProviderIDsV2 = "filterProviderIds"
	ParamFilterRegions       = "filterRegions"
	ParamFilterTypes         = "filterTypes"
)
+
+// ValidAssetFilterParams returns a list of all possible filter parameters
+func ValidAssetFilterParams() []string {
+	return []string{
+		ParamFilterAccounts,
+		ParamFilterCategories,
+		ParamFilterClusters,
+		ParamFilterLabels,
+		ParamFilterNames,
+		ParamFilterProjects,
+		ParamFilterProviders,
+		ParamFilterProviderIDs,
+		ParamFilterProviderIDsV2,
+		ParamFilterRegions,
+		ParamFilterServices,
+		ParamFilterTypes,
+	}
+}
+
// AllocationPropToV1FilterParamKey maps allocation string property
// representations to v1 filter param keys for legacy filter config support
// (e.g. reports). Example mapping: "cluster" -> "filterClusters".
//
// Note: container and annotation props have no entry here; only the
// properties listed below are translatable to v1 params via this map.
var AllocationPropToV1FilterParamKey = map[string]string{
	kubecost.AllocationClusterProp:        ParamFilterClusters,
	kubecost.AllocationNodeProp:           ParamFilterNodes,
	kubecost.AllocationNamespaceProp:      ParamFilterNamespaces,
	kubecost.AllocationControllerProp:     ParamFilterControllers,
	kubecost.AllocationControllerKindProp: ParamFilterControllerKinds,
	kubecost.AllocationPodProp:            ParamFilterPods,
	kubecost.AllocationLabelProp:          ParamFilterLabels,
	kubecost.AllocationServiceProp:        ParamFilterServices,
	kubecost.AllocationDepartmentProp:     ParamFilterDepartments,
	kubecost.AllocationEnvironmentProp:    ParamFilterEnvironments,
	kubecost.AllocationOwnerProp:          ParamFilterOwners,
	kubecost.AllocationProductProp:        ParamFilterProducts,
	kubecost.AllocationTeamProp:           ParamFilterTeams,
}
+
// AssetPropToV1FilterParamKey maps asset string property representations to v1
// filter param keys for legacy filter config support (e.g. reports). Example
// mapping: "category" -> "filterCategories".
//
// The provider-ID property maps to the "filterProviderIDs" spelling; the
// alternate "filterProviderIds" (ParamFilterProviderIDsV2) spelling is only
// honored when reading query params.
var AssetPropToV1FilterParamKey = map[kubecost.AssetProperty]string{
	kubecost.AssetNameProp:       ParamFilterNames,
	kubecost.AssetTypeProp:       ParamFilterTypes,
	kubecost.AssetAccountProp:    ParamFilterAccounts,
	kubecost.AssetCategoryProp:   ParamFilterCategories,
	kubecost.AssetClusterProp:    ParamFilterClusters,
	kubecost.AssetProjectProp:    ParamFilterProjects,
	kubecost.AssetProviderProp:   ParamFilterProviders,
	kubecost.AssetProviderIDProp: ParamFilterProviderIDs,
	kubecost.AssetServiceProp:    ParamFilterServices,
}
+
+// AllHTTPParamKeys returns all HTTP GET parameters used for v1 filters. It is
+// intended to help validate HTTP queries in handlers to help avoid e.g.
+// spelling errors.
+func AllHTTPParamKeys() []string {
+	return []string{
+		ParamFilterClusters,
+		ParamFilterNodes,
+		ParamFilterNamespaces,
+		ParamFilterControllerKinds,
+		ParamFilterControllers,
+		ParamFilterPods,
+		ParamFilterContainers,
+
+		ParamFilterDepartments,
+		ParamFilterEnvironments,
+		ParamFilterOwners,
+		ParamFilterProducts,
+		ParamFilterTeams,
+
+		ParamFilterAnnotations,
+		ParamFilterLabels,
+		ParamFilterServices,
+	}
+}
+
// AllocationFilterFromParamsV1 takes a set of HTTP query parameters and
// converts them to an AllocationFilter, which is a structured in-Go
// representation of a set of filters.
//
// The HTTP query parameters are the "v1" filters attached to the Allocation
// API: "filterNamespaces=", "filterNodes=", etc.
//
// It takes an optional LabelConfig, which if provided enables "label-mapped"
// filters like "filterDepartments".
//
// It takes an optional ClusterMap, which if provided enables cluster name
// filtering. This turns all `filterClusters=foo` arguments into the equivalent
// of `clusterID = "foo" OR clusterName = "foo"`.
//
// The result is an AND across the different param fields, with an OR across
// the comma-separated values within each field. When no params are set, a
// VoidOp is returned (no filter).
func AllocationFilterFromParamsV1(
	params AllocationFilterV1,
	labelConfig *kubecost.LabelConfig,
	clusterMap clusters.ClusterMap,
) filter.Filter {

	var filterOps []ast.FilterNode

	// ClusterMap does not provide a cluster name -> cluster ID mapping in the
	// interface, probably because there could be multiple IDs with the same
	// name. However, V1 filter logic demands that the parameters to
	// filterClusters= be checked against both cluster ID AND cluster name.
	//
	// To support expected filterClusters= behavior, we construct a mapping
	// of cluster name -> cluster IDs (could be multiple IDs for the same name)
	// so that we can create AllocationFilters that use only ClusterIDEquals.
	//
	// AllocationFilter intentionally does not support cluster name filters
	// because those should be considered presentation-layer only.
	clusterNameToIDs := map[string][]string{}
	if clusterMap != nil {
		cMap := clusterMap.AsMap()
		for _, info := range cMap {
			if info == nil {
				continue
			}

			if _, ok := clusterNameToIDs[info.Name]; ok {
				clusterNameToIDs[info.Name] = append(clusterNameToIDs[info.Name], info.ID)
			} else {
				clusterNameToIDs[info.Name] = []string{info.ID}
			}
		}
	}

	// The proliferation of > 0 guards in the function is to avoid constructing
	// empty filter structs. While it is functionally equivalent to add empty
	// filter structs (they evaluate to true always) there could be overhead
	// when calling Matches() repeatedly for no purpose.

	if len(params.Clusters) > 0 {
		var ops []ast.FilterNode

		// filter by cluster identifier (ID), with trailing-"*" wildcard support
		ops = push(ops, filterV1SingleValueFromList(params.Clusters, afilter.FieldClusterID))

		// Also match by cluster name: translate each name (or name prefix, if
		// wildcarded) into equality checks against its mapped cluster IDs.
		for _, rawFilterValue := range params.Clusters {
			clusterNameFilter, wildcard := parseWildcardEnd(rawFilterValue)

			clusterIDsToFilter := []string{}
			for clusterName := range clusterNameToIDs {
				if wildcard && strings.HasPrefix(clusterName, clusterNameFilter) {
					clusterIDsToFilter = append(clusterIDsToFilter, clusterNameToIDs[clusterName]...)
				} else if !wildcard && clusterName == clusterNameFilter {
					clusterIDsToFilter = append(clusterIDsToFilter, clusterNameToIDs[clusterName]...)
				}
			}

			for _, clusterID := range clusterIDsToFilter {
				ops = append(ops, &ast.EqualOp{
					Left: ast.Identifier{
						Field: afilter.DefaultFieldByName(afilter.FieldClusterID),
						Key:   "",
					},
					Right: clusterID,
				})
			}
		}

		// A match on either the cluster ID or a name-mapped ID suffices.
		clustersOp := opsToOr(ops)
		filterOps = push(filterOps, clustersOp)
	}

	if len(params.Nodes) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(params.Nodes, afilter.FieldNode))
	}

	if len(params.Namespaces) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(params.Namespaces, afilter.FieldNamespace))
	}

	if len(params.ControllerKinds) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(params.ControllerKinds, afilter.FieldControllerKind))
	}

	// filterControllers= accepts controllerkind:controllername filters, e.g.
	// "deployment:kubecost-cost-analyzer"
	//
	// Thus, we have to make a custom OR filter for this condition.
	if len(params.Controllers) > 0 {
		var ops []ast.FilterNode

		for _, rawFilterValue := range params.Controllers {
			split := strings.Split(rawFilterValue, ":")
			if len(split) == 1 {
				// Name-only filter; wildcard applies to the name.
				filterValue, wildcard := parseWildcardEnd(split[0])

				subFilter := toEqualOp(afilter.FieldControllerName, "", filterValue, wildcard)
				ops = append(ops, subFilter)
			} else if len(split) == 2 {
				// kind:name filter; only the name may be wildcarded.
				kindFilterVal := split[0]
				nameFilterVal, wildcard := parseWildcardEnd(split[1])

				kindFilter := toEqualOp(afilter.FieldControllerKind, "", kindFilterVal, false)
				nameFilter := toEqualOp(afilter.FieldControllerName, "", nameFilterVal, wildcard)

				// The controller name AND the controller kind must match
				ops = append(ops, &ast.AndOp{
					Operands: []ast.FilterNode{
						kindFilter,
						nameFilter,
					},
				})
			} else {
				// More than one ":" is malformed; skip the value entirely.
				log.Warnf("illegal filter for controller: %s", rawFilterValue)
			}
		}
		controllersOp := opsToOr(ops)
		filterOps = push(filterOps, controllersOp)
	}

	if len(params.Pods) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(params.Pods, afilter.FieldPod))
	}

	if len(params.Containers) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(params.Containers, afilter.FieldContainer))
	}

	// Label-mapped queries require a label config to be present.
	if labelConfig != nil {
		if len(params.Departments) > 0 {
			filterOps = push(filterOps, filterV1LabelAliasMappedFromList(params.Departments, labelConfig.DepartmentLabel))
		}
		if len(params.Environments) > 0 {
			filterOps = push(filterOps, filterV1LabelAliasMappedFromList(params.Environments, labelConfig.EnvironmentLabel))
		}
		if len(params.Owners) > 0 {
			filterOps = push(filterOps, filterV1LabelAliasMappedFromList(params.Owners, labelConfig.OwnerLabel))
		}
		if len(params.Products) > 0 {
			filterOps = push(filterOps, filterV1LabelAliasMappedFromList(params.Products, labelConfig.ProductLabel))
		}
		if len(params.Teams) > 0 {
			filterOps = push(filterOps, filterV1LabelAliasMappedFromList(params.Teams, labelConfig.TeamLabel))
		}
	} else {
		log.Debugf("No label config is available. Not creating filters for label-mapped 'fields'.")
	}

	if len(params.Annotations) > 0 {
		filterOps = push(filterOps, filterV1DoubleValueFromList(params.Annotations, afilter.FieldAnnotation))
	}

	if len(params.Labels) > 0 {
		filterOps = push(filterOps, filterV1DoubleValueFromList(params.Labels, afilter.FieldLabel))
	}

	if len(params.Services) > 0 {
		var ops []ast.FilterNode

		// filterServices= is the only filter that uses the "contains"
		// operator; a trailing "*" upgrades it to a contains-prefix match.
		for _, filterValue := range params.Services {
			filterValue, wildcard := parseWildcardEnd(filterValue)

			subFilter := toContainsOp(afilter.FieldServices, "", filterValue, wildcard)
			ops = append(ops, subFilter)
		}

		serviceOps := opsToOr(ops)
		filterOps = push(filterOps, serviceOps)
	}

	andFilter := opsToAnd(filterOps)
	if andFilter == nil {
		return &ast.VoidOp{} // no filter
	}

	return andFilter
}
+
// AssetFilterFromParamsV1 takes the v1 Asset API HTTP query parameters
// ("filterAccounts=", "filterTypes=", etc.) read through qp and converts them
// to a structured filter tree.
//
// It takes an optional ClusterMap, which if provided enables cluster name
// filtering: each `filterClusters=foo` argument matches both cluster IDs and
// cluster names.
//
// The result is an AND across the different param fields, with an OR across
// the comma-separated values within each field. When no params are set, a
// VoidOp is returned (no filter).
func AssetFilterFromParamsV1(
	qp mapper.PrimitiveMapReader,
	clusterMap clusters.ClusterMap,
) filter.Filter {

	var filterOps []ast.FilterNode

	// ClusterMap does not provide a cluster name -> cluster ID mapping in the
	// interface, probably because there could be multiple IDs with the same
	// name. However, V1 filter logic demands that the parameters to
	// filterClusters= be checked against both cluster ID AND cluster name.
	//
	// To support expected filterClusters= behavior, we construct a mapping
	// of cluster name -> cluster IDs (could be multiple IDs for the same name)
	// so that we can create filters that use only cluster-ID equality.
	//
	// The filter AST intentionally does not support cluster name filters
	// because those should be considered presentation-layer only.
	clusterNameToIDs := map[string][]string{}
	if clusterMap != nil {
		cMap := clusterMap.AsMap()
		for _, info := range cMap {
			if info == nil {
				continue
			}

			if _, ok := clusterNameToIDs[info.Name]; ok {
				clusterNameToIDs[info.Name] = append(clusterNameToIDs[info.Name], info.ID)
			} else {
				clusterNameToIDs[info.Name] = []string{info.ID}
			}
		}
	}

	// The proliferation of > 0 guards in the function is to avoid constructing
	// empty filter structs. While it is functionally equivalent to add empty
	// filter structs (they evaluate to true always) there could be overhead
	// when calling Matches() repeatedly for no purpose.

	if filterClusters := qp.GetList(ParamFilterClusters, ","); len(filterClusters) > 0 {
		var ops []ast.FilterNode

		// filter by cluster identifier (ID), with trailing-"*" wildcard support
		ops = push(ops, filterV1SingleValueFromList(filterClusters, assetfilter.FieldClusterID))

		// Also match by cluster name: translate each name (or name prefix, if
		// wildcarded) into equality checks against its mapped cluster IDs.
		for _, rawFilterValue := range filterClusters {
			clusterNameFilter, wildcard := parseWildcardEnd(rawFilterValue)

			clusterIDsToFilter := []string{}
			for clusterName := range clusterNameToIDs {
				if wildcard && strings.HasPrefix(clusterName, clusterNameFilter) {
					clusterIDsToFilter = append(clusterIDsToFilter, clusterNameToIDs[clusterName]...)
				} else if !wildcard && clusterName == clusterNameFilter {
					clusterIDsToFilter = append(clusterIDsToFilter, clusterNameToIDs[clusterName]...)
				}
			}

			for _, clusterID := range clusterIDsToFilter {
				ops = append(ops, &ast.EqualOp{
					Left: ast.Identifier{
						Field: assetfilter.DefaultFieldByName(assetfilter.FieldClusterID),
						Key:   "",
					},
					Right: clusterID,
				})
			}
		}

		clustersOp := opsToOr(ops)
		filterOps = push(filterOps, clustersOp)
	}

	if raw := qp.GetList(ParamFilterAccounts, ","); len(raw) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(raw, assetfilter.FieldAccount))
	}

	if raw := qp.GetList(ParamFilterCategories, ","); len(raw) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(raw, assetfilter.FieldCategory))
	}

	if raw := qp.GetList(ParamFilterNames, ","); len(raw) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(raw, assetfilter.FieldName))
	}

	if raw := qp.GetList(ParamFilterProjects, ","); len(raw) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(raw, assetfilter.FieldProject))
	}

	if raw := qp.GetList(ParamFilterProviders, ","); len(raw) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(raw, assetfilter.FieldProvider))
	}

	// Provider IDs accept both the "filterProviderIDs" and "filterProviderIds"
	// spellings; the former wins when both are present.
	if raw := GetList(ParamFilterProviderIDs, ParamFilterProviderIDsV2, qp); len(raw) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(raw, assetfilter.FieldProviderID))
	}

	if raw := qp.GetList(ParamFilterServices, ","); len(raw) > 0 {
		filterOps = push(filterOps, filterV1SingleValueFromList(raw, assetfilter.FieldService))
	}

	if raw := qp.GetList(ParamFilterTypes, ","); len(raw) > 0 {
		// Types have a special situation where we allow users to enter them
		// capitalized or uncapitalized
		for i := range raw {
			raw[i] = strings.ToLower(raw[i])
		}
		filterOps = push(filterOps, filterV1SingleValueFromList(raw, assetfilter.FieldType))
	}

	if raw := qp.GetList(ParamFilterLabels, ","); len(raw) > 0 {
		filterOps = push(filterOps, filterV1DoubleValueFromList(raw, assetfilter.FieldLabel))
	}

	// Region filters are implemented as a filter on the fixed
	// topology.kubernetes.io/region label (in sanitized form).
	if raw := qp.GetList(ParamFilterRegions, ","); len(raw) > 0 {
		filterOps = push(filterOps, filterV1SingleLabelKeyFromList(raw, "label_topology_kubernetes_io_region", assetfilter.FieldLabel))
	}

	andFilter := opsToAnd(filterOps)
	if andFilter == nil {
		return &ast.VoidOp{} // no filter
	}

	return andFilter
}
+
+// filterV1SingleValueFromList creates an OR of equality filters for a given
+// filter field.
+//
+// The v1 query language (e.g. "filterNamespaces=XYZ,ABC") uses OR within
+// a field (e.g. namespace = XYZ OR namespace = ABC)
+func filterV1SingleValueFromList[T ~string](rawFilterValues []string, filterField T) ast.FilterNode {
+	var ops []ast.FilterNode
+
+	for _, filterValue := range rawFilterValues {
+		filterValue = strings.TrimSpace(filterValue)
+		filterValue, wildcard := parseWildcardEnd(filterValue)
+
+		subFilter := toEqualOp(filterField, "", filterValue, wildcard)
+		ops = append(ops, subFilter)
+	}
+
+	return opsToOr(ops)
+}
+
+func filterV1SingleLabelKeyFromList[T ~string](rawFilterValues []string, labelName string, labelField T) ast.FilterNode {
+	var ops []ast.FilterNode
+	labelName = prom.SanitizeLabelName(labelName)
+
+	for _, filterValue := range rawFilterValues {
+		filterValue = strings.TrimSpace(filterValue)
+		filterValue, wildcard := parseWildcardEnd(filterValue)
+
+		subFilter := toEqualOp(labelField, labelName, filterValue, wildcard)
+
+		ops = append(ops, subFilter)
+	}
+
+	return opsToOr(ops)
+}
+
+// filterV1LabelAliasMappedFromList is like filterV1SingleValueFromList but is
+// explicitly for labels and annotations because "label-mapped" filters (like filterTeams=)
+// are actually label filters with a fixed label key.
+func filterV1LabelAliasMappedFromList(rawFilterValues []string, labelName string) ast.FilterNode {
+	var ops []ast.FilterNode
+	labelName = prom.SanitizeLabelName(labelName)
+
+	for _, filterValue := range rawFilterValues {
+		filterValue = strings.TrimSpace(filterValue)
+		filterValue, wildcard := parseWildcardEnd(filterValue)
+
+		subFilter := toAllocationAliasOp(labelName, filterValue, wildcard)
+
+		ops = append(ops, subFilter)
+	}
+
+	return opsToOr(ops)
+}
+
+// filterV1DoubleValueFromList creates an OR of key:value equality filters for
+// colon-split filter values.
+//
+// The v1 query language (e.g. "filterLabels=app:foo,l2:bar") uses OR within
+// a field (e.g. label[app] = foo OR label[l2] = bar)
+func filterV1DoubleValueFromList[T ~string](rawFilterValuesUnsplit []string, filterField T) ast.FilterNode {
+	var ops []ast.FilterNode
+
+	for _, unsplit := range rawFilterValuesUnsplit {
+		if unsplit != "" {
+			split := strings.Split(unsplit, ":")
+			if len(split) != 2 {
+				log.Warnf("illegal key/value filter (ignoring): %s", unsplit)
+				continue
+			}
+			labelName := prom.SanitizeLabelName(strings.TrimSpace(split[0]))
+			val := strings.TrimSpace(split[1])
+			val, wildcard := parseWildcardEnd(val)
+
+			subFilter := toEqualOp(filterField, labelName, val, wildcard)
+			ops = append(ops, subFilter)
+		}
+	}
+
+	return opsToOr(ops)
+}
+
// parseWildcardEnd checks if the given filter value is wildcarded, meaning
// it ends in "*". If it does, it removes the (single, final) "*" and returns
// the cleaned string and true. Otherwise, it returns the same filter and
// false.
//
// parseWildcardEnd("kube*") = "kube", true
// parseWildcardEnd("kube") = "kube", false
func parseWildcardEnd(rawFilterValue string) (string, bool) {
	if !strings.HasSuffix(rawFilterValue, "*") {
		return rawFilterValue, false
	}
	return rawFilterValue[:len(rawFilterValue)-1], true
}
+
+func push(a []ast.FilterNode, item ast.FilterNode) []ast.FilterNode {
+	if item == nil {
+		return a
+	}
+
+	return append(a, item)
+}
+
+func opsToOr(ops []ast.FilterNode) ast.FilterNode {
+	if len(ops) == 0 {
+		return nil
+	}
+
+	if len(ops) == 1 {
+		return ops[0]
+	}
+
+	return &ast.OrOp{
+		Operands: ops,
+	}
+}
+
+func opsToAnd(ops []ast.FilterNode) ast.FilterNode {
+	if len(ops) == 0 {
+		return nil
+	}
+
+	if len(ops) == 1 {
+		return ops[0]
+	}
+
+	return &ast.AndOp{
+		Operands: ops,
+	}
+}
+
+func toEqualOp[T ~string](field T, key string, value string, wildcard bool) ast.FilterNode {
+	left := ast.Identifier{
+		Field: DefaultFieldByName(field),
+		Key:   key,
+	}
+	right := value
+
+	if wildcard {
+		return &ast.ContainsPrefixOp{
+			Left:  left,
+			Right: right,
+		}
+	}
+
+	return &ast.EqualOp{
+		Left:  left,
+		Right: right,
+	}
+}
+
+func toContainsOp[T ~string](field T, key string, value string, wildcard bool) ast.FilterNode {
+	left := ast.Identifier{
+		Field: DefaultFieldByName(field),
+		Key:   key,
+	}
+	right := value
+
+	if wildcard {
+		return &ast.ContainsPrefixOp{
+			Left:  left,
+			Right: right,
+		}
+	}
+
+	return &ast.ContainsOp{
+		Left:  left,
+		Right: right,
+	}
+}
+
// toAllocationAliasOp builds the filter tree for a "label-mapped" allocation
// filter (e.g. filterTeams=): a label with the aliased key takes precedence,
// falling back to an annotation with the same key. "Matches" below is an
// equality check, or a prefix check when wildcard is true. Logically:
//
//	(labels.Contains(labelName) && labels[labelName] matches filterValue) ||
//	(!labels.Contains(labelName) && annotations.Contains(labelName) && annotations[labelName] matches filterValue)
func toAllocationAliasOp(labelName string, filterValue string, wildcard bool) *ast.OrOp {
	// labels.Contains(labelName)
	labelContainsKey := &ast.ContainsOp{
		Left: ast.Identifier{
			Field: afilter.DefaultFieldByName(afilter.FieldLabel),
			Key:   "",
		},
		Right: labelName,
	}

	// annotations.Contains(labelName)
	annotationContainsKey := &ast.ContainsOp{
		Left: ast.Identifier{
			Field: afilter.DefaultFieldByName(afilter.FieldAnnotation),
			Key:   "",
		},
		Right: labelName,
	}

	// labels[labelName] equals/startswith filterValue
	var labelSubFilter ast.FilterNode
	if wildcard {
		labelSubFilter = &ast.ContainsPrefixOp{
			Left: ast.Identifier{
				Field: afilter.DefaultFieldByName(afilter.FieldLabel),
				Key:   labelName,
			},
			Right: filterValue,
		}
	} else {
		labelSubFilter = &ast.EqualOp{
			Left: ast.Identifier{
				Field: afilter.DefaultFieldByName(afilter.FieldLabel),
				Key:   labelName,
			},
			Right: filterValue,
		}
	}

	// annotations[labelName] equals/startswith filterValue
	var annotationSubFilter ast.FilterNode
	if wildcard {
		annotationSubFilter = &ast.ContainsPrefixOp{
			Left: ast.Identifier{
				Field: afilter.DefaultFieldByName(afilter.FieldAnnotation),
				Key:   labelName,
			},
			Right: filterValue,
		}
	} else {
		annotationSubFilter = &ast.EqualOp{
			Left: ast.Identifier{
				Field: afilter.DefaultFieldByName(afilter.FieldAnnotation),
				Key:   labelName,
			},
			Right: filterValue,
		}
	}

	// Logically, this is equivalent to:
	// (labels.Contains(labelName) && labels[labelName] = filterValue) ||
	// (!labels.Contains(labelName) && annotations.Contains(labelName) && annotations[labelName] = filterValue)
	//
	// NOTE(review): the Not branch clones labelContainsKey, presumably so the
	// two branches do not share the same node instance — confirm against
	// ast.Clone semantics.
	return &ast.OrOp{
		Operands: []ast.FilterNode{
			&ast.AndOp{
				Operands: []ast.FilterNode{
					labelContainsKey,
					labelSubFilter,
				},
			},
			&ast.AndOp{
				Operands: []ast.FilterNode{
					&ast.NotOp{
						Operand: ast.Clone(labelContainsKey),
					},
					annotationContainsKey,
					annotationSubFilter,
				},
			},
		},
	}
}
+
+// GetList provides a list of values from the first key if they exist, otherwise, it returns
+// the values from the second key.
+func GetList(primaryKey, secondaryKey string, qp mapper.PrimitiveMapReader) []string {
+	if raw := qp.GetList(primaryKey, ","); len(raw) > 0 {
+		return raw
+	}
+
+	return qp.GetList(secondaryKey, ",")
+}

+ 33 - 212
pkg/util/allocationfilterutil/queryfilters_test.go → pkg/util/filterutil/queryfilters_test.go

@@ -1,4 +1,4 @@
-package allocationfilterutil
+package filterutil
 
 import (
 	"testing"
@@ -8,6 +8,8 @@ import (
 	"github.com/opencost/opencost/pkg/util/mapper"
 )
 
+var allocCompiler = kubecost.NewAllocationMatchCompiler(nil)
+
 type mockClusterMap struct {
 	m map[string]*clusters.ClusterInfo
 }
@@ -45,7 +47,7 @@ func allocGenerator(props kubecost.AllocationProperties) kubecost.Allocation {
 	return a
 }
 
-func TestFiltersFromParamsV1(t *testing.T) {
+func TestAllocationFiltersFromParamsV1(t *testing.T) {
 	// TODO: __unallocated__ case?
 	cases := []struct {
 		name           string
@@ -384,6 +386,30 @@ func TestFiltersFromParamsV1(t *testing.T) {
 				}),
 			},
 		},
+		{
+			name: "single department, no label, annotation",
+			qp: map[string]string{
+				"filterDepartments": "pa-1",
+			},
+			shouldMatch: []kubecost.Allocation{
+				allocGenerator(kubecost.AllocationProperties{
+					Annotations: map[string]string{
+						"internal_product_umbrella": "pa-1",
+					},
+				}),
+			},
+			// should find labels first and fail
+			shouldNotMatch: []kubecost.Allocation{
+				allocGenerator(kubecost.AllocationProperties{
+					Labels: map[string]string{
+						"internal_product_umbrella": "ps-N",
+					},
+					Annotations: map[string]string{
+						"internal_product_umbrella": "pa-1",
+					},
+				}),
+			},
+		},
 		{
 			name: "wildcard department",
 			qp: map[string]string{
@@ -705,7 +731,11 @@ func TestFiltersFromParamsV1(t *testing.T) {
 				},
 			}
 
-			filter := AllocationFilterFromParamsV1(qpMapper, &labelConfig, clustersMap)
+			filterTree := AllocationFilterFromParamsV1(ConvertFilterQueryParams(qpMapper, &labelConfig), &labelConfig, clustersMap)
+			filter, err := allocCompiler.Compile(filterTree)
+			if err != nil {
+				t.Fatalf("compiling filter: %s", err)
+			}
 			for _, alloc := range c.shouldMatch {
 				if !filter.Matches(&alloc) {
 					t.Errorf("should have matched: %s", alloc.Name)
@@ -719,212 +749,3 @@ func TestFiltersFromParamsV1(t *testing.T) {
 		})
 	}
 }
-
-type FilterV1EqualsTestcase struct {
-	name     string
-	this     FilterV1
-	that     FilterV1
-	expected bool
-}
-
-func TestFilterV1_Equals(t *testing.T) {
-	testCases := []FilterV1EqualsTestcase{
-		{
-			name: "both filters nil",
-			this: FilterV1{
-				Annotations:     nil,
-				Containers:      nil,
-				Controllers:     nil,
-				ControllerKinds: nil,
-				Clusters:        nil,
-				Departments:     nil,
-				Environments:    nil,
-				Labels:          nil,
-				Namespaces:      nil,
-				Nodes:           nil,
-				Owners:          nil,
-				Pods:            nil,
-				Products:        nil,
-				Services:        nil,
-				Teams:           nil,
-			},
-			that: FilterV1{
-				Annotations:     nil,
-				Containers:      nil,
-				Controllers:     nil,
-				ControllerKinds: nil,
-				Clusters:        nil,
-				Departments:     nil,
-				Environments:    nil,
-				Labels:          nil,
-				Namespaces:      nil,
-				Nodes:           nil,
-				Owners:          nil,
-				Pods:            nil,
-				Products:        nil,
-				Services:        nil,
-				Teams:           nil,
-			},
-			expected: true,
-		},
-		{
-			name: "both filters not nil and matching",
-			this: FilterV1{
-				Annotations:     []string{"a1", "b1"},
-				Containers:      []string{"a1", "b1"},
-				Controllers:     []string{"a1", "b1"},
-				ControllerKinds: []string{"a1", "b1"},
-				Clusters:        []string{"a1", "b1"},
-				Departments:     []string{"a1", "b1"},
-				Environments:    []string{"a1", "b1"},
-				Labels:          []string{"a1", "b1"},
-				Namespaces:      []string{"a1", "b1"},
-				Nodes:           []string{"a1", "b1"},
-				Owners:          []string{"a1", "b1"},
-				Pods:            []string{"a1", "b1"},
-				Products:        []string{"a1", "b1"},
-				Services:        []string{"a1", "b1"},
-				Teams:           []string{"a1", "b1"},
-			},
-			that: FilterV1{
-				Annotations:     []string{"a1", "b1"},
-				Containers:      []string{"a1", "b1"},
-				Controllers:     []string{"a1", "b1"},
-				ControllerKinds: []string{"a1", "b1"},
-				Clusters:        []string{"a1", "b1"},
-				Departments:     []string{"a1", "b1"},
-				Environments:    []string{"a1", "b1"},
-				Labels:          []string{"a1", "b1"},
-				Namespaces:      []string{"a1", "b1"},
-				Nodes:           []string{"a1", "b1"},
-				Owners:          []string{"a1", "b1"},
-				Pods:            []string{"a1", "b1"},
-				Products:        []string{"a1", "b1"},
-				Services:        []string{"a1", "b1"},
-				Teams:           []string{"a1", "b1"},
-			},
-			expected: true,
-		},
-		{
-			name: "both filters diff count",
-			this: FilterV1{
-				Annotations:     []string{"a1", "b1", "c1"},
-				Containers:      []string{"a1", "b1"},
-				Controllers:     []string{"a1", "b1"},
-				ControllerKinds: []string{"a1", "b1"},
-				Clusters:        []string{"a1", "b1"},
-				Departments:     []string{"a1", "b1"},
-				Environments:    []string{"a1", "b1"},
-				Labels:          []string{"a1", "b1"},
-				Namespaces:      []string{"a1", "b1"},
-				Nodes:           []string{"a1", "b1"},
-				Owners:          []string{"a1", "b1"},
-				Pods:            []string{"a1", "b1"},
-				Products:        []string{"a1", "b1"},
-				Services:        []string{"a1", "b1"},
-				Teams:           []string{"a1", "b1"},
-			},
-			that: FilterV1{
-				Annotations:     []string{"a1", "b1"},
-				Containers:      []string{"a1", "b1"},
-				Controllers:     []string{"a1", "b1"},
-				ControllerKinds: []string{"a1", "b1"},
-				Clusters:        []string{"a1", "b1"},
-				Departments:     []string{"a1", "b1"},
-				Environments:    []string{"a1", "b1"},
-				Labels:          []string{"a1", "b1"},
-				Namespaces:      []string{"a1", "b1"},
-				Nodes:           []string{"a1", "b1"},
-				Owners:          []string{"a1", "b1"},
-				Pods:            []string{"a1", "b1"},
-				Products:        []string{"a1", "b1"},
-				Services:        []string{"a1", "b1"},
-				Teams:           []string{"a1", "b1"},
-			},
-			expected: false,
-		},
-		{
-			name: "slight mismatch",
-			this: FilterV1{
-				Annotations:     []string{"x1", "b1"},
-				Containers:      []string{"a1", "b1"},
-				Controllers:     []string{"a1", "b1"},
-				ControllerKinds: []string{"a1", "b1"},
-				Clusters:        []string{"a1", "b1"},
-				Departments:     []string{"a1", "b1"},
-				Environments:    []string{"a1", "b1"},
-				Labels:          []string{"a1", "b1"},
-				Namespaces:      []string{"a1", "b1"},
-				Nodes:           []string{"a1", "b1"},
-				Owners:          []string{"a1", "b1"},
-				Pods:            []string{"a1", "b1"},
-				Products:        []string{"a1", "b1"},
-				Services:        []string{"a1", "b1"},
-				Teams:           []string{"a1", "b1"},
-			},
-			that: FilterV1{
-				Annotations:     []string{"a1", "b1"},
-				Containers:      []string{"a1", "b1"},
-				Controllers:     []string{"a1", "b1"},
-				ControllerKinds: []string{"a1", "b1"},
-				Clusters:        []string{"a1", "b1"},
-				Departments:     []string{"a1", "b1"},
-				Environments:    []string{"a1", "b1"},
-				Labels:          []string{"a1", "b1"},
-				Namespaces:      []string{"a1", "b1"},
-				Nodes:           []string{"a1", "b1"},
-				Owners:          []string{"a1", "b1"},
-				Pods:            []string{"a1", "b1"},
-				Products:        []string{"a1", "b1"},
-				Services:        []string{"a1", "b1"},
-				Teams:           []string{"a1", "b1"},
-			},
-			expected: false,
-		},
-		{
-			name: "one nil",
-			this: FilterV1{
-				Annotations:     []string{"x1", "b1"},
-				Containers:      []string{"a1", "b1"},
-				Controllers:     []string{"a1", "b1"},
-				ControllerKinds: []string{"a1", "b1"},
-				Clusters:        []string{"a1", "b1"},
-				Departments:     []string{"a1", "b1"},
-				Environments:    []string{"a1", "b1"},
-				Labels:          []string{"a1", "b1"},
-				Namespaces:      []string{"a1", "b1"},
-				Nodes:           []string{"a1", "b1"},
-				Owners:          []string{"a1", "b1"},
-				Pods:            []string{"a1", "b1"},
-				Products:        []string{"a1", "b1"},
-				Services:        []string{"a1", "b1"},
-				Teams:           []string{"a1", "b1"},
-			},
-			that: FilterV1{
-				Annotations:     nil,
-				Containers:      nil,
-				Controllers:     nil,
-				ControllerKinds: nil,
-				Clusters:        nil,
-				Departments:     nil,
-				Environments:    nil,
-				Labels:          nil,
-				Namespaces:      nil,
-				Nodes:           nil,
-				Owners:          nil,
-				Pods:            nil,
-				Products:        nil,
-				Services:        nil,
-				Teams:           nil,
-			},
-			expected: false,
-		},
-	}
-
-	for _, tc := range testCases {
-		got := tc.this.Equals(tc.that)
-		if got != tc.expected {
-			t.Fatalf("expected %t, got: %t for test case: %s", tc.expected, got, tc.name)
-		}
-	}
-}

+ 53 - 0
pkg/util/filterutil/testhelpers.go

@@ -0,0 +1,53 @@
+package filterutil
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/opencost/opencost/pkg/filter21/ast"
+)
+
+func testingOnlyLess(left, right ast.FilterNode) bool {
+	leftStr := ast.ToPreOrderShortString(left)
+	rightStr := ast.ToPreOrderShortString(right)
+	return strings.Compare(leftStr, rightStr) < 0
+}
+
+func testingOnlySortedOperands(operands []ast.FilterNode) []ast.FilterNode {
+	var copy []ast.FilterNode
+	for _, operand := range operands {
+		copy = append(copy, operand)
+	}
+	sort.SliceStable(copy, func(i, j int) bool {
+		leftSorted := TestingOnlySortNode(copy[i])
+		rightSorted := TestingOnlySortNode(copy[j])
+
+		return testingOnlyLess(leftSorted, rightSorted)
+	})
+	return copy
+}
+
+// TestingOnlySortNode sorts the provided node deterministically, intended only
+// for use in unit tests to ensure that filter parsing steps produce logically-
+// equivalent filters. This is useful only for cases where filters are
+// constructed nondeterministically, like via a map iteration.
+func TestingOnlySortNode(n ast.FilterNode) ast.FilterNode {
+	switch concrete := n.(type) {
+	case *ast.AndOp:
+		return &ast.AndOp{
+			Operands: testingOnlySortedOperands(concrete.Operands),
+		}
+	case *ast.OrOp:
+		return &ast.OrOp{
+			Operands: testingOnlySortedOperands(concrete.Operands),
+		}
+	case *ast.NotOp:
+		return &ast.NotOp{
+			Operand: TestingOnlySortNode(concrete.Operand),
+		}
+	// This isn't great, but non-container ops are mostly safe. We don't need
+	// full deepcopy because this is for testing-only comparison
+	default:
+		return concrete
+	}
+}

+ 72 - 0
pkg/util/filterutil/v1.go

@@ -0,0 +1,72 @@
+package filterutil
+
+import (
+	"reflect"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/mapper"
+)
+
+func ConvertFilterQueryParams(qp mapper.PrimitiveMapReader, labelConfig *kubecost.LabelConfig) AllocationFilterV1 {
+	filter := AllocationFilterV1{
+		Annotations:     qp.GetList(ParamFilterAnnotations, ","),
+		Containers:      qp.GetList(ParamFilterContainers, ","),
+		Controllers:     qp.GetList(ParamFilterControllers, ","),
+		ControllerKinds: qp.GetList(ParamFilterControllerKinds, ","),
+		Clusters:        qp.GetList(ParamFilterClusters, ","),
+		Labels:          qp.GetList(ParamFilterLabels, ","),
+		Namespaces:      qp.GetList(ParamFilterNamespaces, ","),
+		Nodes:           qp.GetList(ParamFilterNodes, ","),
+		Pods:            qp.GetList(ParamFilterPods, ","),
+		Services:        qp.GetList(ParamFilterServices, ","),
+	}
+
+	if labelConfig != nil {
+		filter.Departments = qp.GetList(ParamFilterDepartments, ",")
+		filter.Environments = qp.GetList(ParamFilterEnvironments, ",")
+		filter.Owners = qp.GetList(ParamFilterOwners, ",")
+		filter.Products = qp.GetList(ParamFilterProducts, ",")
+		filter.Teams = qp.GetList(ParamFilterTeams, ",")
+	} else {
+		log.Debugf("No label config is available. Not creating filters for label-mapped 'fields'.")
+	}
+
+	return filter
+}
+
+type AllocationFilterV1 struct {
+	Annotations     []string `json:"annotations,omitempty"`
+	Containers      []string `json:"containers,omitempty"`
+	Controllers     []string `json:"controllers,omitempty"`
+	ControllerKinds []string `json:"controllerKinds,omitempty"`
+	Clusters        []string `json:"clusters,omitempty"`
+	Departments     []string `json:"departments,omitempty"`
+	Environments    []string `json:"environments,omitempty"`
+	Labels          []string `json:"labels,omitempty"`
+	Namespaces      []string `json:"namespaces,omitempty"`
+	Nodes           []string `json:"nodes,omitempty"`
+	Owners          []string `json:"owners,omitempty"`
+	Pods            []string `json:"pods,omitempty"`
+	Products        []string `json:"products,omitempty"`
+	Services        []string `json:"services,omitempty"`
+	Teams           []string `json:"teams,omitempty"`
+}
+
+func (f AllocationFilterV1) Equals(that AllocationFilterV1) bool {
+	return reflect.DeepEqual(f.Annotations, that.Annotations) &&
+		reflect.DeepEqual(f.Containers, that.Containers) &&
+		reflect.DeepEqual(f.Controllers, that.Controllers) &&
+		reflect.DeepEqual(f.ControllerKinds, that.ControllerKinds) &&
+		reflect.DeepEqual(f.Clusters, that.Clusters) &&
+		reflect.DeepEqual(f.Departments, that.Departments) &&
+		reflect.DeepEqual(f.Environments, that.Environments) &&
+		reflect.DeepEqual(f.Labels, that.Labels) &&
+		reflect.DeepEqual(f.Namespaces, that.Namespaces) &&
+		reflect.DeepEqual(f.Nodes, that.Nodes) &&
+		reflect.DeepEqual(f.Owners, that.Owners) &&
+		reflect.DeepEqual(f.Pods, that.Pods) &&
+		reflect.DeepEqual(f.Products, that.Products) &&
+		reflect.DeepEqual(f.Services, that.Services) &&
+		reflect.DeepEqual(f.Teams, that.Teams)
+}

+ 214 - 0
pkg/util/filterutil/v1_test.go

@@ -0,0 +1,214 @@
+package filterutil
+
+import (
+	"testing"
+)
+
+type FilterV1EqualsTestcase struct {
+	name     string
+	this     AllocationFilterV1
+	that     AllocationFilterV1
+	expected bool
+}
+
+func TestFilterV1_Equals(t *testing.T) {
+	testCases := []FilterV1EqualsTestcase{
+		{
+			name: "both filters nil",
+			this: AllocationFilterV1{
+				Annotations:     nil,
+				Containers:      nil,
+				Controllers:     nil,
+				ControllerKinds: nil,
+				Clusters:        nil,
+				Departments:     nil,
+				Environments:    nil,
+				Labels:          nil,
+				Namespaces:      nil,
+				Nodes:           nil,
+				Owners:          nil,
+				Pods:            nil,
+				Products:        nil,
+				Services:        nil,
+				Teams:           nil,
+			},
+			that: AllocationFilterV1{
+				Annotations:     nil,
+				Containers:      nil,
+				Controllers:     nil,
+				ControllerKinds: nil,
+				Clusters:        nil,
+				Departments:     nil,
+				Environments:    nil,
+				Labels:          nil,
+				Namespaces:      nil,
+				Nodes:           nil,
+				Owners:          nil,
+				Pods:            nil,
+				Products:        nil,
+				Services:        nil,
+				Teams:           nil,
+			},
+			expected: true,
+		},
+		{
+			name: "both filters not nil and matching",
+			this: AllocationFilterV1{
+				Annotations:     []string{"a1", "b1"},
+				Containers:      []string{"a1", "b1"},
+				Controllers:     []string{"a1", "b1"},
+				ControllerKinds: []string{"a1", "b1"},
+				Clusters:        []string{"a1", "b1"},
+				Departments:     []string{"a1", "b1"},
+				Environments:    []string{"a1", "b1"},
+				Labels:          []string{"a1", "b1"},
+				Namespaces:      []string{"a1", "b1"},
+				Nodes:           []string{"a1", "b1"},
+				Owners:          []string{"a1", "b1"},
+				Pods:            []string{"a1", "b1"},
+				Products:        []string{"a1", "b1"},
+				Services:        []string{"a1", "b1"},
+				Teams:           []string{"a1", "b1"},
+			},
+			that: AllocationFilterV1{
+				Annotations:     []string{"a1", "b1"},
+				Containers:      []string{"a1", "b1"},
+				Controllers:     []string{"a1", "b1"},
+				ControllerKinds: []string{"a1", "b1"},
+				Clusters:        []string{"a1", "b1"},
+				Departments:     []string{"a1", "b1"},
+				Environments:    []string{"a1", "b1"},
+				Labels:          []string{"a1", "b1"},
+				Namespaces:      []string{"a1", "b1"},
+				Nodes:           []string{"a1", "b1"},
+				Owners:          []string{"a1", "b1"},
+				Pods:            []string{"a1", "b1"},
+				Products:        []string{"a1", "b1"},
+				Services:        []string{"a1", "b1"},
+				Teams:           []string{"a1", "b1"},
+			},
+			expected: true,
+		},
+		{
+			name: "both filters diff count",
+			this: AllocationFilterV1{
+				Annotations:     []string{"a1", "b1", "c1"},
+				Containers:      []string{"a1", "b1"},
+				Controllers:     []string{"a1", "b1"},
+				ControllerKinds: []string{"a1", "b1"},
+				Clusters:        []string{"a1", "b1"},
+				Departments:     []string{"a1", "b1"},
+				Environments:    []string{"a1", "b1"},
+				Labels:          []string{"a1", "b1"},
+				Namespaces:      []string{"a1", "b1"},
+				Nodes:           []string{"a1", "b1"},
+				Owners:          []string{"a1", "b1"},
+				Pods:            []string{"a1", "b1"},
+				Products:        []string{"a1", "b1"},
+				Services:        []string{"a1", "b1"},
+				Teams:           []string{"a1", "b1"},
+			},
+			that: AllocationFilterV1{
+				Annotations:     []string{"a1", "b1"},
+				Containers:      []string{"a1", "b1"},
+				Controllers:     []string{"a1", "b1"},
+				ControllerKinds: []string{"a1", "b1"},
+				Clusters:        []string{"a1", "b1"},
+				Departments:     []string{"a1", "b1"},
+				Environments:    []string{"a1", "b1"},
+				Labels:          []string{"a1", "b1"},
+				Namespaces:      []string{"a1", "b1"},
+				Nodes:           []string{"a1", "b1"},
+				Owners:          []string{"a1", "b1"},
+				Pods:            []string{"a1", "b1"},
+				Products:        []string{"a1", "b1"},
+				Services:        []string{"a1", "b1"},
+				Teams:           []string{"a1", "b1"},
+			},
+			expected: false,
+		},
+		{
+			name: "slight mismatch",
+			this: AllocationFilterV1{
+				Annotations:     []string{"x1", "b1"},
+				Containers:      []string{"a1", "b1"},
+				Controllers:     []string{"a1", "b1"},
+				ControllerKinds: []string{"a1", "b1"},
+				Clusters:        []string{"a1", "b1"},
+				Departments:     []string{"a1", "b1"},
+				Environments:    []string{"a1", "b1"},
+				Labels:          []string{"a1", "b1"},
+				Namespaces:      []string{"a1", "b1"},
+				Nodes:           []string{"a1", "b1"},
+				Owners:          []string{"a1", "b1"},
+				Pods:            []string{"a1", "b1"},
+				Products:        []string{"a1", "b1"},
+				Services:        []string{"a1", "b1"},
+				Teams:           []string{"a1", "b1"},
+			},
+			that: AllocationFilterV1{
+				Annotations:     []string{"a1", "b1"},
+				Containers:      []string{"a1", "b1"},
+				Controllers:     []string{"a1", "b1"},
+				ControllerKinds: []string{"a1", "b1"},
+				Clusters:        []string{"a1", "b1"},
+				Departments:     []string{"a1", "b1"},
+				Environments:    []string{"a1", "b1"},
+				Labels:          []string{"a1", "b1"},
+				Namespaces:      []string{"a1", "b1"},
+				Nodes:           []string{"a1", "b1"},
+				Owners:          []string{"a1", "b1"},
+				Pods:            []string{"a1", "b1"},
+				Products:        []string{"a1", "b1"},
+				Services:        []string{"a1", "b1"},
+				Teams:           []string{"a1", "b1"},
+			},
+			expected: false,
+		},
+		{
+			name: "one nil",
+			this: AllocationFilterV1{
+				Annotations:     []string{"x1", "b1"},
+				Containers:      []string{"a1", "b1"},
+				Controllers:     []string{"a1", "b1"},
+				ControllerKinds: []string{"a1", "b1"},
+				Clusters:        []string{"a1", "b1"},
+				Departments:     []string{"a1", "b1"},
+				Environments:    []string{"a1", "b1"},
+				Labels:          []string{"a1", "b1"},
+				Namespaces:      []string{"a1", "b1"},
+				Nodes:           []string{"a1", "b1"},
+				Owners:          []string{"a1", "b1"},
+				Pods:            []string{"a1", "b1"},
+				Products:        []string{"a1", "b1"},
+				Services:        []string{"a1", "b1"},
+				Teams:           []string{"a1", "b1"},
+			},
+			that: AllocationFilterV1{
+				Annotations:     nil,
+				Containers:      nil,
+				Controllers:     nil,
+				ControllerKinds: nil,
+				Clusters:        nil,
+				Departments:     nil,
+				Environments:    nil,
+				Labels:          nil,
+				Namespaces:      nil,
+				Nodes:           nil,
+				Owners:          nil,
+				Pods:            nil,
+				Products:        nil,
+				Services:        nil,
+				Teams:           nil,
+			},
+			expected: false,
+		},
+	}
+
+	for _, tc := range testCases {
+		got := tc.this.Equals(tc.that)
+		if got != tc.expected {
+			t.Fatalf("expected %t, got: %t for test case: %s", tc.expected, got, tc.name)
+		}
+	}
+}

+ 30 - 0
pkg/util/typeutil/typeutil.go

@@ -0,0 +1,30 @@
+package typeutil
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// TypeOf is a utility that can covert a T type to a package + type name for generic types.
+func TypeOf[T any]() string {
+	var inst T
+	var prefix string
+
+	// get a reflect.Type of a variable with type T
+	t := reflect.TypeOf(inst)
+
+	// pointer types do not carry the adequate type information, so we need to extract the
+	// underlying types until we reach the non-pointer type, we prepend a * each depth
+	for t != nil && t.Kind() == reflect.Pointer {
+		prefix += "*"
+		t = t.Elem()
+	}
+
+	// this should not be possible, but in the event that it does, we want to be loud about it
+	if t == nil {
+		panic(fmt.Sprintf("Unable to generate a key for type: %+v", reflect.TypeOf(inst)))
+	}
+
+	// combine the prefix, package path, and the type name
+	return fmt.Sprintf("%s%s/%s", prefix, t.PkgPath(), t.Name())
+}

+ 20 - 18
spec/opencost-specv01.md

@@ -1,16 +1,16 @@
 # OpenCost Specification
 
 
-The OpenCost Spec is a vendor-neutral specification for measuring and allocating infrastructure and container costs in Kubernetes environments. 
+The OpenCost Spec is a vendor-neutral specification for measuring and allocating infrastructure and container costs in Kubernetes environments.
 
 
 ## Introduction
 
 
-Kubernetes enables complex deployments of containerized workloads, which are often transient and consume variable amounts of cluster resources. While this enables teams to construct powerful solutions to a broad range of technical problems, it also creates complexities when measuring the resource utilization and costs of workloads and their associated infrastructure within the  dynamics of shared Kubernetes environments. 
+Kubernetes enables complex deployments of containerized workloads, which are often transient and consume variable amounts of cluster resources. While this enables teams to construct powerful solutions to a broad range of technical problems, it also creates complexities when measuring the resource utilization and costs of workloads and their associated infrastructure within the  dynamics of shared Kubernetes environments.
 
 
-As Kubernetes adoption increases within an organization, these complexities become a business-critical challenge to solve. In this document, we specify a vendor-agnostic methodology for accurately measuring and allocating the costs of a Kubernetes cluster to its hosted tenants. This community resource is maintained by Kubernetes practitioners and we welcome all contributions. 
+As Kubernetes adoption increases within an organization, these complexities become a business-critical challenge to solve. In this document, we specify a vendor-agnostic methodology for accurately measuring and allocating the costs of a Kubernetes cluster to its hosted tenants. This community resource is maintained by Kubernetes practitioners and we welcome all contributions.
 
 
 ## Foundational definitions
@@ -46,18 +46,21 @@ Cluster Asset Costs can be further segmented into **Resource Allocation Costs**
    <td><strong>Resource Allocation Costs</strong>
 <p>
 (for all assets)
+</p>
    </td>
    <td><strong>+</strong>
    </td>
    <td><strong>Resource Usage Costs</strong>
 <p>
 (for all assets)
+</p>
    </td>
    <td><strong>+</strong>
    </td>
    <td><strong>Cluster Overhead Costs</strong>
 <p>
 (for cluster)
+</p>
    </td>
   </tr>
 </table>
@@ -65,11 +68,11 @@ Cluster Asset Costs can be further segmented into **Resource Allocation Costs**
 
 The following chart shows these relationships:
 
-<img width="796" alt="image4" src="https://user-images.githubusercontent.com/453512/171577990-8f7c9a53-f5b1-4fbc-b2f6-75cd6ea67960.png">
+<img width="796" alt="image4" src="https://user-images.githubusercontent.com/453512/171577990-8f7c9a53-f5b1-4fbc-b2f6-75cd6ea67960.png"/>
 
 While billing models can differ by environment, below are common examples of segmentation by Allocation, Usage and Overhead Costs.
 
-<img width="292" alt="image1" src="https://user-images.githubusercontent.com/453512/171578190-d84dc3a7-1d20-4575-9bcc-2a5722de5eea.png">
+<img width="292" alt="image1" src="https://user-images.githubusercontent.com/453512/171578190-d84dc3a7-1d20-4575-9bcc-2a5722de5eea.png"/>
 
 
 Once calculated, these Asset Costs can then be distributed to the tenants that consume them, where Workload Costs plus Idle Costs equals Asset Costs. **Workload costs** are expenses that can be directly attributed to a set of Kubernetes workloads, e.g. a container, pod, deployment, etc. **Cluster Idle Costs** are the portion of Resource Allocation Costs that are not allocated to any workload[^1].
@@ -101,7 +104,7 @@ The following chart shows these relationships:
 
 ## Cluster Asset Costs
 
-Cluster Assets are observable entities within a Kubernetes cluster that directly incur costs related to their resources. Asset Costs consist of Resource Allocation Costs and Resource Usage Costs. Every Asset conforming to this specification MUST include at least one cost component with Amount, Unit and Rate attributes as well as a TotalCost value. 
+Cluster Assets are observable entities within a Kubernetes cluster that directly incur costs related to their resources. Asset Costs consist of Resource Allocation Costs and Resource Usage Costs. Every Asset conforming to this specification MUST include at least one cost component with Amount, Unit and Rate attributes as well as a TotalCost value.
 
 Attributes for measured Resource Allocation Costs:
 
@@ -109,7 +112,7 @@ Attributes for measured Resource Allocation Costs:
 
 * [float] Amount - the amount of resource reserved by the asset, e.g. 2 CPU cores
 * [float] Duration - time between the start and end of the allocation period measured in hours, e.g. 24 hours
-* [string] Unit - the amount’s unit of measurement, e.g. CPU cores 
+* [string] Unit - the amount’s unit of measurement, e.g. CPU cores
 * [float] HourlyRate - cost per one unit hour, e.g. $0.2 per CPU hourly rate
 * [float] Total Cost - defined as Amount * Duration * HourlyRate
 
@@ -189,7 +192,7 @@ Workloads are defined as entities to which Asset Costs are committed. Some resou
   <tr>
    <td>Storage Volume
    </td>
-   <td>The storage capacity of Persistent Volume Claim (PVC) requests measured in bytes or gigabytes. Attached at the Kubernetes pod-level. 
+   <td>The storage capacity of Persistent Volume Claim (PVC) requests measured in bytes or gigabytes. Attached at the Kubernetes pod-level.
    </td>
   </tr>
   <tr>
@@ -206,7 +209,7 @@ Workloads are defined as entities to which Asset Costs are committed. Some resou
   </tr>
 </table>
 
-The following workload cost aggregations are supported in a complete implementation in the OpenCost Spec: 
+The following workload cost aggregations are supported in a complete implementation in the OpenCost Spec:
 
 * container
 * pod
@@ -230,7 +233,7 @@ Shared Workload Costs, Cluster Idle Costs, and Overhead Costs are common example
 2. Proportionate to a tenant's consumption of Cluster Asset costs
 3. Custom metric, e.g. bytes of network egress
 
-A full implementation of the spec should support various methods of distributing shared costs. 
+A full implementation of the spec should support various methods of distributing shared costs.
 
 
 ## Idle Costs
@@ -260,6 +263,7 @@ Idle Costs can be calculated at both the Asset/Resource level as well as the Wor
    <td><strong>Cluster </strong>
 <p>
 <strong>Idle %</strong>
+</p>
    </td>
    <td><strong>=</strong>
    </td>
@@ -274,12 +278,12 @@ Idle Costs can be calculated at both the Asset/Resource level as well as the Wor
 
 
 
-## 
+##
 The following chart shows these relationships:
 ![image3](https://user-images.githubusercontent.com/453512/171579570-055bebe8-cc97-4129-9238-c4bcda8e123c.png)
 
 
-Asset Idle Cost can be calculated by individual assets, groups of assets, cluster(s), and by individual resources, e.g. CPU. Resources that are strictly billed on usage can be viewed to have 100% efficiency but should not be included when measuring idle percentage of a cluster. 
+Asset Idle Cost can be calculated by individual assets, groups of assets, cluster(s), and by individual resources, e.g. CPU. Resources that are strictly billed on usage can be viewed to have 100% efficiency but should not be included when measuring idle percentage of a cluster.
 
 Workload Idle Costs is a cost-weighted measurement of [requested](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container) resources that are unused. Workload Idle Costs can be calculated on any grouping of Kubernetes workloads, e.g. containers, pods, labels, annotations, namespaces, etc.
 
@@ -298,7 +302,7 @@ The state of a pod will affect the ability to assign costs and whether a resourc
 **Cluster Assets** – Observable entities within a Kubernetes cluster that directly incur costs related to their resources. Examples include nodes, persistent volumes, attached disks, load balancers.
 
 
-**Container** - An instance of a container image. You may have multiple copies of the same image running at the same time. [More info](https://kubernetes.io/docs/concepts/containers/) 
+**Container** - An instance of a container image. You may have multiple copies of the same image running at the same time. [More info](https://kubernetes.io/docs/concepts/containers/)
 
 
 **Image** - A template of a container which contains software (usually microservices) that needs to be run. [More info](https://kubernetes.io/docs/concepts/containers/images/)
@@ -310,10 +314,10 @@ The state of a pod will affect the ability to assign costs and whether a resourc
 **Pod** - A Kubernetes specific concept that consists of a group of containers. A pod is treated as a single block of resources that may be scheduled or scaled on a cluster. [More info](https://kubernetes.io/docs/concepts/workloads/pods/)
 
 
-**Container Orchestration** - Manages the cluster of server instances and maintains the lifecycle of containers and pods. Scheduling is a function of the container orchestrator which schedules pods/containers to run on a server instance. 
+**Container Orchestration** - Manages the cluster of server instances and maintains the lifecycle of containers and pods. Scheduling is a function of the container orchestrator which schedules pods/containers to run on a server instance.
 
 
-**Cluster** - A group of server instances 
+**Cluster** - A group of server instances
 
 
 **Namespace** - A Kubernetes concept which creates a ‘virtual’ cluster where pods/containers may be deployed and observed discreetly from other namespaces. [More info](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/)
@@ -347,7 +351,7 @@ Sampling Kubernetes resources is recommended with the following metrics / dataso
 ## Appendix C
 
 
-Working examples of OpenCost data to come! 
+Working examples of OpenCost data to come!
 
 
 ## Notes
@@ -357,5 +361,3 @@ Working examples of OpenCost data to come!
 
 [^2]:
      This is because containers are the smallest identifiable unit of "thing that uses resources." For example, the lowest level of reliable CPU usage information is usually a container.
-
-

+ 8 - 0
ui/Dockerfile

@@ -9,9 +9,17 @@ FROM nginx:alpine
 COPY --from=builder /opt/ui/dist /var/www
 COPY default.nginx.conf /etc/nginx/conf.d/
 COPY nginx.conf /etc/nginx/
+RUN rm -rf /etc/nginx/conf.d/default.conf
+
+RUN adduser 1001 -g 1000 -D
+RUN chown 1001:1000 -R /var/www
+RUN chown 1001:1000 -R /etc/nginx
 
 ENV BASE_URL=/model
 
+
+USER 1001
+
 COPY ./docker-entrypoint.sh /usr/local/bin/
 ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
 CMD ["nginx", "-g", "daemon off;"]

+ 2 - 1
ui/src/Reports.js

@@ -40,6 +40,7 @@ const aggregationOptions = [
   { name: 'Controller', value: 'controller' },
   { name: 'Service', value: 'service' },
   { name: 'Pod', value: 'pod' },
+  { name: 'Container', value: 'container' },
 ]
 
 const accumulateOptions = [
@@ -121,7 +122,7 @@ const ReportsPage = () => {
   const [fetch, setFetch] = useState(false)
   const [loading, setLoading] = useState(true)
   const [errors, setErrors] = useState([])
-  
+
   // Initialize once, then fetch report each time setFetch(true) is called
   useEffect(() => {
     if (!init) {