Quellcode durchsuchen

Merge branch 'develop' into sean/fix-aws-catalog-load

Sean Holcomb vor 2 Jahren
Ursprung
Commit
ea0157e6dd

+ 4 - 4
.github/workflows/stale.yml

@@ -9,10 +9,10 @@ jobs:
     steps:
       - uses: actions/stale@v8
         with:
-          stale-issue-message: 'This issue has been marked as stale because it has been open for 180 days with no activity. Please remove the stale label or comment or this issue will be closed in 5 days.'
-          close-issue-message: 'This issue was closed because it has been inactive for 185 days with no activity.'
-          stale-pr-message: 'This pull request has been marked as stale because it has been open for 60 days with no activity. Please remove the stale label or comment or this pull request will be closed in 5 days.'
-          close-pr-message: 'This pull request was closed because it has been inactive for 65 days with no activity.'
+          stale-issue-message: 'This issue has been marked as stale because it has been open for 360 days with no activity. Please remove the stale label or comment or this issue will be closed in 5 days.'
+          close-issue-message: 'This issue was closed because it has been inactive for 365 days.'
+          stale-pr-message: 'This pull request has been marked as stale because it has been open for 90 days with no activity. Please remove the stale label or comment or this pull request will be closed in 5 days.'
+          close-pr-message: 'This pull request was closed because it has been inactive for 95 days.'
           days-before-issue-stale: 360
           days-before-issue-close: 5
           days-before-pr-stale: 90

+ 3 - 0
.gitignore

@@ -10,3 +10,6 @@ cmd/costmodel/costmodel
 cmd/costmodel/costmodel-amd64
 cmd/costmodel/costmodel-arm64
 pkg/cloud/azureorphan_test.go
+
+# VS Code
+.vscode

+ 4 - 10
CODE_OF_CONDUCT.md

@@ -23,22 +23,16 @@ Examples of unacceptable behavior include:
 * Trolling, insulting or derogatory comments, and personal or political attacks
 * Public or private harassment
 * Publishing others' private information, such as a physical or email address, without their explicit permission
-* No solicitation. This is a community to help foster innovation, education, and professional & personal networking. 
+* No solicitation. This is a community to help foster innovation, education, and professional & personal networking.
 * Other conduct which could reasonably be considered inappropriate in a professional setting
 
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. 
-By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. 
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project.
 Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team.
 
 # Reporting
 
-For incidents occurring in the OpenCost community, contact the OpenCost Code of Conduct Committee via conduct@kubecost.com. You can expect a response within two business days.
-For other projects, please contact the OpenCost staff via conduct@kubecost.com. You can expect a response within three business days.
-
-# Enforcement
-
-The OpenCost project's Code of Conduct Committee enforces code of conduct issues. For all other projects, the OpenCost enforces code of conduct issues.
-Both bodies try to resolve incidents without punishment, but may remove people from the project or OpenCost communities at their discretion.
+Instances of abusive, harassing, or otherwise unacceptable behavior in the OpenCost community may be reported by contacting the project maintainer(s) who will try to resolve incidents without punishment, but may remove people from the project or OpenCost community at their discretion.
 
 # Acknowledgements
 This Code of Conduct is adapted from the Contributor Covenant (http://contributor-covenant.org), version 2.0 available at http://contributor-covenant.org/version/2/0/code_of_conduct/

+ 4 - 4
CONTRIBUTING.md

@@ -3,7 +3,7 @@
 Thanks for your help improving the OpenCost project! There are many ways to contribute to the project, including the following:
 
 * contributing or providing feedback on the [OpenCost Spec](https://github.com/opencost/opencost/tree/develop/spec)
-* contributing documentation here or to the [OpenCost website](https://github.com/kubecost/opencost-website)
+* contributing documentation here or to the [OpenCost website](https://github.com/opencost/opencost-website)
 * joining the discussion in the [CNCF Slack](https://slack.cncf.io/) in the [#opencost](https://cloud-native.slack.com/archives/C03D56FPD4G) channel
 * keep up with community events using our [Calendar](https://bit.ly/opencost-calendar)
 * participating in the fortnightly [OpenCost Working Group](https://bit.ly/opencost-calendar) meetings ([notes here](https://bit.ly/opencost-meeting))
@@ -12,7 +12,7 @@ Thanks for your help improving the OpenCost project! There are many ways to cont
 ## Getting Help
 
 If you have a question about OpenCost or have encountered problems using it,
-you can start by asking a question on [CNCF Slack](https://slack.cncf.io/) in the [#opencost](https://cloud-native.slack.com/archives/C03D56FPD4G) channel or via email at [opencost@kubecost.com](opencost@kubecost.com)
+you can start by asking a question on [CNCF Slack](https://slack.cncf.io/) in the [#opencost](https://cloud-native.slack.com/archives/C03D56FPD4G) channel or attend the biweekly [OpenCost Working Group community meeting](https://bit.ly/opencost-meeting) from the [Community Calendar](https://bit.ly/opencost-calendar) to discuss OpenCost development.
 
 ## Workflow
 
@@ -85,7 +85,7 @@ export KUBECONFIG=~/.kube/config
 An example of the full command:
 
 ```bash
-ETL_PATH_PREFIX="/my/cool/path/kubecost/var/config" CONFIG_PATH="/my/cool/path/kubecost/var/config" PROMETHEUS_SERVER_ENDPOINT="http://127.0.0.1:9090" go run main.go
+PROMETHEUS_SERVER_ENDPOINT="http://127.0.0.1:9090" go run main.go
 ```
 
 ## Running the integration tests
@@ -109,4 +109,4 @@ Please write a commit message with Fixes Issue # if there is an outstanding issu
 
 Please run `go fmt` on the project directory. Lint can be okay (for example, comments on exported functions are nice but not required on the server).
 
-Please email us [opencost@kubecost.com](opencost@kubecost.com) or reach out to us on [CNCF Slack](https://slack.cncf.io/) in the [#opencost](https://cloud-native.slack.com/archives/C03D56FPD4G) channel if you need help or have any questions!
+Please reach us on [CNCF Slack](https://slack.cncf.io/) in the [#opencost](https://cloud-native.slack.com/archives/C03D56FPD4G) channel or attend the biweekly [OpenCost Working Group community meeting](https://bit.ly/opencost-meeting) from the [Community Calendar](https://bit.ly/opencost-calendar) to discuss OpenCost development.

+ 1 - 1
README.md

@@ -37,7 +37,7 @@ and contributing changes.
 
 ## Community
 
-If you need any support or have any questions on contributing to the project, you can reach us on [CNCF Slack](https://slack.cncf.io/) in the [#opencost](https://cloud-native.slack.com/archives/C03D56FPD4G) channel, email at [opencost@kubecost.com](opencost@kubecost.com), or attend the biweekly [OpenCost Working Group community meeting](https://bit.ly/opencost-meeting) from the [Community Calendar](https://bit.ly/opencost-calendar).
+If you need any support or have any questions on contributing to the project, you can reach us on [CNCF Slack](https://slack.cncf.io/) in the [#opencost](https://cloud-native.slack.com/archives/C03D56FPD4G) channel or attend the biweekly [OpenCost Working Group community meeting](https://bit.ly/opencost-meeting) from the [Community Calendar](https://bit.ly/opencost-calendar) to discuss OpenCost development.
 
 ## FAQ
 

+ 1 - 1
ROADMAP.md

@@ -11,4 +11,4 @@ __2023 roadmap__
 * More robust [API documentation](https://www.opencost.io/docs/integrations/api) and examples.
 * Expose carbon emission ratings
 
-Please contact us at opencost@kubecost.com if you're interest in more detail.
+Please reach us on [CNCF Slack](https://slack.cncf.io/) in the [#opencost](https://cloud-native.slack.com/archives/C03D56FPD4G) channel or attend the biweekly [OpenCost Working Group community meeting](https://bit.ly/opencost-meeting) from the [Community Calendar](https://bit.ly/opencost-calendar) to discuss OpenCost development.

+ 2 - 1
pkg/cloud/azure/azurestorageintegration.go

@@ -30,12 +30,13 @@ func (asi *AzureStorageIntegration) GetCloudCost(start, end time.Time) (*kubecos
 			k8sPtc = 1.0
 		}
 
+		providerID, _ := AzureSetProviderID(abv)
 		// Create CloudCost
 		// Using the NetCost as a 'placeholder' for Invoiced and Amortized Net costs now,
 		// until we can revisit and spend the time to do the calculations correctly
 		cc := &kubecost.CloudCost{
 			Properties: &kubecost.CloudCostProperties{
-				ProviderID:      AzureSetProviderID(abv),
+				ProviderID:      providerID,
 				Provider:        kubecost.AzureProvider,
 				AccountID:       abv.SubscriptionID,
 				InvoiceEntityID: abv.InvoiceEntityID,

+ 10 - 8
pkg/cloud/azure/billingexportparser.go

@@ -258,28 +258,30 @@ func encloseInBrackets(jsonString string) string {
 	return fmt.Sprintf("{%s}", jsonString)
 }
 
-func AzureSetProviderID(abv *BillingRowValues) string {
+// isVMSSShared reports whether the providerID was derived WITHOUT resolving an
+// actual VMName, i.e. the cost row belongs to a VMSS group (shared) rather than
+// to a single identifiable VM.
+func AzureSetProviderID(abv *BillingRowValues) (providerID string, isVMSSShared bool) {
 	category := SelectAzureCategory(abv.MeterCategory)
 	if value, ok := abv.AdditionalInfo["VMName"]; ok {
-		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
+		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value)), false
 	} else if value, ok := abv.AdditionalInfo["VmName"]; ok {
-		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
+		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value)), false
 	} else if value2, ook := abv.AdditionalInfo["IpAddress"]; ook && abv.MeterCategory == "Virtual Network" {
-		return fmt.Sprintf("%v", value2)
+		return fmt.Sprintf("%v", value2), false
 	}
 
 	if category == kubecost.StorageCategory || (category == kubecost.NetworkCategory && abv.MeterCategory == "Bandwidth") {
 		if value2, ok2 := abv.Tags["creationSource"]; ok2 {
 			creationSource := fmt.Sprintf("%v", value2)
-			return strings.TrimPrefix(creationSource, "aks-")
+			return strings.TrimPrefix(creationSource, "aks-"), true
 		} else if value2, ok2 := abv.Tags["aks-managed-creationSource"]; ok2 {
 			creationSource := fmt.Sprintf("%v", value2)
-			return strings.TrimPrefix(creationSource, "vmssclient-")
+			return strings.TrimPrefix(creationSource, "vmssclient-"), true
 		} else {
-			return getSubStringAfterFinalSlash(abv.InstanceID)
+			return getSubStringAfterFinalSlash(abv.InstanceID), true
 		}
 	}
-	return "azure://" + resourceGroupToLowerCase(abv.InstanceID)
+	return "azure://" + resourceGroupToLowerCase(abv.InstanceID), true
 }
 
 func SelectAzureCategory(meterCategory string) string {

+ 1 - 1
pkg/cmd/agent/agent.go

@@ -117,7 +117,7 @@ func newPrometheusClient() (prometheus.Client, error) {
 	api := prometheusAPI.NewAPI(promCli)
 	_, err = api.Config(context.Background())
 	if err != nil {
-		log.Infof("No valid prometheus config file at %s. Error: %s . Troubleshooting help available at: %s. Ignore if using cortex/thanos here.", address, err.Error(), prom.PrometheusTroubleshootingURL)
+		log.Infof("No valid prometheus config file at %s. Error: %s . Troubleshooting help available at: %s. Ignore if using cortex/mimir/thanos here.", address, err.Error(), prom.PrometheusTroubleshootingURL)
 	} else {
 		log.Infof("Retrieved a prometheus config file from: %s", address)
 	}

+ 1 - 1
pkg/costmodel/aggregation.go

@@ -2215,7 +2215,7 @@ func (a *Accesses) ComputeAllocationHandlerSummary(w http.ResponseWriter, r *htt
 
 	sasl := []*kubecost.SummaryAllocationSet{}
 	for _, as := range asr.Slice() {
-		sas := kubecost.NewSummaryAllocationSet(as, nil, []kubecost.AllocationMatchFunc{}, false, false)
+		sas := kubecost.NewSummaryAllocationSet(as, nil, nil, false, false)
 		sasl = append(sasl, sas)
 	}
 	sasr := kubecost.NewSummaryAllocationSetRange(sasl...)

+ 2 - 2
pkg/costmodel/cluster.go

@@ -179,8 +179,8 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 	queryPVCInfo := fmt.Sprintf(`avg(avg_over_time(kube_persistentvolumeclaim_info{%s}[%s])) by (%s, volumename, persistentvolumeclaim, namespace)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	queryLocalStorageCost := fmt.Sprintf(`sum_over_time(sum(container_fs_limit_bytes{device!="tmpfs", id="/", %s}) by (instance, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
 	queryLocalStorageUsedCost := fmt.Sprintf(`sum_over_time(sum(container_fs_usage_bytes{device!="tmpfs", id="/", %s}) by (instance, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
-	queryLocalStorageUsedAvg := fmt.Sprintf(`avg(avg_over_time(container_fs_usage_bytes{device!="tmpfs", id="/", %s}[%s])) by (instance, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryLocalStorageUsedMax := fmt.Sprintf(`max(max_over_time(container_fs_usage_bytes{device!="tmpfs", id="/", %s}[%s])) by (instance, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+	queryLocalStorageUsedAvg := fmt.Sprintf(`avg(sum(avg_over_time(container_fs_usage_bytes{device!="tmpfs", id="/", %s}[%s])) by (instance, %s, job)) by (instance, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
+	queryLocalStorageUsedMax := fmt.Sprintf(`max(sum(max_over_time(container_fs_usage_bytes{device!="tmpfs", id="/", %s}[%s])) by (instance, %s, job)) by (instance, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
 	queryLocalStorageBytes := fmt.Sprintf(`avg_over_time(sum(container_fs_limit_bytes{device!="tmpfs", id="/", %s}) by (instance, %s)[%s:%dm])`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
 	queryLocalActiveMins := fmt.Sprintf(`count(node_total_hourly_cost{%s}) by (%s, node)[%s:%dm]`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
 

+ 1 - 1
pkg/costmodel/router.go

@@ -1555,7 +1555,7 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	api := prometheusAPI.NewAPI(promCli)
 	_, err = api.Config(context.Background())
 	if err != nil {
-		log.Infof("No valid prometheus config file at %s. Error: %s . Troubleshooting help available at: %s. Ignore if using cortex/thanos here.", address, err.Error(), prom.PrometheusTroubleshootingURL)
+		log.Infof("No valid prometheus config file at %s. Error: %s . Troubleshooting help available at: %s. Ignore if using cortex/mimir/thanos here.", address, err.Error(), prom.PrometheusTroubleshootingURL)
 	} else {
 		log.Infof("Retrieved a prometheus config file from: %s", address)
 	}

+ 40 - 0
pkg/filter21/ast/walker.go

@@ -2,6 +2,7 @@ package ast
 
 import (
 	"fmt"
+	"sort"
 	"strings"
 
 	"github.com/opencost/opencost/pkg/filter21/util"
@@ -367,3 +368,42 @@ func indent(depth int) string {
 	}
 	return strings.Repeat("  ", depth)
 }
+
+func Fields(filter FilterNode) []Field {
+	fields := map[Field]bool{}
+
+	PreOrderTraversal(filter, func(fn FilterNode, state TraversalState) {
+		if fn == nil {
+			return
+		}
+		switch n := fn.(type) {
+		case *EqualOp:
+			if n.Left.Field != nil {
+				fields[*n.Left.Field] = true
+			}
+		case *ContainsOp:
+			if n.Left.Field != nil {
+				fields[*n.Left.Field] = true
+			}
+		case *ContainsPrefixOp:
+			if n.Left.Field != nil {
+				fields[*n.Left.Field] = true
+			}
+		case *ContainsSuffixOp:
+			if n.Left.Field != nil {
+				fields[*n.Left.Field] = true
+			}
+		}
+	})
+
+	response := make([]Field, 0, len(fields))
+	for field := range fields {
+		response = append(response, field)
+	}
+
+	sort.Slice(response, func(i, j int) bool {
+		return response[i].Name < response[j].Name
+	})
+
+	return response
+}

+ 100 - 0
pkg/filter21/ast/walker_test.go

@@ -2,6 +2,8 @@ package ast
 
 import (
 	"fmt"
+	"reflect"
+	"testing"
 )
 
 func ExampleTransformLeaves() {
@@ -50,3 +52,101 @@ func ExampleTransformLeaves() {
 	//   }
 	// }
 }
+
+func TestFields(t *testing.T) {
+	type testCase struct {
+		name   string
+		filter FilterNode
+		exp    []Field
+	}
+
+	fieldNamespace := *NewField("namespace")
+	fieldCluster := *NewField("cluster")
+	fieldControllerKind := *NewField("controllerKind")
+
+	testCases := []testCase{
+		{
+			name:   ``,
+			filter: &VoidOp{},
+			exp:    []Field{},
+		},
+		{
+			name: `namespace:"kubecost"`,
+			filter: &EqualOp{
+				Left: Identifier{
+					Field: NewField("namespace"),
+					Key:   "",
+				},
+				Right: "kubecost",
+			},
+			exp: []Field{fieldNamespace},
+		},
+		{
+			name: `namespace: "kubecost" | cluster:"cluster-one" | controllerKind:"deployment"`,
+			filter: &OrOp{
+				Operands: []FilterNode{
+					&EqualOp{
+						Left: Identifier{
+							Field: NewField("namespace"),
+							Key:   "",
+						},
+						Right: "kubecost",
+					},
+					&EqualOp{
+						Left: Identifier{
+							Field: NewField("cluster"),
+							Key:   "",
+						},
+						Right: "cluster-one",
+					},
+					&EqualOp{
+						Left: Identifier{
+							Field: NewField("controllerKind"),
+							Key:   "",
+						},
+						Right: "deployment",
+					},
+				},
+			},
+			exp: []Field{fieldCluster, fieldControllerKind, fieldNamespace},
+		},
+		{
+			name: `namespace: "kubecost" | namespace:"kube-system" | namespace:"default"`,
+			filter: &OrOp{
+				Operands: []FilterNode{
+					&EqualOp{
+						Left: Identifier{
+							Field: NewField("namespace"),
+							Key:   "",
+						},
+						Right: "kubecost",
+					},
+					&EqualOp{
+						Left: Identifier{
+							Field: NewField("namespace"),
+							Key:   "",
+						},
+						Right: "kube-system",
+					},
+					&EqualOp{
+						Left: Identifier{
+							Field: NewField("namespace"),
+							Key:   "",
+						},
+						Right: "default",
+					},
+				},
+			},
+			exp: []Field{fieldNamespace},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			act := Fields(tc.filter)
+			if !reflect.DeepEqual(tc.exp, act) {
+				t.Errorf("fields do not match; expected %v; got %v", tc.exp, act)
+			}
+		})
+	}
+}

+ 70 - 24
pkg/kubecost/allocation.go

@@ -94,7 +94,10 @@ type Allocation struct {
 	ProportionalAssetResourceCosts ProportionalAssetResourceCosts `json:"proportionalAssetResourceCosts"` //@bingen:field[ignore]
 	SharedCostBreakdown            SharedCostBreakdowns           `json:"sharedCostBreakdown"`            //@bingen:field[ignore]
 	LoadBalancers                  LbAllocations                  `json:"LoadBalancers"`                  // @bingen:field[version=18]
-
+	// UnmountedPVCost tracks how much of the PV cost belongs to an unmounted
+	// PV. It overlaps with (is not additive to) PVCost() and need not be sent
+	// in API responses.
+	UnmountedPVCost float64 `json:"-"`
 }
 
 type LbAllocations map[string]*LbAllocation
@@ -682,6 +685,7 @@ func (a *Allocation) Clone() *Allocation {
 		ProportionalAssetResourceCosts: a.ProportionalAssetResourceCosts.Clone(),
 		SharedCostBreakdown:            a.SharedCostBreakdown.Clone(),
 		LoadBalancers:                  a.LoadBalancers.Clone(),
+		UnmountedPVCost:                a.UnmountedPVCost,
 	}
 }
 
@@ -781,6 +785,10 @@ func (a *Allocation) Equal(that *Allocation) bool {
 		return false
 	}
 
+	if !util.IsApproximately(a.UnmountedPVCost, that.UnmountedPVCost) {
+		return false
+	}
+
 	return true
 }
 
@@ -1027,21 +1035,12 @@ func (a *Allocation) IsUnallocated() bool {
 }
 
 // IsUnmounted is true if the given Allocation represents unmounted volume costs.
-// Note: Due to change in https://github.com/opencost/opencost/pull/1477 made to include Unmounted
-// PVC cost inside namespace we need to check unmounted suffix across all the three major properties
-// to actually classify it as unmounted.
 func (a *Allocation) IsUnmounted() bool {
 	if a == nil {
 		return false
 	}
 
-	props := a.Properties
-	if props != nil {
-		if props.Container == UnmountedSuffix && props.Namespace == UnmountedSuffix && props.Pod == UnmountedSuffix {
-			return true
-		}
-	}
-	return false
+	return strings.Contains(a.Name, UnmountedSuffix)
 }
 
 // Minutes returns the number of minutes the Allocation represents, as defined
@@ -1054,6 +1053,21 @@ func (a *Allocation) Minutes() float64 {
 	return a.End.Sub(a.Start).Minutes()
 }
 
+// SetUnmountedPVCost determines if the Allocation is unmounted and, if so, it
+// sets the UnmountedPVCost field appropriately.
+func (a *Allocation) SetUnmountedPVCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
+	if a.IsUnmounted() {
+		a.UnmountedPVCost = a.PVTotalCost()
+		return a.UnmountedPVCost
+	}
+
+	return 0.0
+}
+
 // Share adds the TotalCost of the given Allocation to the SharedCost of the
 // receiving Allocation. No Start, End, Window, or AllocationProperties are considered.
 // Neither Allocation is mutated; a new Allocation is always returned.
@@ -1201,6 +1215,7 @@ func (a *Allocation) add(that *Allocation) {
 	a.LoadBalancerCost += that.LoadBalancerCost
 	a.SharedCost += that.SharedCost
 	a.ExternalCost += that.ExternalCost
+	a.UnmountedPVCost += that.UnmountedPVCost
 
 	// Sum PVAllocations
 	a.PVs = a.PVs.Add(that.PVs)
@@ -1295,7 +1310,7 @@ type AllocationAggregationOptions struct {
 	MergeUnallocated                      bool
 	Reconcile                             bool
 	ReconcileNetwork                      bool
-	ShareFuncs                            []AllocationMatchFunc
+	Share                                 filter21.Filter
 	SharedNamespaces                      []string
 	SharedLabels                          map[string][]string
 	ShareIdle                             string
@@ -1395,6 +1410,16 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		return fmt.Errorf("unexpected nil filter")
 	}
 
+	var sharer AllocationMatcher
+	if options.Share != nil {
+		compiler := NewAllocationMatchCompiler(options.LabelConfig)
+		var err error
+		sharer, err = compiler.Compile(options.Share)
+		if err != nil {
+			return fmt.Errorf("compiling sharer '%s': %w", ast.ToPreOrderShortString(options.Filter), err)
+		}
+	}
+
 	var allocatedTotalsMap map[string]map[string]float64
 
 	// If aggregateBy is nil, we don't aggregate anything. On the other hand,
@@ -1402,7 +1427,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// generateKey for why that makes sense.
 	shouldAggregate := aggregateBy != nil
 	shouldFilter := !isFilterEmpty(filter)
-	shouldShare := len(options.SharedHourlyCosts) > 0 || len(options.ShareFuncs) > 0
+	shouldShare := len(options.SharedHourlyCosts) > 0 || sharer != nil
 	if !shouldAggregate && !shouldFilter && !shouldShare && options.ShareIdle == ShareNone && !options.IncludeProportionalAssetResourceCosts {
 		// There is nothing for AggregateBy to do, so simply return nil
 		// before returning, set aggregated metadata inclusion in properties
@@ -1446,7 +1471,6 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// them to their respective sets, removing them from the set of allocations
 	// to aggregate.
 	for _, alloc := range as.Allocations {
-
 		alloc.Properties.AggregatedMetadata = options.IncludeAggregatedMetadata
 		// build a parallel set of allocations to only be used
 		// for computing PARCs
@@ -1483,13 +1507,11 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		// Shared allocations must be identified and separated prior to
 		// aggregation and filtering. That is, if any of the ShareFuncs return
 		// true for the allocation, then move it to shareSet.
-		for _, sf := range options.ShareFuncs {
-			if sf(alloc) {
-				delete(as.IdleKeys, alloc.Name)
-				delete(as.Allocations, alloc.Name)
-				shareSet.Insert(alloc)
-				break
-			}
+		if sharer != nil && sharer.Matches(alloc) {
+			delete(as.IdleKeys, alloc.Name)
+			delete(as.Allocations, alloc.Name)
+			shareSet.Insert(alloc)
+			continue
 		}
 	}
 
@@ -2032,8 +2054,8 @@ func computeShareCoeffs(aggregateBy []string, options *AllocationAggregationOpti
 		} else {
 			// Both are additive for weighted distribution, where each
 			// cumulative coefficient will be divided by the total.
-			coeffs[name] += alloc.TotalCost() - alloc.SharedCost
-			total += alloc.TotalCost() - alloc.SharedCost
+			coeffs[name] += alloc.TotalCost() - alloc.SharedCost - alloc.UnmountedPVCost
+			total += alloc.TotalCost() - alloc.SharedCost - alloc.UnmountedPVCost
 		}
 	}
 
@@ -2314,7 +2336,7 @@ func (a *Allocation) determineSharingName(options *AllocationAggregationOptions)
 
 	// grab SharedLabels keys and sort them, to keep this function deterministic
 	var labelKeys []string
-	for labelKey, _ := range options.SharedLabels {
+	for labelKey := range options.SharedLabels {
 		labelKeys = append(labelKeys, labelKey)
 	}
 	slices.Sort(labelKeys)
@@ -2825,6 +2847,30 @@ func (as *AllocationSet) Set(alloc *Allocation) error {
 	return nil
 }
 
+// GetUnmountedPVCost returns the sum of all UnmountedPVCost fields across all
+// allocations in the set.
+func (as *AllocationSet) GetUnmountedPVCost() float64 {
+	upvc := 0.0
+
+	for _, a := range as.Allocations {
+		upvc += a.UnmountedPVCost
+	}
+
+	return upvc
+}
+
+// SetUnmountedPVCost sets the UnmountedPVCost field for all allocations in the
+// set.
+func (as *AllocationSet) SetUnmountedPVCost() float64 {
+	upvc := 0.0
+
+	for _, a := range as.Allocations {
+		upvc += a.SetUnmountedPVCost()
+	}
+
+	return upvc
+}
+
 // Start returns the Start time of the AllocationSet window
 func (as *AllocationSet) Start() time.Time {
 	if as == nil {

+ 46 - 36
pkg/kubecost/allocation_test.go

@@ -10,7 +10,9 @@ import (
 
 	"github.com/davecgh/go-spew/spew"
 	filter21 "github.com/opencost/opencost/pkg/filter21"
+	"github.com/opencost/opencost/pkg/filter21/allocation"
 	afilter "github.com/opencost/opencost/pkg/filter21/allocation"
+	"github.com/opencost/opencost/pkg/filter21/ast"
 	"github.com/opencost/opencost/pkg/filter21/ops"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util"
@@ -718,25 +720,27 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 	idleTotalCost := 30.0
 	sharedOverheadHourlyCost := 7.0
 
-	// Match Functions
-	isNamespace3 := func(a *Allocation) bool {
-		ns := a.Properties.Namespace
-		return ns == "namespace3"
-	}
+	// Match filters
 
-	isApp1 := func(a *Allocation) bool {
-		ls := a.Properties.Labels
-		if app, ok := ls["app"]; ok && app == "app1" {
-			return true
+	// This is ugly, but required because cannot import filterutil due to import cycle
+	namespaceEquals := func(ns string) *ast.EqualOp {
+		return &ast.EqualOp{
+			Left: ast.Identifier{
+				Field: ast.NewField(allocation.FieldNamespace),
+				Key:   "",
+			},
+			Right: ns,
 		}
-		return false
 	}
 
-	// Filters
-	isNamespace := func(matchNamespace string) func(*Allocation) bool {
-		return func(a *Allocation) bool {
-			namespace := a.Properties.Namespace
-			return namespace == matchNamespace
+	// This is ugly, but required because cannot import filterutil due to import cycle
+	labelEquals := func(name, value string) *ast.EqualOp {
+		return &ast.EqualOp{
+			Left: ast.Identifier{
+				Field: ast.NewField(allocation.FieldLabel),
+				Key:   name,
+			},
+			Right: value,
 		}
 	}
 
@@ -1216,7 +1220,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				ShareFuncs: []AllocationMatchFunc{isNamespace3},
+				Share:      namespaceEquals("namespace3"),
 				ShareSplit: ShareEven,
 			},
 			numResults: numNamespaces,
@@ -1238,7 +1242,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				ShareFuncs:                            []AllocationMatchFunc{isNamespace3},
+				Share:                                 namespaceEquals("namespace3"),
 				ShareSplit:                            ShareWeighted,
 				IncludeProportionalAssetResourceCosts: true,
 			},
@@ -1298,7 +1302,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				ShareFuncs: []AllocationMatchFunc{isApp1},
+				Share:      labelEquals("app", "app1"),
 				ShareSplit: ShareEven,
 			},
 			numResults: numNamespaces + numIdle,
@@ -1486,7 +1490,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
 				Filter:     mustParseFilter(`namespace:"namespace2"`),
-				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
+				Share:      namespaceEquals("namespace1"),
 				ShareSplit: ShareWeighted,
 			},
 			numResults: 1 + numIdle,
@@ -1505,7 +1509,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
 				Filter:     mustParseFilter(`namespace:"namespace2"`),
-				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
+				Share:      namespaceEquals("namespace1"),
 				ShareSplit: ShareWeighted,
 				ShareIdle:  ShareWeighted,
 			},
@@ -1556,7 +1560,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
-				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
+				Share:      namespaceEquals("namespace1"),
 				ShareSplit: ShareWeighted,
 				ShareIdle:  ShareWeighted,
 			},
@@ -1611,7 +1615,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
 				Filter:     mustParseFilter(`namespace:"namespace2"`),
-				ShareFuncs: []AllocationMatchFunc{isNamespace("namespace1")},
+				Share:      namespaceEquals("namespace1"),
 				ShareSplit: ShareWeighted,
 				ShareIdle:  ShareWeighted,
 			},
@@ -1888,6 +1892,9 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 	}
 
 	for name, testcase := range cases {
+		if name != "4a" {
+			continue
+		}
 		t.Run(name, func(t *testing.T) {
 			if testcase.aggOpts != nil && testcase.aggOpts.IdleByNode {
 				as = GenerateMockAllocationSetNodeIdle(testcase.start)
@@ -1895,6 +1902,12 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 				as = GenerateMockAllocationSetClusterIdle(testcase.start)
 			}
 			err = as.AggregateBy(testcase.aggBy, testcase.aggOpts)
+
+			log.Infof("RESULTS")
+			for name, alloc := range as.Allocations {
+				log.Infof("  %s = %f", name, alloc.TotalCost())
+			}
+
 			assertAllocationSetTotals(t, as, name, err, testcase.numResults, testcase.totalCost)
 			assertAllocationTotals(t, as, name, testcase.results)
 			assertParcResults(t, as, name, testcase.expectedParcResults)
@@ -1947,14 +1960,15 @@ func TestAllocationSet_AggregateBy_SharedCostBreakdown(t *testing.T) {
 	end := time.Now().UTC().Truncate(day)
 	start := end.Add(-day)
 
-	isNamespace1 := func(a *Allocation) bool {
-		ns := a.Properties.Namespace
-		return ns == "namespace1"
-	}
-
-	isNamespace3 := func(a *Allocation) bool {
-		ns := a.Properties.Namespace
-		return ns == "namespace3"
+	// This is ugly, but required because we cannot import filterutil here due to an import cycle
+	namespaceEquals := func(ns string) *ast.EqualOp {
+		return &ast.EqualOp{
+			Left: ast.Identifier{
+				Field: ast.NewField(allocation.FieldNamespace),
+				Key:   "",
+			},
+			Right: ns,
+		}
 	}
 
 	cases := map[string]struct {
@@ -1974,9 +1988,7 @@ func TestAllocationSet_AggregateBy_SharedCostBreakdown(t *testing.T) {
 			start: start,
 			aggBy: []string{"namespace"},
 			aggOpts: &AllocationAggregationOptions{
-				ShareFuncs: []AllocationMatchFunc{
-					isNamespace1,
-				},
+				Share:                      namespaceEquals("namespace1"),
 				IncludeSharedCostBreakdown: true,
 			},
 		},
@@ -1984,9 +1996,7 @@ func TestAllocationSet_AggregateBy_SharedCostBreakdown(t *testing.T) {
 			start: start,
 			aggBy: []string{"namespace"},
 			aggOpts: &AllocationAggregationOptions{
-				ShareFuncs: []AllocationMatchFunc{
-					isNamespace3,
-				},
+				Share:                      namespaceEquals("namespace3"),
 				IncludeSharedCostBreakdown: true,
 			},
 		},

+ 10 - 7
pkg/kubecost/asset.go

@@ -3211,24 +3211,27 @@ func (as *AssetSet) ReconciliationMatchMap() map[string]map[string]Asset {
 			continue
 		}
 
-		if _, ok := matchMap[props.ProviderID]; !ok {
-			matchMap[props.ProviderID] = make(map[string]Asset)
+		// providerID casing is not guaranteed to be consistent (e.g. the Azure provider),
+		// so lowercase it when building the reconciliation map so matching works for all providers
+		providerID := strings.ToLower(props.ProviderID)
+		if _, ok := matchMap[providerID]; !ok {
+			matchMap[providerID] = make(map[string]Asset)
 		}
 
 		// Check if a match is already in the map
-		if duplicateAsset, ok := matchMap[props.ProviderID][props.Category]; ok {
+		if duplicateAsset, ok := matchMap[providerID][props.Category]; ok {
 			log.DedupedWarningf(5, "duplicate asset found when reconciling for %s", props.ProviderID)
 			// if one asset already has adjustment use that one
 			if duplicateAsset.GetAdjustment() == 0 && asset.GetAdjustment() != 0 {
-				matchMap[props.ProviderID][props.Category] = asset
+				matchMap[providerID][props.Category] = asset
 			} else if duplicateAsset.GetAdjustment() != 0 && asset.GetAdjustment() == 0 {
-				matchMap[props.ProviderID][props.Category] = duplicateAsset
+				matchMap[providerID][props.Category] = duplicateAsset
 				// otherwise use the one with the higher cost
 			} else if duplicateAsset.TotalCost() < asset.TotalCost() {
-				matchMap[props.ProviderID][props.Category] = asset
+				matchMap[providerID][props.Category] = asset
 			}
 		} else {
-			matchMap[props.ProviderID][props.Category] = asset
+			matchMap[providerID][props.Category] = asset
 		}
 
 	}

+ 9 - 0
pkg/kubecost/cloudcost.go

@@ -147,6 +147,15 @@ func (cc *CloudCost) GetCostMetric(costMetricName string) (CostMetric, error) {
 	return CostMetric{}, fmt.Errorf("invalid Cost Metric: %s", costMetricName)
 }
 
+// WeightCostMetrics weights all the cost metrics with the given weightedAverage
+func (cc *CloudCost) WeightCostMetrics(weightedAverage float64) {
+	cc.ListCost.Cost *= weightedAverage
+	cc.NetCost.Cost *= weightedAverage
+	cc.AmortizedNetCost.Cost *= weightedAverage
+	cc.InvoicedCost.Cost *= weightedAverage
+	cc.AmortizedCost.Cost *= weightedAverage
+}
+
 // CloudCostSet follows the established set pattern of windowed data types. It has addition metadata types that can be
 // used to preserve data consistency and be used for validation.
 // - Integration is the ID for the integration that a CloudCostSet was sourced from, this value is cleared if when a

+ 37 - 29
pkg/kubecost/summaryallocation.go

@@ -39,6 +39,7 @@ type SummaryAllocation struct {
 	SharedCost             float64               `json:"sharedCost"`
 	ExternalCost           float64               `json:"externalCost"`
 	Share                  bool                  `json:"-"`
+	UnmountedPVCost        float64               `json:"-"`
 }
 
 // NewSummaryAllocation converts an Allocation to a SummaryAllocation by
@@ -67,6 +68,7 @@ func NewSummaryAllocation(alloc *Allocation, reconcile, reconcileNetwork bool) *
 		RAMCost:                alloc.RAMCost + alloc.RAMCostAdjustment,
 		SharedCost:             alloc.SharedCost,
 		ExternalCost:           alloc.ExternalCost,
+		UnmountedPVCost:        alloc.UnmountedPVCost,
 	}
 
 	// Revert adjustments if reconciliation is off. If only network
@@ -82,6 +84,11 @@ func NewSummaryAllocation(alloc *Allocation, reconcile, reconcileNetwork bool) *
 		sa.NetworkCost -= alloc.NetworkCostAdjustment
 	}
 
+	// If the allocation is unmounted, set UnmountedPVCost to the full PVCost.
+	if sa.IsUnmounted() {
+		sa.UnmountedPVCost = sa.PVCost
+	}
+
 	return sa
 }
 
@@ -294,20 +301,12 @@ func (sa *SummaryAllocation) IsUnallocated() bool {
 
 // IsUnmounted is true if the given SummaryAllocation represents unmounted
 // volume costs.
-// Note: Due to change in https://github.com/opencost/opencost/pull/1477 made to include Unmounted
-// PVC cost inside namespace we need to check unmounted suffix across all the three major properties
-// to actually classify it as unmounted.
 func (sa *SummaryAllocation) IsUnmounted() bool {
 	if sa == nil {
 		return false
 	}
-	props := sa.Properties
-	if props != nil {
-		if props.Container == UnmountedSuffix && props.Namespace == UnmountedSuffix && props.Pod == UnmountedSuffix {
-			return true
-		}
-	}
-	return false
+
+	return strings.Contains(sa.Name, UnmountedSuffix)
 }
 
 // Minutes returns the number of minutes the SummaryAllocation represents, as
@@ -384,7 +383,7 @@ type SummaryAllocationSet struct {
 // This filter is an AllocationMatcher, not an AST, because at this point we
 // already have the data and want to make sure that the filter has already
 // gone through a compile step to deal with things like aliases.
-func NewSummaryAllocationSet(as *AllocationSet, filter AllocationMatcher, kfs []AllocationMatchFunc, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
+func NewSummaryAllocationSet(as *AllocationSet, filter, keep AllocationMatcher, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
 	if as == nil {
 		return nil
 	}
@@ -392,7 +391,7 @@ func NewSummaryAllocationSet(as *AllocationSet, filter AllocationMatcher, kfs []
 	// If we can know the exact size of the map, use it. If filters or sharing
 	// functions are present, we can't know the size, so we make a default map.
 	var sasMap map[string]*SummaryAllocation
-	if filter == nil && len(kfs) == 0 {
+	if filter == nil {
 		// No filters, so make the map of summary allocations exactly the size
 		// of the origin allocation set.
 		sasMap = make(map[string]*SummaryAllocation, len(as.Allocations))
@@ -409,14 +408,7 @@ func NewSummaryAllocationSet(as *AllocationSet, filter AllocationMatcher, kfs []
 	for _, alloc := range as.Allocations {
 		// First, detect if the allocation should be kept. If so, mark it as
 		// such, insert it, and continue.
-		shouldKeep := false
-		for _, kf := range kfs {
-			if kf(alloc) {
-				shouldKeep = true
-				break
-			}
-		}
-		if shouldKeep {
+		if keep != nil && keep.Matches(alloc) {
 			sa := NewSummaryAllocation(alloc, reconcile, reconcileNetwork)
 			sa.Share = true
 			sas.Insert(sa)
@@ -530,6 +522,16 @@ func (sas *SummaryAllocationSet) Add(that *SummaryAllocationSet) (*SummaryAlloca
 	return acc, nil
 }
 
+func (sas *SummaryAllocationSet) GetUnmountedPVCost() float64 {
+	upvc := 0.0
+
+	for _, sa := range sas.SummaryAllocations {
+		upvc += sa.UnmountedPVCost
+	}
+
+	return upvc
+}
+
 // AggregateBy aggregates the Allocations in the given AllocationSet by the given
 // AllocationProperty. This will only be legal if the AllocationSet is divisible by the
 // given AllocationProperty; e.g. Containers can be divided by Namespace, but not vice-a-versa.
@@ -570,7 +572,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	// an empty slice implies that we should aggregate everything. (See
 	// generateKey for why that makes sense.)
 	shouldAggregate := aggregateBy != nil
-	shouldKeep := len(options.SharedHourlyCosts) > 0 || len(options.ShareFuncs) > 0
+	shouldKeep := len(options.SharedHourlyCosts) > 0 || options.Share != nil
 	if !shouldAggregate && !shouldKeep {
 		return nil
 	}
@@ -672,6 +674,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 			sharedResourceTotals[key].NetworkCost += sa.NetworkCost
 			sharedResourceTotals[key].PersistentVolumeCost += sa.PVCost
 			sharedResourceTotals[key].RAMCost += sa.RAMCost
+			sharedResourceTotals[key].UnmountedPVCost += sa.UnmountedPVCost
 
 			shareSet.Insert(sa)
 			delete(sas.SummaryAllocations, sa.Name)
@@ -831,8 +834,9 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		// 0.0 and 1.0.
 		// NOTE: SummaryAllocation does not support ShareEven, so only record
 		// by cost for cost-weighted distribution.
-		if sharingCoeffs != nil {
-			sharingCoeffs[key] += sa.TotalCost() - sa.SharedCost
+		// Unmounted allocations do not receive shared costs, so exclude them here.
+		if sharingCoeffs != nil && !sa.IsUnmounted() {
+			sharingCoeffs[key] += sa.TotalCost() - sa.SharedCost - sa.UnmountedPVCost
 		}
 
 		// 6. Distribute idle allocations according to the idle coefficients.
@@ -991,21 +995,24 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	// NOTE: ShareEven is not supported
 	if len(shareSet.SummaryAllocations) > 0 {
 
+		shareCoeffSum := 0.0
+
 		sharingCoeffDenominator := 0.0
 		for _, rt := range allocTotals {
-			sharingCoeffDenominator += rt.TotalCost()
+			// Note: these allocation totals already exclude unmounted PV costs,
+			sharingCoeffDenominator += rt.TotalCost() // does NOT include unmounted PVs at all
 		}
 
 		// Do not include the shared costs, themselves, when determining
 		// sharing coefficients.
 		for _, rt := range sharedResourceTotals {
-			sharingCoeffDenominator -= rt.TotalCost()
+			// Due to the fact that sharingCoeffDenominator already has no
+			// unmounted PV costs, we need to be careful not to additionally
+			// subtract the unmounted PV cost when we remove shared costs
+			// from the denominator.
+			sharingCoeffDenominator -= (rt.TotalCost() - rt.UnmountedPVCost)
 		}
 
-		// Do not include the unmounted costs when determining sharing
-		// coefficients because they do not receive shared costs.
-		sharingCoeffDenominator -= totalUnmountedCost
-
 		if sharingCoeffDenominator <= 0.0 {
 			log.Warnf("SummaryAllocation: sharing coefficient denominator is %f", sharingCoeffDenominator)
 		} else {
@@ -1019,6 +1026,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 				}
 				if sharingCoeffs[key] > 0.0 {
 					sharingCoeffs[key] /= sharingCoeffDenominator
+					shareCoeffSum += sharingCoeffs[key]
 				} else {
 					log.Warnf("SummaryAllocation: detected illegal sharing coefficient for %s: %v (setting to zero)", key, sharingCoeffs[key])
 					sharingCoeffs[key] = 0.0

+ 4 - 0
pkg/kubecost/summaryallocation_json.go

@@ -24,6 +24,8 @@ type SummaryAllocationResponse struct {
 	RAMCost                *float64  `json:"ramCost"`
 	SharedCost             *float64  `json:"sharedCost"`
 	ExternalCost           *float64  `json:"externalCost"`
+	TotalEfficiency        *float64  `json:"totalEfficiency"`
+	TotalCost              *float64  `json:"totalCost"`
 }
 
 // ToResponse converts a SummaryAllocation to a SummaryAllocationResponse,
@@ -49,6 +51,8 @@ func (sa *SummaryAllocation) ToResponse() *SummaryAllocationResponse {
 		RAMCost:                formatutil.Float64ToResponse(sa.RAMCost),
 		SharedCost:             formatutil.Float64ToResponse(sa.SharedCost),
 		ExternalCost:           formatutil.Float64ToResponse(sa.ExternalCost),
+		TotalEfficiency:        formatutil.Float64ToResponse(sa.TotalEfficiency()),
+		TotalCost:              formatutil.Float64ToResponse(sa.TotalCost()),
 	}
 }
 

+ 14 - 0
pkg/kubecost/totals.go

@@ -10,6 +10,16 @@ import (
 	"github.com/patrickmn/go-cache"
 )
 
+type AllocationTotalsResult struct {
+	Cluster map[string]*AllocationTotals `json:"cluster"`
+	Node    map[string]*AllocationTotals `json:"node"`
+}
+
+type AssetTotalsResult struct {
+	Cluster map[string]*AssetTotals `json:"cluster"`
+	Node    map[string]*AssetTotals `json:"node"`
+}
+
 // AllocationTotals represents aggregate costs of all Allocations for
 // a given cluster or tuple of (cluster, node) between a given start and end
 // time, where the costs are aggregated per-resource. AllocationTotals
@@ -35,6 +45,10 @@ type AllocationTotals struct {
 	PersistentVolumeCostAdjustment float64   `json:"persistentVolumeCostAdjustment"`
 	RAMCost                        float64   `json:"ramCost"`
 	RAMCostAdjustment              float64   `json:"ramCostAdjustment"`
+	// UnmountedPVCost is used to track how much of the cost in
+	// PersistentVolumeCost is for an unmounted PV. It is not additive to that
+	// field, and need not be sent in API responses.
+	UnmountedPVCost float64 `json:"-"`
 }
 
 // ClearAdjustments sets all adjustment fields to 0.0

+ 77 - 0
pkg/kubecost/totals_json.go

@@ -0,0 +1,77 @@
+package kubecost
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/pkg/util/formatutil"
+)
+
+type AllocationTotalsResponse struct {
+	Start                          time.Time `json:"start"`
+	End                            time.Time `json:"end"`
+	Cluster                        string    `json:"cluster"`
+	Node                           string    `json:"node"`
+	Count                          int       `json:"count"`
+	CPUCost                        *float64  `json:"cpuCost"`
+	CPUCostAdjustment              *float64  `json:"cpuCostAdjustment"`
+	GPUCost                        *float64  `json:"gpuCost"`
+	GPUCostAdjustment              *float64  `json:"gpuCostAdjustment"`
+	LoadBalancerCost               *float64  `json:"loadBalancerCost"`
+	LoadBalancerCostAdjustment     *float64  `json:"loadBalancerCostAdjustment"`
+	NetworkCost                    *float64  `json:"networkCost"`
+	NetworkCostAdjustment          *float64  `json:"networkCostAdjustment"`
+	PersistentVolumeCost           *float64  `json:"persistentVolumeCost"`
+	PersistentVolumeCostAdjustment *float64  `json:"persistentVolumeCostAdjustment"`
+	RAMCost                        *float64  `json:"ramCost"`
+	RAMCostAdjustment              *float64  `json:"ramCostAdjustment"`
+	TotalCost                      *float64  `json:"totalCost"`
+}
+
+func (arts *AllocationTotals) ToResponse() *AllocationTotalsResponse {
+	if arts == nil {
+		return nil
+	}
+
+	return &AllocationTotalsResponse{
+		Start:                          arts.Start,
+		End:                            arts.End,
+		Cluster:                        arts.Cluster,
+		Node:                           arts.Node,
+		Count:                          arts.Count,
+		CPUCost:                        formatutil.Float64ToResponse(arts.CPUCost),
+		CPUCostAdjustment:              formatutil.Float64ToResponse(arts.CPUCostAdjustment),
+		GPUCost:                        formatutil.Float64ToResponse(arts.GPUCost),
+		GPUCostAdjustment:              formatutil.Float64ToResponse(arts.GPUCostAdjustment),
+		LoadBalancerCost:               formatutil.Float64ToResponse(arts.LoadBalancerCost),
+		LoadBalancerCostAdjustment:     formatutil.Float64ToResponse(arts.LoadBalancerCostAdjustment),
+		NetworkCost:                    formatutil.Float64ToResponse(arts.NetworkCost),
+		NetworkCostAdjustment:          formatutil.Float64ToResponse(arts.NetworkCostAdjustment),
+		PersistentVolumeCost:           formatutil.Float64ToResponse(arts.PersistentVolumeCost),
+		PersistentVolumeCostAdjustment: formatutil.Float64ToResponse(arts.PersistentVolumeCostAdjustment),
+		RAMCost:                        formatutil.Float64ToResponse(arts.RAMCost),
+		RAMCostAdjustment:              formatutil.Float64ToResponse(arts.RAMCostAdjustment),
+		TotalCost:                      formatutil.Float64ToResponse(arts.TotalCost()),
+	}
+}
+
+type AllocationTotalsResultResponse struct {
+	Cluster map[string]*AllocationTotalsResponse `json:"cluster"`
+	Node    map[string]*AllocationTotalsResponse `json:"node"`
+}
+
+func (atr *AllocationTotalsResult) ToResponse() *AllocationTotalsResultResponse {
+	response := &AllocationTotalsResultResponse{
+		Cluster: map[string]*AllocationTotalsResponse{},
+		Node:    map[string]*AllocationTotalsResponse{},
+	}
+
+	for k, v := range atr.Cluster {
+		response.Cluster[k] = v.ToResponse()
+	}
+
+	for k, v := range atr.Node {
+		response.Node[k] = v.ToResponse()
+	}
+
+	return response
+}

+ 14 - 0
pkg/util/filterutil/filterutil.go

@@ -355,6 +355,20 @@ func AllocationFilterFromParamsV1(
 	return andFilter
 }
 
+func AllocationSharerFromParamsV1(params AllocationFilterV1) filter.Filter {
+	var filterOps []ast.FilterNode
+
+	if len(params.Namespaces) > 0 {
+		filterOps = push(filterOps, filterV1SingleValueFromList(params.Namespaces, afilter.FieldNamespace))
+	}
+
+	if len(params.Labels) > 0 {
+		filterOps = push(filterOps, filterV1DoubleValueFromList(params.Labels, afilter.FieldLabel))
+	}
+
+	return opsToAnd(filterOps)
+}
+
 func AssetFilterFromParamsV1(
 	qp mapper.PrimitiveMapReader,
 	clusterMap clusters.ClusterMap,