
feat: Add UID field to all metric result types and queries (#3384)

Signed-off-by: Sparsh <sparsh.raj30@gmail.com>
Co-authored-by: Alex Meijer <ameijer@users.noreply.github.com>
Sparsh Raj 6 months ago
parent
commit
0cd79f398f

File diff not shown because it is too large
+ 136 - 0
core/pkg/source/decoders.go
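The decoders.go diff itself is suppressed, but the rest of the commit makes the shape of that change clear: each decoded metric result type gains a UID field, filled from the new "uid" series label. A minimal sketch, assuming illustrative struct fields and label keys (the actual file contents are not shown here):

// Hypothetical sketch of the suppressed decoders.go change. The struct
// shape and label keys below are assumptions inferred from the test
// expectations and PromQL changes elsewhere in this commit.
package source

const UIDLabel = "uid" // consistent with the "uid" grouping label added to the queries below

type PVUsedAvgResult struct {
	UID       string // new in this commit: the Kubernetes object UID
	Cluster   string
	Namespace string
	PVC       string
}

func decodePVUsedAvg(labels map[string]string) *PVUsedAvgResult {
	return &PVUsedAvgResult{
		UID:       labels[UIDLabel], // pass the UID label through unchanged
		Cluster:   labels["cluster_id"],
		Namespace: labels["namespace"],
		PVC:       labels["persistentvolumeclaim"],
	}
}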


+ 68 - 0
modules/collector-source/pkg/collector/collector.go

@@ -104,6 +104,7 @@ func NewPVPricePerGiBHourMetricCollector() *metric.MetricCollector {
 			source.VolumeNameLabel,
 			source.PVLabel,
 			source.ProviderIDLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -125,6 +126,7 @@ func NewPVUsedAverageMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PVCLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -146,6 +148,7 @@ func NewPVUsedMaxMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PVCLabel,
+			source.UIDLabel,
 		},
 		aggregator.MaxOverTime,
 		nil,
@@ -168,6 +171,7 @@ func NewPVCInfoMetricCollector() *metric.MetricCollector {
 			source.VolumeNameLabel,
 			source.PVCLabel,
 			source.StorageClassLabel,
+			source.UIDLabel,
 		},
 		aggregator.ActiveMinutes,
 		func(labels map[string]string) bool {
@@ -188,6 +192,7 @@ func NewPVActiveMinutesMetricCollector() *metric.MetricCollector {
 		metric.KubePersistentVolumeCapacityBytes,
 		[]string{
 			source.PVLabel,
+			source.UIDLabel,
 		},
 		aggregator.ActiveMinutes,
 		nil,
@@ -214,6 +219,7 @@ func NewLocalStorageUsedActiveMinutesMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.InstanceLabel,
 			source.DeviceLabel,
+			source.UIDLabel,
 		},
 		aggregator.ActiveMinutes,
 		nil, // filter not required here because only container root file system is being scraped
@@ -239,6 +245,7 @@ func NewLocalStorageUsedAverageMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.InstanceLabel,
 			source.DeviceLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil, // filter not required here because only container root file system is being scraped
@@ -265,6 +272,7 @@ func NewLocalStorageUsedMaxMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.InstanceLabel,
 			source.DeviceLabel,
+			source.UIDLabel,
 		},
 		aggregator.MaxOverTime,
 		nil, // filter not required here because only container root file system is being scraped
@@ -289,6 +297,7 @@ func NewLocalStorageBytesMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.InstanceLabel,
 			source.DeviceLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil, // filter not required here because only node root file system is being scraped
@@ -309,6 +318,7 @@ func NewLocalStorageActiveMinutesMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NodeLabel,
 			source.ProviderIDLabel,
+			source.UIDLabel,
 		},
 		aggregator.ActiveMinutes,
 		nil,
@@ -330,6 +340,7 @@ func NewNodeCPUCoresCapacityMetricCollector() *metric.MetricCollector {
 		metric.KubeNodeStatusCapacityCPUCores,
 		[]string{
 			source.NodeLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -350,6 +361,7 @@ func NewNodeCPUCoresAllocatableMetricCollector() *metric.MetricCollector {
 		metric.KubeNodeStatusAllocatableCPUCores,
 		[]string{
 			source.NodeLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -370,6 +382,7 @@ func NewNodeRAMBytesCapacityMetricCollector() *metric.MetricCollector {
 		metric.KubeNodeStatusCapacityMemoryBytes,
 		[]string{
 			source.NodeLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -390,6 +403,7 @@ func NewNodeRAMBytesAllocatableMetricCollector() *metric.MetricCollector {
 		metric.KubeNodeStatusAllocatableMemoryBytes,
 		[]string{
 			source.NodeLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -411,6 +425,7 @@ func NewNodeGPUCountMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NodeLabel,
 			source.ProviderIDLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -429,6 +444,7 @@ func NewNodeLabelsMetricCollector() *metric.MetricCollector {
 		metric.KubeNodeLabels,
 		[]string{
 			source.NodeLabel,
+			source.UIDLabel,
 		},
 		aggregator.Info,
 		nil,
@@ -448,6 +464,7 @@ func NewNodeActiveMinutesMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NodeLabel,
 			source.ProviderIDLabel,
+			source.UIDLabel,
 		},
 		aggregator.ActiveMinutes,
 		nil,
@@ -469,6 +486,7 @@ func NewNodeCPUModeTotalMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.KubernetesNodeLabel,
 			source.ModeLabel,
+			source.UIDLabel,
 		},
 		aggregator.Rate,
 		nil,
@@ -492,6 +510,7 @@ func NewNodeRAMSystemUsageAverageMetricCollector() *metric.MetricCollector {
 		metric.ContainerMemoryWorkingSetBytes,
 		[]string{
 			source.InstanceLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		func(labels map[string]string) bool {
@@ -517,6 +536,7 @@ func NewNodeRAMUserUsageAverageMetricCollector() *metric.MetricCollector {
 		metric.ContainerMemoryWorkingSetBytes,
 		[]string{
 			source.InstanceLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		func(labels map[string]string) bool {
@@ -541,6 +561,7 @@ func NewLBPricePerHourMetricCollector() *metric.MetricCollector {
 			source.NamespaceLabel,
 			source.ServiceNameLabel,
 			source.IngressIPLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -561,6 +582,7 @@ func NewLBActiveMinutesMetricCollector() *metric.MetricCollector {
 			source.NamespaceLabel,
 			source.ServiceNameLabel,
 			source.IngressIPLabel,
+			source.UIDLabel,
 		},
 		aggregator.ActiveMinutes,
 		nil,
@@ -579,6 +601,7 @@ func NewClusterManagementDurationMetricCollector() *metric.MetricCollector {
 		metric.KubecostClusterManagementCost,
 		[]string{
 			source.ProvisionerNameLabel,
+			source.UIDLabel,
 		},
 		aggregator.ActiveMinutes,
 		nil,
@@ -599,6 +622,7 @@ func NewClusterManagementPricePerHourMetricCollector() *metric.MetricCollector {
 		metric.KubecostClusterManagementCost,
 		[]string{
 			source.ProvisionerNameLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -645,6 +669,7 @@ func NewRAMBytesAllocatedMetricCollector() *metric.MetricCollector {
 			source.InstanceLabel,
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.AverageOverTime,
@@ -676,6 +701,7 @@ func NewRAMRequestsMetricCollector() *metric.MetricCollector {
 			source.InstanceLabel,
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.AverageOverTime,
@@ -704,6 +730,7 @@ func NewRAMUsageAverageMetricCollector() *metric.MetricCollector {
 			source.InstanceLabel,
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.AverageOverTime,
@@ -733,6 +760,7 @@ func NewRAMUsageMaxMetricCollector() *metric.MetricCollector {
 			source.InstanceLabel,
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.MaxOverTime,
@@ -762,6 +790,7 @@ func NewCPUCoresAllocatedMetricCollector() *metric.MetricCollector {
 			source.InstanceLabel,
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.AverageOverTime,
@@ -793,6 +822,7 @@ func NewCPURequestsMetricCollector() *metric.MetricCollector {
 			source.InstanceLabel,
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.AverageOverTime,
@@ -822,6 +852,7 @@ func NewCPUUsageAverageMetricCollector() *metric.MetricCollector {
 			source.InstanceLabel,
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.Rate,
@@ -853,6 +884,7 @@ func NewCPUUsageMaxMetricCollector() *metric.MetricCollector {
 			source.InstanceLabel,
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.IRateMax,
@@ -881,6 +913,7 @@ func NewGPUsRequestedMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.AverageOverTime,
@@ -905,6 +938,7 @@ func NewGPUsUsageAverageMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.AverageOverTime,
@@ -929,6 +963,7 @@ func NewGPUsUsageMaxMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.MaxOverTime,
@@ -956,6 +991,7 @@ func NewGPUsAllocatedMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 		},
 		aggregator.AverageOverTime,
@@ -985,6 +1021,7 @@ func NewIsGPUSharedMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 			source.ResourceLabel,
 		},
@@ -1011,6 +1048,7 @@ func NewGPUInfoMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.ContainerLabel,
 			source.DeviceLabel,
 			source.ModelNameLabel,
@@ -1039,6 +1077,7 @@ func NewNodeCPUPricePerHourMetricCollector() *metric.MetricCollector {
 			source.NodeLabel,
 			source.InstanceTypeLabel,
 			source.ProviderIDLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -1061,6 +1100,7 @@ func NewNodeRAMPricePerGiBHourMetricCollector() *metric.MetricCollector {
 			source.NodeLabel,
 			source.InstanceTypeLabel,
 			source.ProviderIDLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -1083,6 +1123,7 @@ func NewNodeGPUPricePerHourMetricCollector() *metric.MetricCollector {
 			source.NodeLabel,
 			source.InstanceTypeLabel,
 			source.ProviderIDLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -1102,6 +1143,7 @@ func NewNodeIsSpotMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NodeLabel,
 			source.ProviderIDLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -1123,6 +1165,7 @@ func NewPodPVCAllocationMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.PVLabel,
 			source.PVCLabel,
 		},
@@ -1146,6 +1189,7 @@ func NewPVCBytesRequestedMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PVCLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -1166,6 +1210,7 @@ func NewPVBytesMetricCollector() *metric.MetricCollector {
 		metric.KubePersistentVolumeCapacityBytes,
 		[]string{
 			source.PVLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -1188,6 +1233,7 @@ func NewPVInfoMetricCollector() *metric.MetricCollector {
 			source.PVLabel,
 			source.StorageClassLabel,
 			source.ProviderIDLabel,
+			source.UIDLabel,
 		},
 		aggregator.AverageOverTime,
 		nil,
@@ -1213,6 +1259,7 @@ func NewNetZoneGiBMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodNameLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1258,6 +1305,7 @@ func NewNetRegionGiBMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodNameLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1300,6 +1348,7 @@ func NewNetInternetGiBMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodNameLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1343,6 +1392,7 @@ func NewNetInternetServiceGiBMetricCollector() *metric.MetricCollector {
 			source.NamespaceLabel,
 			source.PodNameLabel,
 			source.ServiceLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1367,6 +1417,7 @@ func NewNetReceiveBytesMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1393,6 +1444,7 @@ func NewNetZoneIngressGiBMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodNameLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1421,6 +1473,7 @@ func NewNetRegionIngressGiBMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodNameLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1447,6 +1500,7 @@ func NewNetInternetIngressGiBMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodNameLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1472,6 +1526,7 @@ func NewNetInternetServiceIngressGiBMetricCollector() *metric.MetricCollector {
 			source.NamespaceLabel,
 			source.PodNameLabel,
 			source.ServiceLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1496,6 +1551,7 @@ func NewNetTransferBytesMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1516,6 +1572,7 @@ func NewNamespaceLabelsMetricCollector() *metric.MetricCollector {
 		metric.KubeNamespaceLabels,
 		[]string{
 			source.NamespaceLabel,
+			source.UIDLabel,
 		},
 		aggregator.Info,
 		nil,
@@ -1534,6 +1591,7 @@ func NewNamespaceAnnotationsMetricCollector() *metric.MetricCollector {
 		metric.KubeNamespaceAnnotations,
 		[]string{
 			source.NamespaceLabel,
+			source.UIDLabel,
 		},
 		aggregator.Info,
 		nil,
@@ -1553,6 +1611,7 @@ func NewPodLabelsMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 		},
 		aggregator.Info,
 		nil,
@@ -1572,6 +1631,7 @@ func NewPodAnnotationsMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 		},
 		aggregator.Info,
 		nil,
@@ -1591,6 +1651,7 @@ func NewServiceLabelsMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.ServiceLabel,
+			source.UIDLabel,
 		},
 		aggregator.Info,
 		nil,
@@ -1610,6 +1671,7 @@ func NewDeploymentLabelsMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.DeploymentLabel,
+			source.UIDLabel,
 		},
 		aggregator.Info,
 		nil,
@@ -1629,6 +1691,7 @@ func NewStatefulSetLabelsMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.StatefulSetLabel,
+			source.UIDLabel,
 		},
 		aggregator.Info,
 		nil,
@@ -1651,6 +1714,7 @@ func NewDaemonSetLabelsMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.OwnerNameLabel,
 		},
 		aggregator.Info,
@@ -1676,6 +1740,7 @@ func NewJobLabelsMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.OwnerNameLabel,
 		},
 		aggregator.Info,
@@ -1701,6 +1766,7 @@ func NewPodsWithReplicaSetOwnerMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
+			source.UIDLabel,
 			source.OwnerNameLabel,
 		},
 		aggregator.Info,
@@ -1727,6 +1793,7 @@ func NewReplicaSetsWithoutOwnersMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.ReplicaSetLabel,
+			source.UIDLabel,
 		},
 		aggregator.Info,
 		func(labels map[string]string) bool {
@@ -1751,6 +1818,7 @@ func NewReplicaSetsWithRolloutMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.ReplicaSetLabel,
+			source.UIDLabel,
 			source.OwnerNameLabel,
 			source.OwnerKindLabel,
 		},
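
Every hunk in this file follows the same mechanical pattern: source.UIDLabel is appended to a collector's grouping-label list, so collected series carry the object UID alongside the existing identity labels. Schematically, as a sketch that assumes only the argument order visible in the hunk context (the full NewMetricCollector signature is not shown in this diff):

// Illustrative pattern only; argument order inferred from the context lines
// above (source metric, grouping labels, aggregator, optional label filter).
collector := metric.NewMetricCollector(
	metric.KubeNodeLabels, // the scraped source metric
	[]string{
		source.NodeLabel,
		source.UIDLabel, // the grouping label this commit adds everywhere
	},
	aggregator.Info, // how samples are aggregated over the window
	nil,             // optional filter: func(labels map[string]string) bool
)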

+ 10 - 0
modules/collector-source/pkg/collector/metricsquerier_test.go

@@ -52,6 +52,7 @@ func GetMockCollectorProvider() StoreProvider {
 	gpu1Info := map[string]string{
 		source.NamespaceLabel: "namespace1",
 		source.PodLabel:       "pod1",
+		source.UIDLabel:       "pod-uuid1",
 		"container":           "container1",
 		"gpu":                 "0",
 		"UUID":                "GPU-1",
@@ -299,6 +300,7 @@ func TestCollectorMetricsQuerier_QueryNodeRAMSystemPercent(t *testing.T) {
 	}
 	expected := []*source.NodeRAMSystemPercentResult{
 		{
+			UID:      "pod-uuid2",
 			Cluster:  "",
 			Instance: "node1",
 			Data: []*util.Vector{
@@ -332,6 +334,7 @@ func TestCollectorMetricsQuerier_QueryNodeRAMUserPercent(t *testing.T) {
 	}
 	expected := []*source.NodeRAMUserPercentResult{
 		{
+			UID:      "pod-uuid1",
 			Cluster:  "",
 			Instance: "node1",
 			Data: []*util.Vector{
@@ -403,6 +406,7 @@ func Test_collectorMetricsQuerier_QueryCPUUsageAvg(t *testing.T) {
 	}
 	expected := []*source.CPUUsageAvgResult{
 		{
+			UID:       "pod-uuid1",
 			Cluster:   "",
 			Namespace: "namespace1",
 			Node:      "node1",
@@ -440,6 +444,7 @@ func Test_collectorMetricsQuerier_QueryCPUUsageMax(t *testing.T) {
 	}
 	expected := []*source.CPUUsageMaxResult{
 		{
+			UID:       "pod-uuid1",
 			Cluster:   "",
 			Namespace: "namespace1",
 			Node:      "node1",
@@ -477,6 +482,7 @@ func TestCollectorMetricsQuerier_QueryGPUsUsageAvg(t *testing.T) {
 	}
 	expected := []*source.GPUsUsageAvgResult{
 		{
+			UID:       "pod-uuid1",
 			Cluster:   "",
 			Namespace: "namespace1",
 			Pod:       "pod1",
@@ -512,6 +518,7 @@ func TestCollectorMetricsQuerier_QueryGPUsUsageMax(t *testing.T) {
 	}
 	expected := []*source.GPUsUsageMaxResult{
 		{
+			UID:       "pod-uuid1",
 			Cluster:   "",
 			Namespace: "namespace1",
 			Pod:       "pod1",
@@ -547,6 +554,7 @@ func TestCollectorMetricsQuerier_QueryGPUInfo(t *testing.T) {
 	}
 	expected := []*source.GPUInfoResult{
 		{
+			UID:       "pod-uuid1",
 			Cluster:   "",
 			Namespace: "namespace1",
 			Pod:       "pod1",
@@ -835,6 +843,7 @@ func Test_collectorMetricsQuerier_QueryNetTransferBytes(t *testing.T) {
 	}
 	expected := []*source.NetTransferBytesResult{
 		{
+			UID:       "pod-uuid1",
 			Cluster:   "",
 			Namespace: "namespace1",
 			Pod:       "pod1",
@@ -1022,6 +1031,7 @@ func Test_collectorMetricsQuerier_QueryNetReceiveBytes(t *testing.T) {
 	}
 	expected := []*source.NetReceiveBytesResult{
 		{
+			UID:       "pod-uuid1",
 			Cluster:   "",
 			Namespace: "namespace1",
 			Pod:       "pod1",

+ 8 - 0
modules/collector-source/pkg/scrape/clustercache.go

@@ -56,6 +56,7 @@ func (ccs *ClusterCacheScraper) scrapeNodes(nodes []*clustercache.Node) []metric
 		nodeInfo := map[string]string{
 			source.NodeLabel:       node.Name,
 			source.ProviderIDLabel: node.SpecProviderID,
+			source.UIDLabel:        string(node.UID),
 		}
 
 		// Node Capacity
@@ -134,6 +135,7 @@ func (ccs *ClusterCacheScraper) scrapeDeployments(deployments []*clustercache.De
 		deploymentInfo := map[string]string{
 			source.DeploymentLabel: deployment.Name,
 			source.NamespaceLabel:  deployment.Namespace,
+			source.UIDLabel:        string(deployment.UID),
 		}
 
 		// deployment labels
@@ -168,6 +170,7 @@ func (ccs *ClusterCacheScraper) scrapeNamespaces(namespaces []*clustercache.Name
 	for _, namespace := range namespaces {
 		namespaceInfo := map[string]string{
 			source.NamespaceLabel: namespace.Name,
+			source.UIDLabel:       string(namespace.UID),
 		}
 
 		// namespace labels
@@ -314,6 +317,7 @@ func (ccs *ClusterCacheScraper) scrapePVCs(pvcs []*clustercache.PersistentVolume
 		pvcInfo := map[string]string{
 			source.PVCLabel:          pvc.Name,
 			source.NamespaceLabel:    pvc.Namespace,
+			source.UIDLabel:          string(pvc.UID),
 			source.VolumeNameLabel:   pvc.Spec.VolumeName,
 			source.StorageClassLabel: getPersistentVolumeClaimClass(pvc),
 		}
@@ -358,6 +362,7 @@ func (ccs *ClusterCacheScraper) scrapePVs(pvs []*clustercache.PersistentVolume)
 		}
 		pvInfo := map[string]string{
 			source.PVLabel:           pv.Name,
+			source.UIDLabel:          string(pv.UID),
 			source.StorageClassLabel: pv.Spec.StorageClassName,
 			source.ProviderIDLabel:   providerID,
 		}
@@ -398,6 +403,7 @@ func (ccs *ClusterCacheScraper) scrapeServices(services []*clustercache.Service)
 		serviceInfo := map[string]string{
 			source.ServiceLabel:   service.Name,
 			source.NamespaceLabel: service.Namespace,
+			source.UIDLabel:       string(service.UID),
 		}
 
 		// service labels
@@ -433,6 +439,7 @@ func (ccs *ClusterCacheScraper) scrapeStatefulSets(statefulSets []*clustercache.
 		statefulSetInfo := map[string]string{
 			source.StatefulSetLabel: statefulSet.Name,
 			source.NamespaceLabel:   statefulSet.Namespace,
+			source.UIDLabel:         string(statefulSet.UID),
 		}
 
 		// statefulSet labels
@@ -467,6 +474,7 @@ func (ccs *ClusterCacheScraper) scrapeReplicaSets(replicaSets []*clustercache.Re
 		replicaSetInfo := map[string]string{
 			source.ReplicaSetLabel: replicaSet.Name,
 			source.NamespaceLabel:  replicaSet.Namespace,
+			source.UIDLabel:        string(replicaSet.UID),
 		}
 
 		// this specific metric exports a special <none> value for name and kind
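
The explicit string(...) conversions above are needed because client-go exposes object UIDs as types.UID, a defined string type, which cannot be used directly as a map[string]string value. A runnable illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types" // defines: type UID string
)

func main() {
	var uid types.UID = "0a1b2c3d-0000-0000-0000-000000000000" // example value
	labels := map[string]string{"uid": string(uid)}            // explicit conversion required
	fmt.Println(labels["uid"])
}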

+ 25 - 0
modules/collector-source/pkg/scrape/clustercache_test.go

@@ -36,6 +36,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 					Nodes: []*clustercache.Node{
 						{
 							Name:           "node1",
+							UID:            "uuid1",
 							SpecProviderID: "i-1",
 							Status: v1.NodeStatus{
 								Capacity: v1.ResourceList{
@@ -62,6 +63,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 					Labels: map[string]string{
 						source.NodeLabel:       "node1",
 						source.ProviderIDLabel: "i-1",
+						source.UIDLabel:        "uuid1",
 					},
 					Value:          2.0,
 					AdditionalInfo: nil,
@@ -71,6 +73,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 					Labels: map[string]string{
 						source.NodeLabel:       "node1",
 						source.ProviderIDLabel: "i-1",
+						source.UIDLabel:        "uuid1",
 					},
 					Value:          2048.0,
 					AdditionalInfo: nil,
@@ -80,6 +83,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 					Labels: map[string]string{
 						source.NodeLabel:       "node1",
 						source.ProviderIDLabel: "i-1",
+						source.UIDLabel:        "uuid1",
 					},
 					Value:          1.0,
 					AdditionalInfo: nil,
@@ -89,6 +93,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 					Labels: map[string]string{
 						source.NodeLabel:       "node1",
 						source.ProviderIDLabel: "i-1",
+						source.UIDLabel:        "uuid1",
 					},
 					Value:          1024.0,
 					AdditionalInfo: nil,
@@ -98,6 +103,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 					Labels: map[string]string{
 						source.NodeLabel:       "node1",
 						source.ProviderIDLabel: "i-1",
+						source.UIDLabel:        "uuid1",
 					},
 					Value: 0,
 					AdditionalInfo: map[string]string{
@@ -152,6 +158,7 @@ func Test_kubernetesScraper_scrapeDeployments(t *testing.T) {
 						{
 							Name:      "deployment1",
 							Namespace: "namespace1",
+							UID:       "uuid1",
 							MatchLabels: map[string]string{
 								"test1": "blah",
 								"test2": "blah2",
@@ -168,6 +175,7 @@ func Test_kubernetesScraper_scrapeDeployments(t *testing.T) {
 					Labels: map[string]string{
 						source.DeploymentLabel: "deployment1",
 						source.NamespaceLabel:  "namespace1",
+						source.UIDLabel:        "uuid1",
 					},
 					Value: 0,
 					AdditionalInfo: map[string]string{
@@ -221,6 +229,7 @@ func Test_kubernetesScraper_scrapeNamespaces(t *testing.T) {
 					Namespaces: []*clustercache.Namespace{
 						{
 							Name: "namespace1",
+							UID:  "uuid1",
 							Labels: map[string]string{
 								"test1": "blah",
 								"test2": "blah2",
@@ -239,6 +248,7 @@ func Test_kubernetesScraper_scrapeNamespaces(t *testing.T) {
 					Name: metric.KubeNamespaceLabels,
 					Labels: map[string]string{
 						source.NamespaceLabel: "namespace1",
+						source.UIDLabel:       "uuid1",
 					},
 					Value: 0,
 					AdditionalInfo: map[string]string{
@@ -250,6 +260,7 @@ func Test_kubernetesScraper_scrapeNamespaces(t *testing.T) {
 					Name: metric.KubeNamespaceAnnotations,
 					Labels: map[string]string{
 						source.NamespaceLabel: "namespace1",
+						source.UIDLabel:       "uuid1",
 					},
 					Value: 0,
 					AdditionalInfo: map[string]string{
@@ -484,6 +495,7 @@ func Test_kubernetesScraper_scrapePVCs(t *testing.T) {
 						{
 							Name:      "pvc1",
 							Namespace: "namespace1",
+							UID:       "uuid1",
 							Spec: v1.PersistentVolumeClaimSpec{
 								VolumeName:       "vol1",
 								StorageClassName: util.Ptr("storageClass1"),
@@ -504,6 +516,7 @@ func Test_kubernetesScraper_scrapePVCs(t *testing.T) {
 					Labels: map[string]string{
 						source.PVCLabel:          "pvc1",
 						source.NamespaceLabel:    "namespace1",
+						source.UIDLabel:          "uuid1",
 						source.VolumeNameLabel:   "vol1",
 						source.StorageClassLabel: "storageClass1",
 					},
@@ -515,6 +528,7 @@ func Test_kubernetesScraper_scrapePVCs(t *testing.T) {
 					Labels: map[string]string{
 						source.PVCLabel:          "pvc1",
 						source.NamespaceLabel:    "namespace1",
+						source.UIDLabel:          "uuid1",
 						source.VolumeNameLabel:   "vol1",
 						source.StorageClassLabel: "storageClass1",
 					},
@@ -567,6 +581,7 @@ func Test_kubernetesScraper_scrapePVs(t *testing.T) {
 					PVs: []*clustercache.PersistentVolume{
 						{
 							Name: "pv1",
+							UID:  "uuid1",
 							Spec: v1.PersistentVolumeSpec{
 								StorageClassName: "storageClass1",
 								PersistentVolumeSource: v1.PersistentVolumeSource{
@@ -590,6 +605,7 @@ func Test_kubernetesScraper_scrapePVs(t *testing.T) {
 						source.PVLabel:           "pv1",
 						source.ProviderIDLabel:   "vol-1",
 						source.StorageClassLabel: "storageClass1",
+						source.UIDLabel:          "uuid1",
 					},
 					Value:          0,
 					AdditionalInfo: nil,
@@ -600,6 +616,7 @@ func Test_kubernetesScraper_scrapePVs(t *testing.T) {
 						source.PVLabel:           "pv1",
 						source.ProviderIDLabel:   "vol-1",
 						source.StorageClassLabel: "storageClass1",
+						source.UIDLabel:          "uuid1",
 					},
 					Value:          4096,
 					AdditionalInfo: nil,
@@ -651,6 +668,7 @@ func Test_kubernetesScraper_scrapeServices(t *testing.T) {
 						{
 							Name:      "service1",
 							Namespace: "namespace1",
+							UID:       "uuid1",
 							SpecSelector: map[string]string{
 								"test1": "blah",
 								"test2": "blah2",
@@ -666,6 +684,7 @@ func Test_kubernetesScraper_scrapeServices(t *testing.T) {
 					Labels: map[string]string{
 						"service":             "service1",
 						source.NamespaceLabel: "namespace1",
+						source.UIDLabel:       "uuid1",
 					},
 					Value: 0,
 					AdditionalInfo: map[string]string{
@@ -720,6 +739,7 @@ func Test_kubernetesScraper_scrapeStatefulSets(t *testing.T) {
 						{
 							Name:      "statefulSet1",
 							Namespace: "namespace1",
+							UID:       "uuid1",
 							SpecSelector: &metav1.LabelSelector{
 								MatchLabels: map[string]string{
 									"test1": "blah",
@@ -737,6 +757,7 @@ func Test_kubernetesScraper_scrapeStatefulSets(t *testing.T) {
 					Labels: map[string]string{
 						source.StatefulSetLabel: "statefulSet1",
 						source.NamespaceLabel:   "namespace1",
+						source.UIDLabel:         "uuid1",
 					},
 					Value: 0,
 					AdditionalInfo: map[string]string{
@@ -791,6 +812,7 @@ func Test_kubernetesScraper_scrapeReplicaSets(t *testing.T) {
 						{
 							Name:      "replicaSet1",
 							Namespace: "namespace1",
+							UID:       "uuid1",
 							OwnerReferences: []metav1.OwnerReference{
 								{
 									Name: "rollout1",
@@ -801,6 +823,7 @@ func Test_kubernetesScraper_scrapeReplicaSets(t *testing.T) {
 						{
 							Name:            "pureReplicaSet",
 							Namespace:       "namespace1",
+							UID:             "uuid2",
 							OwnerReferences: []metav1.OwnerReference{},
 						},
 					},
@@ -813,6 +836,7 @@ func Test_kubernetesScraper_scrapeReplicaSets(t *testing.T) {
 					Labels: map[string]string{
 						"replicaset":          "replicaSet1",
 						source.NamespaceLabel: "namespace1",
+						source.UIDLabel:       "uuid1",
 						source.OwnerNameLabel: "rollout1",
 						source.OwnerKindLabel: "Rollout",
 					},
@@ -823,6 +847,7 @@ func Test_kubernetesScraper_scrapeReplicaSets(t *testing.T) {
 					Labels: map[string]string{
 						"replicaset":          "pureReplicaSet",
 						source.NamespaceLabel: "namespace1",
+						source.UIDLabel:       "uuid2",
 						source.OwnerNameLabel: source.NoneLabelValue,
 						source.OwnerKindLabel: source.NoneLabelValue,
 					},

+ 4 - 0
modules/collector-source/pkg/scrape/statsummary.go

@@ -104,6 +104,7 @@ func (s *StatSummaryScraper) Scrape() []metric.Update {
 					Labels: map[string]string{
 						source.PVCLabel:       volumeStats.PVCRef.Name,
 						source.NamespaceLabel: volumeStats.PVCRef.Namespace,
+						source.UIDLabel:       podUID,
 					},
 					Value: float64(*volumeStats.UsedBytes),
 				})
@@ -120,6 +121,7 @@ func (s *StatSummaryScraper) Scrape() []metric.Update {
 							source.NamespaceLabel: namespace,
 							source.NodeLabel:      nodeName,
 							source.InstanceLabel:  nodeName,
+							source.UIDLabel:       podUID,
 						},
 						Value: float64(*container.CPU.UsageCoreNanoSeconds) * 1e-9,
 					})
@@ -133,6 +135,7 @@ func (s *StatSummaryScraper) Scrape() []metric.Update {
 							source.NamespaceLabel: namespace,
 							source.NodeLabel:      nodeName,
 							source.InstanceLabel:  nodeName,
+							source.UIDLabel:       podUID,
 						},
 						Value: float64(*container.Memory.WorkingSetBytes),
 					})
@@ -144,6 +147,7 @@ func (s *StatSummaryScraper) Scrape() []metric.Update {
 						Labels: map[string]string{
 							source.InstanceLabel: nodeName,
 							source.DeviceLabel:   "local",
+							source.UIDLabel:      podUID,
 						},
 						Value: float64(*container.Rootfs.UsedBytes),
 					})

+ 5 - 0
modules/collector-source/pkg/scrape/statsummary_test.go

@@ -230,6 +230,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 					Labels: map[string]string{
 						source.PVCLabel:       "pvc1",
 						source.NamespaceLabel: "namespace1",
+						source.UIDLabel:       "uid1",
 					},
 					Value: float64(1 * util.GB),
 				},
@@ -241,6 +242,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.NamespaceLabel: "namespace1",
 						source.NodeLabel:      "node1",
 						source.InstanceLabel:  "node1",
+						source.UIDLabel:       "uid1",
 					},
 					Value: 1,
 				},
@@ -252,6 +254,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.NamespaceLabel: "namespace1",
 						source.NodeLabel:      "node1",
 						source.InstanceLabel:  "node1",
+						source.UIDLabel:       "uid1",
 					},
 					Value: float64(5 * util.MB),
 				},
@@ -260,6 +263,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 					Labels: map[string]string{
 						source.InstanceLabel: "node1",
 						source.DeviceLabel:   "local",
+						source.UIDLabel:      "uid1",
 					},
 					Value: float64(1 * util.GB),
 				},
@@ -321,6 +325,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 					Labels: map[string]string{
 						source.PVCLabel:       "pvc1",
 						source.NamespaceLabel: "namespace1",
+						source.UIDLabel:       "uid1",
 					},
 					Value: float64(1 * util.GB),
 				},

+ 60 - 60
modules/prometheus-source/pkg/prom/metricsquerier.go
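
Each change in this file adds uid to the by (...) grouping clause of a PromQL template, so the label survives aggregation and can be decoded into the new UID result field. Rendered with illustrative substitutions (the filter, window, and extra grouping label below are assumptions; real values come from the Prometheus config), the first template produces:

package main

import "fmt"

func main() {
	const pvCostQuery = `avg(avg_over_time(pv_hourly_cost{%s}[%s])) by (%s, persistentvolume, volumename, uid, provider_id)`
	// Illustrative substitutions for the three %s placeholders:
	query := fmt.Sprintf(pvCostQuery, `cluster_id="c1"`, "1h", "cluster_id")
	fmt.Println(query)
	// avg(avg_over_time(pv_hourly_cost{cluster_id="c1"}[1h])) by (cluster_id, persistentvolume, volumename, uid, provider_id)
}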

@@ -39,7 +39,7 @@ func newPrometheusMetricsQuerier(
 
 
 func (pds *PrometheusMetricsQuerier) QueryPVPricePerGiBHour(start, end time.Time) *source.Future[source.PVPricePerGiBHourResult] {
 func (pds *PrometheusMetricsQuerier) QueryPVPricePerGiBHour(start, end time.Time) *source.Future[source.PVPricePerGiBHourResult] {
 	const queryName = "QueryPVPricePerGiBHour"
 	const queryName = "QueryPVPricePerGiBHour"
-	const pvCostQuery = `avg(avg_over_time(pv_hourly_cost{%s}[%s])) by (%s, persistentvolume, volumename, provider_id)`
+	const pvCostQuery = `avg(avg_over_time(pv_hourly_cost{%s}[%s])) by (%s, persistentvolume, volumename, uid, provider_id)`
 
 
 	cfg := pds.promConfig
 	cfg := pds.promConfig
 
 
@@ -57,7 +57,7 @@ func (pds *PrometheusMetricsQuerier) QueryPVPricePerGiBHour(start, end time.Time
 
 
 func (pds *PrometheusMetricsQuerier) QueryPVUsedAverage(start, end time.Time) *source.Future[source.PVUsedAvgResult] {
 func (pds *PrometheusMetricsQuerier) QueryPVUsedAverage(start, end time.Time) *source.Future[source.PVUsedAvgResult] {
 	const queryName = "QueryPVUsedAverage"
 	const queryName = "QueryPVUsedAverage"
-	const pvUsedAverageQuery = `avg(avg_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`
+	const pvUsedAverageQuery = `avg(avg_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace, uid)`
 
 
 	cfg := pds.promConfig
 	cfg := pds.promConfig
 
 
@@ -75,7 +75,7 @@ func (pds *PrometheusMetricsQuerier) QueryPVUsedAverage(start, end time.Time) *s
 
 
 func (pds *PrometheusMetricsQuerier) QueryPVUsedMax(start, end time.Time) *source.Future[source.PVUsedMaxResult] {
 func (pds *PrometheusMetricsQuerier) QueryPVUsedMax(start, end time.Time) *source.Future[source.PVUsedMaxResult] {
 	const queryName = "QueryPVUsedMax"
 	const queryName = "QueryPVUsedMax"
-	const pvUsedMaxQuery = `max(max_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`
+	const pvUsedMaxQuery = `max(max_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace, uid)`
 
 
 	cfg := pds.promConfig
 	cfg := pds.promConfig
 
 
@@ -93,7 +93,7 @@ func (pds *PrometheusMetricsQuerier) QueryPVUsedMax(start, end time.Time) *sourc
 
 
 func (pds *PrometheusMetricsQuerier) QueryPVCInfo(start, end time.Time) *source.Future[source.PVCInfoResult] {
 func (pds *PrometheusMetricsQuerier) QueryPVCInfo(start, end time.Time) *source.Future[source.PVCInfoResult] {
 	const queryName = "QueryPVCInfo"
 	const queryName = "QueryPVCInfo"
-	const queryFmtPVCInfo = `avg(kube_persistentvolumeclaim_info{volumename != "", %s}) by (persistentvolumeclaim, storageclass, volumename, namespace, %s)[%s:%dm]`
+	const queryFmtPVCInfo = `avg(kube_persistentvolumeclaim_info{volumename != "", %s}) by (persistentvolumeclaim, storageclass, volumename, namespace, uid, %s)[%s:%dm]`
 
 
 	cfg := pds.promConfig
 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -112,7 +112,7 @@ func (pds *PrometheusMetricsQuerier) QueryPVCInfo(start, end time.Time) *source.
 
 
 func (pds *PrometheusMetricsQuerier) QueryPVActiveMinutes(start, end time.Time) *source.Future[source.PVActiveMinutesResult] {
 func (pds *PrometheusMetricsQuerier) QueryPVActiveMinutes(start, end time.Time) *source.Future[source.PVActiveMinutesResult] {
 	const queryName = "QueryPVActiveMinutes"
 	const queryName = "QueryPVActiveMinutes"
-	const pvActiveMinsQuery = `avg(kube_persistentvolume_capacity_bytes{%s}) by (%s, persistentvolume)[%s:%dm]`
+	const pvActiveMinsQuery = `avg(kube_persistentvolume_capacity_bytes{%s}) by (%s, persistentvolume, uid)[%s:%dm]`
 
 
 	cfg := pds.promConfig
 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -131,7 +131,7 @@ func (pds *PrometheusMetricsQuerier) QueryPVActiveMinutes(start, end time.Time)

 func (pds *PrometheusMetricsQuerier) QueryLocalStorageCost(start, end time.Time) *source.Future[source.LocalStorageCostResult] {
 	const queryName = "QueryLocalStorageCost"
-	const localStorageCostQuery = `sum_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`
+	const localStorageCostQuery = `sum_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, uid, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -156,7 +156,7 @@ func (pds *PrometheusMetricsQuerier) QueryLocalStorageCost(start, end time.Time)

 func (pds *PrometheusMetricsQuerier) QueryLocalStorageUsedCost(start, end time.Time) *source.Future[source.LocalStorageUsedCostResult] {
 	const queryName = "QueryLocalStorageUsedCost"
-	const localStorageUsedCostQuery = `sum_over_time(sum(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`
+	const localStorageUsedCostQuery = `sum_over_time(sum(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, uid, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -181,7 +181,7 @@ func (pds *PrometheusMetricsQuerier) QueryLocalStorageUsedCost(start, end time.T

 func (pds *PrometheusMetricsQuerier) QueryLocalStorageUsedAvg(start, end time.Time) *source.Future[source.LocalStorageUsedAvgResult] {
 	const queryName = "QueryLocalStorageUsedAvg"
-	const localStorageUsedAvgQuery = `avg(sum(avg_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`
+	const localStorageUsedAvgQuery = `avg(sum(avg_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, uid, %s, job)) by (instance, device, uid, %s)`

 	cfg := pds.promConfig

@@ -199,7 +199,7 @@ func (pds *PrometheusMetricsQuerier) QueryLocalStorageUsedAvg(start, end time.Ti

 func (pds *PrometheusMetricsQuerier) QueryLocalStorageUsedMax(start, end time.Time) *source.Future[source.LocalStorageUsedMaxResult] {
 	const queryName = "QueryLocalStorageUsedMax"
-	const localStorageUsedMaxQuery = `max(sum(max_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`
+	const localStorageUsedMaxQuery = `max(sum(max_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, uid, %s, job)) by (instance, device, uid, %s)`

 	cfg := pds.promConfig

@@ -217,7 +217,7 @@ func (pds *PrometheusMetricsQuerier) QueryLocalStorageUsedMax(start, end time.T

 func (pds *PrometheusMetricsQuerier) QueryLocalStorageBytes(start, end time.Time) *source.Future[source.LocalStorageBytesResult] {
 	const queryName = "QueryLocalStorageBytes"
-	const localStorageBytesQuery = `avg_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm])`
+	const localStorageBytesQuery = `avg_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, uid, %s)[%s:%dm])`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -236,7 +236,7 @@ func (pds *PrometheusMetricsQuerier) QueryLocalStorageBytes(start, end time.Time

 func (pds *PrometheusMetricsQuerier) QueryLocalStorageActiveMinutes(start, end time.Time) *source.Future[source.LocalStorageActiveMinutesResult] {
 	const queryName = "QueryLocalStorageActiveMinutes"
-	const localStorageActiveMinutesQuery = `count(node_total_hourly_cost{%s}) by (%s, node, instance, provider_id)[%s:%dm]`
+	const localStorageActiveMinutesQuery = `count(node_total_hourly_cost{%s}) by (%s, node, uid, instance, provider_id)[%s:%dm]`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -255,7 +255,7 @@ func (pds *PrometheusMetricsQuerier) QueryLocalStorageActiveMinutes(start, end t

 func (pds *PrometheusMetricsQuerier) QueryNodeCPUCoresCapacity(start, end time.Time) *source.Future[source.NodeCPUCoresCapacityResult] {
 	const queryName = "QueryNodeCPUCoresCapacity"
-	const nodeCPUCoresCapacityQuery = `avg(avg_over_time(kube_node_status_capacity_cpu_cores{%s}[%s])) by (%s, node)`
+	const nodeCPUCoresCapacityQuery = `avg(avg_over_time(kube_node_status_capacity_cpu_cores{%s}[%s])) by (%s, node, uid)`

 	cfg := pds.promConfig

@@ -273,7 +273,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeCPUCoresCapacity(start, end time.T

 func (pds *PrometheusMetricsQuerier) QueryNodeCPUCoresAllocatable(start, end time.Time) *source.Future[source.NodeCPUCoresAllocatableResult] {
 	const queryName = "QueryNodeCPUCoresAllocatable"
-	const nodeCPUCoresAllocatableQuery = `avg(avg_over_time(kube_node_status_allocatable_cpu_cores{%s}[%s])) by (%s, node)`
+	const nodeCPUCoresAllocatableQuery = `avg(avg_over_time(kube_node_status_allocatable_cpu_cores{%s}[%s])) by (%s, node, uid)`
 	// `avg(avg_over_time(container_cpu_allocation{container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s)`

 	cfg := pds.promConfig
@@ -292,7 +292,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeCPUCoresAllocatable(start, end tim

 func (pds *PrometheusMetricsQuerier) QueryNodeRAMBytesCapacity(start, end time.Time) *source.Future[source.NodeRAMBytesCapacityResult] {
 	const queryName = "QueryNodeRAMBytesCapacity"
-	const nodeRAMBytesCapacityQuery = `avg(avg_over_time(kube_node_status_capacity_memory_bytes{%s}[%s])) by (%s, node)`
+	const nodeRAMBytesCapacityQuery = `avg(avg_over_time(kube_node_status_capacity_memory_bytes{%s}[%s])) by (%s, node, uid)`

 	cfg := pds.promConfig

@@ -310,7 +310,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeRAMBytesCapacity(start, end time.T

 func (pds *PrometheusMetricsQuerier) QueryNodeRAMBytesAllocatable(start, end time.Time) *source.Future[source.NodeRAMBytesAllocatableResult] {
 	const queryName = "QueryNodeRAMBytesAllocatable"
-	const nodeRAMBytesAllocatableQuery = `avg(avg_over_time(kube_node_status_allocatable_memory_bytes{%s}[%s])) by (%s, node)`
+	const nodeRAMBytesAllocatableQuery = `avg(avg_over_time(kube_node_status_allocatable_memory_bytes{%s}[%s])) by (%s, node, uid)`

 	cfg := pds.promConfig

@@ -328,7 +328,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeRAMBytesAllocatable(start, end tim

 func (pds *PrometheusMetricsQuerier) QueryNodeGPUCount(start, end time.Time) *source.Future[source.NodeGPUCountResult] {
 	const queryName = "QueryNodeGPUCount"
-	const nodeGPUCountQuery = `avg(avg_over_time(node_gpu_count{%s}[%s])) by (%s, node, provider_id)`
+	const nodeGPUCountQuery = `avg(avg_over_time(node_gpu_count{%s}[%s])) by (%s, node, uid, provider_id)`

 	cfg := pds.promConfig

@@ -364,7 +364,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeLabels(start, end time.Time) *sour

 func (pds *PrometheusMetricsQuerier) QueryNodeActiveMinutes(start, end time.Time) *source.Future[source.NodeActiveMinutesResult] {
 	const queryName = "QueryNodeActiveMinutes"
-	const activeMinsQuery = `avg(node_total_hourly_cost{%s}) by (node, %s, provider_id)[%s:%dm]`
+	const activeMinsQuery = `avg(node_total_hourly_cost{%s}) by (node, uid, %s, provider_id)[%s:%dm]`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -383,7 +383,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeActiveMinutes(start, end time.Time

 func (pds *PrometheusMetricsQuerier) QueryNodeCPUModeTotal(start, end time.Time) *source.Future[source.NodeCPUModeTotalResult] {
 	const queryName = "QueryNodeCPUModeTotal"
-	const nodeCPUModeTotalQuery = `sum(rate(node_cpu_seconds_total{%s}[%s:%dm])) by (kubernetes_node, %s, mode)`
+	const nodeCPUModeTotalQuery = `sum(rate(node_cpu_seconds_total{%s}[%s:%dm])) by (kubernetes_node, uid, %s, mode)`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -401,7 +401,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeCPUModeTotal(start, end time.Time)
 }
 func (pds *PrometheusMetricsQuerier) QueryNodeRAMSystemPercent(start, end time.Time) *source.Future[source.NodeRAMSystemPercentResult] {
 	const queryName = "QueryNodeRAMSystemPercent"
-	const nodeRAMSystemPctQuery = `sum(sum_over_time(container_memory_working_set_bytes{container_name!="POD",container_name!="",namespace="kube-system", %s}[%s:%dm])) by (instance, %s) / avg(label_replace(sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm])) by (node, %s), "instance", "$1", "node", "(.*)")) by (instance, %s)`
+	const nodeRAMSystemPctQuery = `sum(sum_over_time(container_memory_working_set_bytes{container_name!="POD",container_name!="",namespace="kube-system", %s}[%s:%dm])) by (instance, uid, %s) / avg(label_replace(sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm])) by (node, uid, %s), "instance", "$1", "node", "(.*)")) by (instance, uid, %s)`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
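The RAM-percent queries divide a numerator keyed by instance against a denominator keyed by node; label_replace(v, "instance", "$1", "node", "(.*)") bridges the two by copying each series' node value into an instance label so the division matches series. A hedged sketch of the substitution, with placeholder filter and label values:

package main

import "fmt"

const nodeRAMSystemPctQuery = `sum(sum_over_time(container_memory_working_set_bytes{container_name!="POD",container_name!="",namespace="kube-system", %s}[%s:%dm])) by (instance, uid, %s) / avg(label_replace(sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm])) by (node, uid, %s), "instance", "$1", "node", "(.*)")) by (instance, uid, %s)`

func main() {
	// Placeholder values, not the repository's configuration.
	filter, label, dur, res := `cluster_id="demo"`, "cluster_id", "24h", 5
	fmt.Println(fmt.Sprintf(nodeRAMSystemPctQuery,
		filter, dur, res, label, // numerator: filter, window, resolution, grouping label
		filter, dur, res, label, // denominator: same four, keyed by node before label_replace
		label)) // outer by () on the denominator
}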
@@ -420,7 +420,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeRAMSystemPercent(start, end time.T

 func (pds *PrometheusMetricsQuerier) QueryNodeRAMUserPercent(start, end time.Time) *source.Future[source.NodeRAMUserPercentResult] {
 	const queryName = "QueryNodeRAMUserPercent"
-	const nodeRAMUserPctQuery = `sum(sum_over_time(container_memory_working_set_bytes{container_name!="POD",container_name!="",namespace!="kube-system", %s}[%s:%dm])) by (instance, %s) / avg(label_replace(sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm])) by (node, %s), "instance", "$1", "node", "(.*)")) by (instance, %s)`
+	const nodeRAMUserPctQuery = `sum(sum_over_time(container_memory_working_set_bytes{container_name!="POD",container_name!="",namespace!="kube-system", %s}[%s:%dm])) by (instance, uid, %s) / avg(label_replace(sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm])) by (node, uid, %s), "instance", "$1", "node", "(.*)")) by (instance, uid, %s)`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -439,7 +439,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeRAMUserPercent(start, end time.Tim

 func (pds *PrometheusMetricsQuerier) QueryLBPricePerHr(start, end time.Time) *source.Future[source.LBPricePerHrResult] {
 	const queryName = "QueryLBPricePerHr"
-	const queryFmtLBCostPerHr = `avg(avg_over_time(kubecost_load_balancer_cost{%s}[%s])) by (namespace, service_name, ingress_ip, %s)`
+	const queryFmtLBCostPerHr = `avg(avg_over_time(kubecost_load_balancer_cost{%s}[%s])) by (namespace, service_name, ingress_ip, uid, %s)`

 	cfg := pds.promConfig

@@ -457,7 +457,7 @@ func (pds *PrometheusMetricsQuerier) QueryLBPricePerHr(start, end time.Time) *so

 func (pds *PrometheusMetricsQuerier) QueryLBActiveMinutes(start, end time.Time) *source.Future[source.LBActiveMinutesResult] {
 	const queryName = "QueryLBActiveMinutes"
-	const lbActiveMinutesQuery = `avg(kubecost_load_balancer_cost{%s}) by (namespace, service_name, %s, ingress_ip)[%s:%dm]`
+	const lbActiveMinutesQuery = `avg(kubecost_load_balancer_cost{%s}) by (namespace, service_name, uid, %s, ingress_ip)[%s:%dm]`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -515,7 +515,7 @@ func (pds *PrometheusMetricsQuerier) QueryClusterManagementPricePerHr(start, end

 func (pds *PrometheusMetricsQuerier) QueryPods(start, end time.Time) *source.Future[source.PodsResult] {
 	const queryName = "QueryPods"
-	const queryFmtPods = `avg(kube_pod_container_status_running{%s} != 0) by (pod, namespace, %s)[%s:%dm]`
+	const queryFmtPods = `avg(kube_pod_container_status_running{%s} != 0) by (pod, namespace, uid, %s)[%s:%dm]`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -553,7 +553,7 @@ func (pds *PrometheusMetricsQuerier) QueryPodsUID(start, end time.Time) *source.

 func (pds *PrometheusMetricsQuerier) QueryRAMBytesAllocated(start, end time.Time) *source.Future[source.RAMBytesAllocatedResult] {
 	const queryName = "QueryRAMBytesAllocated"
-	const queryFmtRAMBytesAllocated = `avg(avg_over_time(container_memory_allocation_bytes{container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s, provider_id)`
+	const queryFmtRAMBytesAllocated = `avg(avg_over_time(container_memory_allocation_bytes{container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, uid, %s, provider_id)`

 	cfg := pds.promConfig

@@ -571,7 +571,7 @@ func (pds *PrometheusMetricsQuerier) QueryRAMBytesAllocated(start, end time.Time

 func (pds *PrometheusMetricsQuerier) QueryRAMRequests(start, end time.Time) *source.Future[source.RAMRequestsResult] {
 	const queryName = "QueryRAMRequests"
-	const queryFmtRAMRequests = `avg(avg_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s)`
+	const queryFmtRAMRequests = `avg(avg_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, uid, %s)`

 	cfg := pds.promConfig

@@ -589,7 +589,7 @@ func (pds *PrometheusMetricsQuerier) QueryRAMRequests(start, end time.Time) *sou

 func (pds *PrometheusMetricsQuerier) QueryRAMUsageAvg(start, end time.Time) *source.Future[source.RAMUsageAvgResult] {
 	const queryName = "QueryRAMUsageAvg"
-	const queryFmtRAMUsageAvg = `avg(avg_over_time(container_memory_working_set_bytes{container!="", container_name!="POD", container!="POD", %s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, %s)`
+	const queryFmtRAMUsageAvg = `avg(avg_over_time(container_memory_working_set_bytes{container!="", container_name!="POD", container!="POD", %s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, uid, %s)`

 	cfg := pds.promConfig

@@ -607,7 +607,7 @@ func (pds *PrometheusMetricsQuerier) QueryRAMUsageAvg(start, end time.Time) *sou

 func (pds *PrometheusMetricsQuerier) QueryRAMUsageMax(start, end time.Time) *source.Future[source.RAMUsageMaxResult] {
 	const queryName = "QueryRAMUsageMax"
-	const queryFmtRAMUsageMax = `max(max_over_time(container_memory_working_set_bytes{container!="", container_name!="POD", container!="POD", %s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, %s)`
+	const queryFmtRAMUsageMax = `max(max_over_time(container_memory_working_set_bytes{container!="", container_name!="POD", container!="POD", %s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, uid, %s)`

 	cfg := pds.promConfig

@@ -625,7 +625,7 @@ func (pds *PrometheusMetricsQuerier) QueryRAMUsageMax(start, end time.Time) *sou

 func (pds *PrometheusMetricsQuerier) QueryCPUCoresAllocated(start, end time.Time) *source.Future[source.CPUCoresAllocatedResult] {
 	const queryName = "QueryCPUCoresAllocated"
-	const queryFmtCPUCoresAllocated = `avg(avg_over_time(container_cpu_allocation{container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s)`
+	const queryFmtCPUCoresAllocated = `avg(avg_over_time(container_cpu_allocation{container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, uid, %s)`

 	cfg := pds.promConfig

@@ -643,7 +643,7 @@ func (pds *PrometheusMetricsQuerier) QueryCPUCoresAllocated(start, end time.Time

 func (pds *PrometheusMetricsQuerier) QueryCPURequests(start, end time.Time) *source.Future[source.CPURequestsResult] {
 	const queryName = "QueryCPURequests"
-	const queryFmtCPURequests = `avg(avg_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s)`
+	const queryFmtCPURequests = `avg(avg_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, uid, %s)`

 	cfg := pds.promConfig

@@ -661,7 +661,7 @@ func (pds *PrometheusMetricsQuerier) QueryCPURequests(start, end time.Time) *sou

 func (pds *PrometheusMetricsQuerier) QueryCPUUsageAvg(start, end time.Time) *source.Future[source.CPUUsageAvgResult] {
 	const queryName = "QueryCPUUsageAvg"
-	const queryFmtCPUUsageAvg = `avg(rate(container_cpu_usage_seconds_total{container!="", container_name!="POD", container!="POD", %s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, %s)`
+	const queryFmtCPUUsageAvg = `avg(rate(container_cpu_usage_seconds_total{container!="", container_name!="POD", container!="POD", %s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, uid, %s)`

 	cfg := pds.promConfig

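Note that QueryCPUUsageAvg applies rate() over the entire query window, which yields the average number of cores in use across that window rather than an instantaneous rate. A hedged illustration with placeholder values:

package main

import "fmt"

const queryFmtCPUUsageAvg = `avg(rate(container_cpu_usage_seconds_total{container!="", container_name!="POD", container!="POD", %s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, uid, %s)`

func main() {
	// rate() over the full window converts cumulative CPU seconds into
	// average cores used; filter and label values are placeholders.
	fmt.Println(fmt.Sprintf(queryFmtCPUUsageAvg, `cluster_id="demo"`, "24h", "cluster_id"))
}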
@@ -692,7 +692,7 @@ func (pds *PrometheusMetricsQuerier) QueryCPUUsageMax(start, end time.Time) *sou
 	//
 	// If changing the name of the recording rule, make sure to update the
 	// corresponding diagnostic query to avoid confusion.
-	const queryFmtCPUUsageMaxRecordingRule = `max(max_over_time(kubecost_container_cpu_usage_irate{%s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, %s)`
+	const queryFmtCPUUsageMaxRecordingRule = `max(max_over_time(kubecost_container_cpu_usage_irate{%s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, uid, %s)`

 	// This is the subquery equivalent of the above recording rule query. It is
 	// more expensive, but does not require the recording rule. It should be
@@ -704,7 +704,7 @@ func (pds *PrometheusMetricsQuerier) QueryCPUUsageMax(start, end time.Time) *sou
 	// the resolution, to make sure the irate always has two points to query
 	// in case the Prom scrape duration has been reduced to be equal to the
 	// query resolution.
-	const queryFmtCPUUsageMaxSubquery = `max(max_over_time(irate(container_cpu_usage_seconds_total{container!="POD", container!="", %s}[%dm])[%s:%dm])) by (container, pod_name, pod, namespace, node, instance, %s)`
+	const queryFmtCPUUsageMaxSubquery = `max(max_over_time(irate(container_cpu_usage_seconds_total{container!="POD", container!="", %s}[%dm])[%s:%dm])) by (container, pod_name, pod, namespace, node, instance, uid, %s)`

 	cfg := pds.promConfig
 	durStr := timeutil.DurationString(end.Sub(start))
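Both constants above compute the same maximum: the recording-rule form is cheap but presupposes that kubecost_container_cpu_usage_irate exists, while the subquery form recomputes irate() inline at greater cost. A sketch of how the two formats expand (placeholder filter, label, window, and resolution; how the code chooses between them is outside this hunk):

package main

import "fmt"

const queryFmtCPUUsageMaxRecordingRule = `max(max_over_time(kubecost_container_cpu_usage_irate{%s}[%s])) by (container_name, container, pod_name, pod, namespace, node, instance, uid, %s)`
const queryFmtCPUUsageMaxSubquery = `max(max_over_time(irate(container_cpu_usage_seconds_total{container!="POD", container!="", %s}[%dm])[%s:%dm])) by (container, pod_name, pod, namespace, node, instance, uid, %s)`

func main() {
	filter, label, dur, res := `cluster_id="demo"`, "cluster_id", "24h", 5
	fmt.Println(fmt.Sprintf(queryFmtCPUUsageMaxRecordingRule, filter, dur, label))
	// The irate window is twice the resolution so it always spans at
	// least two scrape samples (see the comment above).
	fmt.Println(fmt.Sprintf(queryFmtCPUUsageMaxSubquery, filter, 2*res, dur, res, label))
}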
@@ -738,7 +738,7 @@ func (pds *PrometheusMetricsQuerier) QueryCPUUsageMax(start, end time.Time) *sou

 func (pds *PrometheusMetricsQuerier) QueryGPUsRequested(start, end time.Time) *source.Future[source.GPUsRequestedResult] {
 	const queryName = "QueryGPUsRequested"
-	const queryFmtGPUsRequested = `avg(avg_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s)`
+	const queryFmtGPUsRequested = `avg(avg_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, uid, %s)`

 	cfg := pds.promConfig

@@ -756,7 +756,7 @@ func (pds *PrometheusMetricsQuerier) QueryGPUsRequested(start, end time.Time) *s

 func (pds *PrometheusMetricsQuerier) QueryGPUsUsageAvg(start, end time.Time) *source.Future[source.GPUsUsageAvgResult] {
 	const queryName = "QueryGPUsUsageAvg"
-	const queryFmtGPUsUsageAvg = `avg(avg_over_time(DCGM_FI_PROF_GR_ENGINE_ACTIVE{container!=""}[%s])) by (container, pod, namespace, %s)`
+	const queryFmtGPUsUsageAvg = `avg(avg_over_time(DCGM_FI_PROF_GR_ENGINE_ACTIVE{container!=""}[%s])) by (container, pod, namespace, uid, %s)`

 	cfg := pds.promConfig

@@ -774,7 +774,7 @@ func (pds *PrometheusMetricsQuerier) QueryGPUsUsageAvg(start, end time.Time) *so

 func (pds *PrometheusMetricsQuerier) QueryGPUsUsageMax(start, end time.Time) *source.Future[source.GPUsUsageMaxResult] {
 	const queryName = "QueryGPUsUsageMax"
-	const queryFmtGPUsUsageMax = `max(max_over_time(DCGM_FI_PROF_GR_ENGINE_ACTIVE{container!=""}[%s])) by (container, pod, namespace, %s)`
+	const queryFmtGPUsUsageMax = `max(max_over_time(DCGM_FI_PROF_GR_ENGINE_ACTIVE{container!=""}[%s])) by (container, pod, namespace, uid, %s)`

 	cfg := pds.promConfig

@@ -792,7 +792,7 @@ func (pds *PrometheusMetricsQuerier) QueryGPUsUsageMax(start, end time.Time) *so

 func (pds *PrometheusMetricsQuerier) QueryGPUsAllocated(start, end time.Time) *source.Future[source.GPUsAllocatedResult] {
 	const queryName = "QueryGPUsAllocated"
-	const queryFmtGPUsAllocated = `avg(avg_over_time(container_gpu_allocation{container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s)`
+	const queryFmtGPUsAllocated = `avg(avg_over_time(container_gpu_allocation{container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, uid, %s)`

 	cfg := pds.promConfig

@@ -810,7 +810,7 @@ func (pds *PrometheusMetricsQuerier) QueryGPUsAllocated(start, end time.Time) *s

 func (pds *PrometheusMetricsQuerier) QueryIsGPUShared(start, end time.Time) *source.Future[source.IsGPUSharedResult] {
 	const queryName = "QueryIsGPUShared"
-	const queryFmtIsGPUShared = `avg(avg_over_time(kube_pod_container_resource_requests{container!="", node != "", pod != "", container!= "", unit = "integer",  %s}[%s])) by (container, pod, namespace, node, resource, %s)`
+	const queryFmtIsGPUShared = `avg(avg_over_time(kube_pod_container_resource_requests{container!="", node != "", pod != "", container!= "", unit = "integer",  %s}[%s])) by (container, pod, namespace, node, resource, uid, %s)`

 	cfg := pds.promConfig

@@ -828,7 +828,7 @@ func (pds *PrometheusMetricsQuerier) QueryIsGPUShared(start, end time.Time) *sou

 func (pds *PrometheusMetricsQuerier) QueryGPUInfo(start, end time.Time) *source.Future[source.GPUInfoResult] {
 	const queryName = "QueryGPUInfo"
-	const queryFmtGetGPUInfo = `avg(avg_over_time(DCGM_FI_DEV_DEC_UTIL{container!="",%s}[%s])) by (container, pod, namespace, device, modelName, UUID, %s)`
+	const queryFmtGetGPUInfo = `avg(avg_over_time(DCGM_FI_DEV_DEC_UTIL{container!="",%s}[%s])) by (container, pod, namespace, device, modelName, UUID, uid, %s)`

 	cfg := pds.promConfig

@@ -846,7 +846,7 @@ func (pds *PrometheusMetricsQuerier) QueryGPUInfo(start, end time.Time) *source.

 func (pds *PrometheusMetricsQuerier) QueryNodeCPUPricePerHr(start, end time.Time) *source.Future[source.NodeCPUPricePerHrResult] {
 	const queryName = "QueryNodeCPUPricePerHr"
-	const queryFmtNodeCostPerCPUHr = `avg(avg_over_time(node_cpu_hourly_cost{%s}[%s])) by (node, %s, instance_type, provider_id)`
+	const queryFmtNodeCostPerCPUHr = `avg(avg_over_time(node_cpu_hourly_cost{%s}[%s])) by (node, uid, %s, instance_type, provider_id)`

 	cfg := pds.promConfig

@@ -864,7 +864,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeCPUPricePerHr(start, end time.Time

 func (pds *PrometheusMetricsQuerier) QueryNodeRAMPricePerGiBHr(start, end time.Time) *source.Future[source.NodeRAMPricePerGiBHrResult] {
 	const queryName = "QueryNodeRAMPricePerGiBHr"
-	const queryFmtNodeCostPerRAMGiBHr = `avg(avg_over_time(node_ram_hourly_cost{%s}[%s])) by (node, %s, instance_type, provider_id)`
+	const queryFmtNodeCostPerRAMGiBHr = `avg(avg_over_time(node_ram_hourly_cost{%s}[%s])) by (node, uid, %s, instance_type, provider_id)`

 	cfg := pds.promConfig

@@ -882,7 +882,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeRAMPricePerGiBHr(start, end time.T

 func (pds *PrometheusMetricsQuerier) QueryNodeGPUPricePerHr(start, end time.Time) *source.Future[source.NodeGPUPricePerHrResult] {
 	const queryName = "QueryNodeGPUPricePerHr"
-	const queryFmtNodeCostPerGPUHr = `avg(avg_over_time(node_gpu_hourly_cost{%s}[%s])) by (node, %s, instance_type, provider_id)`
+	const queryFmtNodeCostPerGPUHr = `avg(avg_over_time(node_gpu_hourly_cost{%s}[%s])) by (node, uid, %s, instance_type, provider_id)`

 	cfg := pds.promConfig

@@ -918,7 +918,7 @@ func (pds *PrometheusMetricsQuerier) QueryNodeIsSpot(start, end time.Time) *sour

 func (pds *PrometheusMetricsQuerier) QueryPodPVCAllocation(start, end time.Time) *source.Future[source.PodPVCAllocationResult] {
 	const queryName = "QueryPodPVCAllocation"
-	const queryFmtPodPVCAllocation = `avg(avg_over_time(pod_pvc_allocation{%s}[%s])) by (persistentvolume, persistentvolumeclaim, pod, namespace, %s)`
+	const queryFmtPodPVCAllocation = `avg(avg_over_time(pod_pvc_allocation{%s}[%s])) by (persistentvolume, persistentvolumeclaim, pod, namespace, uid, %s)`

 	cfg := pds.promConfig

@@ -936,7 +936,7 @@ func (pds *PrometheusMetricsQuerier) QueryPodPVCAllocation(start, end time.Time)

 func (pds *PrometheusMetricsQuerier) QueryPVCBytesRequested(start, end time.Time) *source.Future[source.PVCBytesRequestedResult] {
 	const queryName = "QueryPVCBytesRequested"
-	const queryFmtPVCBytesRequested = `avg(avg_over_time(kube_persistentvolumeclaim_resource_requests_storage_bytes{%s}[%s])) by (persistentvolumeclaim, namespace, %s)`
+	const queryFmtPVCBytesRequested = `avg(avg_over_time(kube_persistentvolumeclaim_resource_requests_storage_bytes{%s}[%s])) by (persistentvolumeclaim, namespace, uid, %s)`

 	cfg := pds.promConfig

@@ -954,7 +954,7 @@ func (pds *PrometheusMetricsQuerier) QueryPVCBytesRequested(start, end time.Time

 func (pds *PrometheusMetricsQuerier) QueryPVBytes(start, end time.Time) *source.Future[source.PVBytesResult] {
 	const queryName = "QueryPVBytes"
-	const queryFmtPVBytes = `avg(avg_over_time(kube_persistentvolume_capacity_bytes{%s}[%s])) by (persistentvolume, %s)`
+	const queryFmtPVBytes = `avg(avg_over_time(kube_persistentvolume_capacity_bytes{%s}[%s])) by (persistentvolume, uid, %s)`

 	cfg := pds.promConfig

@@ -972,7 +972,7 @@ func (pds *PrometheusMetricsQuerier) QueryPVBytes(start, end time.Time) *source.

 func (pds *PrometheusMetricsQuerier) QueryPVInfo(start, end time.Time) *source.Future[source.PVInfoResult] {
 	const queryName = "QueryPVInfo"
-	const queryFmtPVMeta = `avg(avg_over_time(kubecost_pv_info{%s}[%s])) by (%s, storageclass, persistentvolume, provider_id)`
+	const queryFmtPVMeta = `avg(avg_over_time(kubecost_pv_info{%s}[%s])) by (%s, storageclass, persistentvolume, uid, provider_id)`

 	cfg := pds.promConfig

@@ -990,7 +990,7 @@ func (pds *PrometheusMetricsQuerier) QueryPVInfo(start, end time.Time) *source.F

 func (pds *PrometheusMetricsQuerier) QueryNetZoneGiB(start, end time.Time) *source.Future[source.NetZoneGiBResult] {
 	const queryName = "QueryNetZoneGiB"
-	const queryFmtNetZoneGiB = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", same_zone="false", same_region="true", %s}[%s:%dm])) by (pod_name, namespace, %s) / 1024 / 1024 / 1024`
+	const queryFmtNetZoneGiB = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", same_zone="false", same_region="true", %s}[%s:%dm])) by (pod_name, namespace, uid, %s) / 1024 / 1024 / 1024`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
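The network queries that follow share one shape: increase() over the [window:resolution] subquery totals the counter's growth for the period, and the trailing / 1024 / 1024 / 1024 converts bytes to GiB. A hedged expansion with placeholder values:

package main

import "fmt"

const queryFmtNetZoneGiB = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", same_zone="false", same_region="true", %s}[%s:%dm])) by (pod_name, namespace, uid, %s) / 1024 / 1024 / 1024`

func main() {
	// Placeholder filter, window, resolution, and grouping label.
	fmt.Println(fmt.Sprintf(queryFmtNetZoneGiB, `cluster_id="demo"`, "24h", 5, "cluster_id"))
}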
@@ -1027,7 +1027,7 @@ func (pds *PrometheusMetricsQuerier) QueryNetZonePricePerGiB(start, end time.Tim

 func (pds *PrometheusMetricsQuerier) QueryNetRegionGiB(start, end time.Time) *source.Future[source.NetRegionGiBResult] {
 	const queryName = "QueryNetRegionGiB"
-	const queryFmtNetRegionGiB = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", same_zone="false", same_region="false", %s}[%s:%dm])) by (pod_name, namespace, %s) / 1024 / 1024 / 1024`
+	const queryFmtNetRegionGiB = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", same_zone="false", same_region="false", %s}[%s:%dm])) by (pod_name, namespace, uid, %s) / 1024 / 1024 / 1024`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -1064,7 +1064,7 @@ func (pds *PrometheusMetricsQuerier) QueryNetRegionPricePerGiB(start, end time.T

 func (pds *PrometheusMetricsQuerier) QueryNetInternetGiB(start, end time.Time) *source.Future[source.NetInternetGiBResult] {
 	const queryName = "QueryNetInternetGiB"
-	const queryFmtNetInternetGiB = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s:%dm])) by (pod_name, namespace, %s) / 1024 / 1024 / 1024`
+	const queryFmtNetInternetGiB = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s:%dm])) by (pod_name, namespace, uid, %s) / 1024 / 1024 / 1024`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -1101,7 +1101,7 @@ func (pds *PrometheusMetricsQuerier) QueryNetInternetPricePerGiB(start, end time

 func (pds *PrometheusMetricsQuerier) QueryNetInternetServiceGiB(start, end time.Time) *source.Future[source.NetInternetServiceGiBResult] {
 	const queryName = "QueryNetInternetServiceGiB"
-	const queryFmtNetInternetGiB = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s:%dm])) by (pod_name, namespace, service, %s) / 1024 / 1024 / 1024`
+	const queryFmtNetInternetGiB = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s:%dm])) by (pod_name, namespace, service, uid, %s) / 1024 / 1024 / 1024`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -1120,7 +1120,7 @@ func (pds *PrometheusMetricsQuerier) QueryNetInternetServiceGiB(start, end time.

 func (pds *PrometheusMetricsQuerier) QueryNetTransferBytes(start, end time.Time) *source.Future[source.NetTransferBytesResult] {
 	const queryName = "QueryNetTransferBytes"
-	const queryFmtNetTransferBytes = `sum(increase(container_network_transmit_bytes_total{pod!="", %s}[%s:%dm])) by (pod_name, pod, namespace, %s)`
+	const queryFmtNetTransferBytes = `sum(increase(container_network_transmit_bytes_total{pod!="", %s}[%s:%dm])) by (pod_name, pod, namespace, uid, %s)`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -1139,7 +1139,7 @@ func (pds *PrometheusMetricsQuerier) QueryNetTransferBytes(start, end time.Time)

 func (pds *PrometheusMetricsQuerier) QueryNetZoneIngressGiB(start, end time.Time) *source.Future[source.NetZoneIngressGiBResult] {
 	const queryName = "QueryNetZoneIngressGiB"
-	const queryFmtIngNetZoneGiB = `sum(increase(kubecost_pod_network_ingress_bytes_total{internet="false", same_zone="false", same_region="true", %s}[%s:%dm])) by (pod_name, namespace, %s) / 1024 / 1024 / 1024`
+	const queryFmtIngNetZoneGiB = `sum(increase(kubecost_pod_network_ingress_bytes_total{internet="false", same_zone="false", same_region="true", %s}[%s:%dm])) by (pod_name, namespace, uid, %s) / 1024 / 1024 / 1024`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -1158,7 +1158,7 @@ func (pds *PrometheusMetricsQuerier) QueryNetZoneIngressGiB(start, end time.Time

 func (pds *PrometheusMetricsQuerier) QueryNetRegionIngressGiB(start, end time.Time) *source.Future[source.NetRegionIngressGiBResult] {
 	const queryName = "QueryNetRegionIngressGiB"
-	const queryFmtIngNetRegionGiB = `sum(increase(kubecost_pod_network_ingress_bytes_total{internet="false", same_zone="false", same_region="false", %s}[%s:%dm])) by (pod_name, namespace, %s) / 1024 / 1024 / 1024`
+	const queryFmtIngNetRegionGiB = `sum(increase(kubecost_pod_network_ingress_bytes_total{internet="false", same_zone="false", same_region="false", %s}[%s:%dm])) by (pod_name, namespace, uid, %s) / 1024 / 1024 / 1024`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -1177,7 +1177,7 @@ func (pds *PrometheusMetricsQuerier) QueryNetRegionIngressGiB(start, end time.Ti

 func (pds *PrometheusMetricsQuerier) QueryNetInternetIngressGiB(start, end time.Time) *source.Future[source.NetInternetIngressGiBResult] {
 	const queryName = "QueryNetInternetIngressGiB"
-	const queryFmtNetIngInternetGiB = `sum(increase(kubecost_pod_network_ingress_bytes_total{internet="true", %s}[%s:%dm])) by (pod_name, namespace, %s) / 1024 / 1024 / 1024`
+	const queryFmtNetIngInternetGiB = `sum(increase(kubecost_pod_network_ingress_bytes_total{internet="true", %s}[%s:%dm])) by (pod_name, namespace, uid, %s) / 1024 / 1024 / 1024`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -1196,7 +1196,7 @@ func (pds *PrometheusMetricsQuerier) QueryNetInternetIngressGiB(start, end time.

 func (pds *PrometheusMetricsQuerier) QueryNetInternetServiceIngressGiB(start, end time.Time) *source.Future[source.NetInternetServiceIngressGiBResult] {
 	const queryName = "QueryNetInternetServiceIngressGiB"
-	const queryFmtIngNetInternetGiB = `sum(increase(kubecost_pod_network_ingress_bytes_total{internet="true", %s}[%s:%dm])) by (pod_name, namespace, service, %s) / 1024 / 1024 / 1024`
+	const queryFmtIngNetInternetGiB = `sum(increase(kubecost_pod_network_ingress_bytes_total{internet="true", %s}[%s:%dm])) by (pod_name, namespace, service, uid, %s) / 1024 / 1024 / 1024`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -1215,7 +1215,7 @@ func (pds *PrometheusMetricsQuerier) QueryNetInternetServiceIngressGiB(start, en

 func (pds *PrometheusMetricsQuerier) QueryNetReceiveBytes(start, end time.Time) *source.Future[source.NetReceiveBytesResult] {
 	const queryName = "QueryNetReceiveBytes"
-	const queryFmtNetReceiveBytes = `sum(increase(container_network_receive_bytes_total{pod!="", %s}[%s:%dm])) by (pod_name, pod, namespace, %s)`
+	const queryFmtNetReceiveBytes = `sum(increase(container_network_receive_bytes_total{pod!="", %s}[%s:%dm])) by (pod_name, pod, namespace, uid, %s)`

 	cfg := pds.promConfig
 	minsPerResolution := cfg.DataResolutionMinutes
@@ -1360,7 +1360,7 @@ func (pds *PrometheusMetricsQuerier) QueryStatefulSetLabels(start, end time.Time

 func (pds *PrometheusMetricsQuerier) QueryDaemonSetLabels(start, end time.Time) *source.Future[source.DaemonSetLabelsResult] {
 	const queryName = "QueryDaemonSetLabels"
-	const queryFmtDaemonSetLabels = `sum(avg_over_time(kube_pod_owner{owner_kind="DaemonSet", %s}[%s])) by (pod, owner_name, namespace, %s)`
+	const queryFmtDaemonSetLabels = `sum(avg_over_time(kube_pod_owner{owner_kind="DaemonSet", %s}[%s])) by (pod, owner_name, namespace, uid, %s)`

 	cfg := pds.promConfig

@@ -1378,7 +1378,7 @@ func (pds *PrometheusMetricsQuerier) QueryDaemonSetLabels(start, end time.Time)

 func (pds *PrometheusMetricsQuerier) QueryJobLabels(start, end time.Time) *source.Future[source.JobLabelsResult] {
 	const queryName = "QueryJobLabels"
-	const queryFmtJobLabels = `sum(avg_over_time(kube_pod_owner{owner_kind="Job", %s}[%s])) by (pod, owner_name, namespace ,%s)`
+	const queryFmtJobLabels = `sum(avg_over_time(kube_pod_owner{owner_kind="Job", %s}[%s])) by (pod, owner_name, namespace, uid, %s)`

 	cfg := pds.promConfig

@@ -1396,7 +1396,7 @@ func (pds *PrometheusMetricsQuerier) QueryJobLabels(start, end time.Time) *sourc

 func (pds *PrometheusMetricsQuerier) QueryPodsWithReplicaSetOwner(start, end time.Time) *source.Future[source.PodsWithReplicaSetOwnerResult] {
 	const queryName = "QueryPodsWithReplicaSetOwner"
-	const queryFmtPodsWithReplicaSetOwner = `sum(avg_over_time(kube_pod_owner{owner_kind="ReplicaSet", %s}[%s])) by (pod, owner_name, namespace ,%s)`
+	const queryFmtPodsWithReplicaSetOwner = `sum(avg_over_time(kube_pod_owner{owner_kind="ReplicaSet", %s}[%s])) by (pod, owner_name, namespace, uid, %s)`

 	cfg := pds.promConfig

@@ -1414,7 +1414,7 @@ func (pds *PrometheusMetricsQuerier) QueryPodsWithReplicaSetOwner(start, end tim

 func (pds *PrometheusMetricsQuerier) QueryReplicaSetsWithoutOwners(start, end time.Time) *source.Future[source.ReplicaSetsWithoutOwnersResult] {
 	const queryName = "QueryReplicaSetsWithoutOwners"
-	const queryFmtReplicaSetsWithoutOwners = `avg(avg_over_time(kube_replicaset_owner{owner_kind="<none>", owner_name="<none>", %s}[%s])) by (replicaset, namespace, %s)`
+	const queryFmtReplicaSetsWithoutOwners = `avg(avg_over_time(kube_replicaset_owner{owner_kind="<none>", owner_name="<none>", %s}[%s])) by (replicaset, namespace, uid, %s)`

 	cfg := pds.promConfig

@@ -1432,7 +1432,7 @@ func (pds *PrometheusMetricsQuerier) QueryReplicaSetsWithoutOwners(start, end ti

 func (pds *PrometheusMetricsQuerier) QueryReplicaSetsWithRollout(start, end time.Time) *source.Future[source.ReplicaSetsWithRolloutResult] {
 	const queryName = "QueryReplicaSetsWithRollout"
-	const queryFmtReplicaSetsWithRolloutOwner = `avg(avg_over_time(kube_replicaset_owner{owner_kind="Rollout", %s}[%s])) by (replicaset, namespace, owner_kind, owner_name, %s)`
+	const queryFmtReplicaSetsWithRolloutOwner = `avg(avg_over_time(kube_replicaset_owner{owner_kind="Rollout", %s}[%s])) by (replicaset, namespace, owner_kind, owner_name, uid, %s)`

 	cfg := pds.promConfig


+ 65 - 30
pkg/costmodel/metrics.go

@@ -148,7 +148,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		cpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "node_cpu_hourly_cost",
 			Help: "node_cpu_hourly_cost hourly cost for each cpu on this node",
-		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
+		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
 		if _, disabled := disabledMetrics["node_cpu_hourly_cost"]; !disabled {
 			toRegisterGV = append(toRegisterGV, cpuGv)
 		}
@@ -156,7 +156,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		ramGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "node_ram_hourly_cost",
 			Help: "node_ram_hourly_cost hourly cost for each gb of ram on this node",
-		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
+		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
 		if _, disabled := disabledMetrics["node_ram_hourly_cost"]; !disabled {
 			toRegisterGV = append(toRegisterGV, ramGv)
 		}
@@ -164,7 +164,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		gpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "node_gpu_hourly_cost",
 			Help: "node_gpu_hourly_cost hourly cost for each gpu on this node",
-		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
+		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
 		if _, disabled := disabledMetrics["node_gpu_hourly_cost"]; !disabled {
 			toRegisterGV = append(toRegisterGV, gpuGv)
 		}
@@ -172,7 +172,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		gpuCountGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "node_gpu_count",
 			Help: "node_gpu_count count of gpu on this node",
-		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
+		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
 		if _, disabled := disabledMetrics["node_gpu_count"]; !disabled {
 			toRegisterGV = append(toRegisterGV, gpuCountGv)
 		}
@@ -180,7 +180,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		pvGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "pv_hourly_cost",
 			Help: "pv_hourly_cost Cost per GB per hour on a persistent disk",
-		}, []string{"volumename", "persistentvolume", "provider_id"})
+		}, []string{"volumename", "persistentvolume", "provider_id", "uid"})
 		if _, disabled := disabledMetrics["pv_hourly_cost"]; !disabled {
 			toRegisterGV = append(toRegisterGV, pvGv)
 		}
@@ -188,7 +188,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		spotGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "kubecost_node_is_spot",
 			Help: "kubecost_node_is_spot Cloud provider info about node preemptibility",
-		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
+		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
 		if _, disabled := disabledMetrics["kubecost_node_is_spot"]; !disabled {
 			toRegisterGV = append(toRegisterGV, spotGv)
 		}
@@ -196,7 +196,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		totalGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "node_total_hourly_cost",
 			Help: "node_total_hourly_cost Total node cost per hour",
-		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
+		}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
 		if _, disabled := disabledMetrics["node_total_hourly_cost"]; !disabled {
 			toRegisterGV = append(toRegisterGV, totalGv)
 		}
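Appending "uid" to a GaugeVec's label set means every WithLabelValues call on that vector must now pass a matching number of values in declaration order, or client_golang panics at emit time; the emitter changes later in this diff do exactly that. A self-contained sketch with example label values:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	totalGv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "node_total_hourly_cost",
		Help: "node_total_hourly_cost Total node cost per hour",
	}, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
	prometheus.MustRegister(totalGv)

	// Seven labels declared above, so seven values here (example values only).
	totalGv.WithLabelValues("node-a", "node-a", "m5.large", "us-east-1",
		"aws:///us-east-1a/i-0abc123", "amd64",
		"9f2c1d3a-1b2c-4d5e-8f90-123456789abc").Set(0.096)
}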
@@ -204,7 +204,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		ramAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "container_memory_allocation_bytes",
 			Help: "container_memory_allocation_bytes Bytes of RAM used",
-		}, []string{"namespace", "pod", "container", "instance", "node"})
+		}, []string{"namespace", "pod", "container", "instance", "node", "uid"})
 		if _, disabled := disabledMetrics["container_memory_allocation_bytes"]; !disabled {
 			toRegisterGV = append(toRegisterGV, ramAllocGv)
 		}
@@ -212,7 +212,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		cpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "container_cpu_allocation",
 			Help: "container_cpu_allocation Percent of a single CPU used in a minute",
-		}, []string{"namespace", "pod", "container", "instance", "node"})
+		}, []string{"namespace", "pod", "container", "instance", "node", "uid"})
 		if _, disabled := disabledMetrics["container_cpu_allocation"]; !disabled {
 			toRegisterGV = append(toRegisterGV, cpuAllocGv)
 		}
@@ -220,7 +220,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		gpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "container_gpu_allocation",
 			Help: "container_gpu_allocation GPU used",
-		}, []string{"namespace", "pod", "container", "instance", "node"})
+		}, []string{"namespace", "pod", "container", "instance", "node", "uid"})
 		if _, disabled := disabledMetrics["container_gpu_allocation"]; !disabled {
 			toRegisterGV = append(toRegisterGV, gpuAllocGv)
 		}
@@ -228,7 +228,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		pvAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "pod_pvc_allocation",
 			Help: "pod_pvc_allocation Bytes used by a PVC attached to a pod",
-		}, []string{"namespace", "pod", "persistentvolumeclaim", "persistentvolume"})
+		}, []string{"namespace", "pod", "persistentvolumeclaim", "persistentvolume", "uid"})
 		if _, disabled := disabledMetrics["pod_pvc_allocation"]; !disabled {
 			toRegisterGV = append(toRegisterGV, pvAllocGv)
 		}
@@ -268,7 +268,7 @@ func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfi
 		lbCostGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // no differentiation between ELB and ALB right now
 			Name: "kubecost_load_balancer_cost",
 			Help: "kubecost_load_balancer_cost Hourly cost of load balancer",
-		}, []string{"ingress_ip", "namespace", "service_name"}) // assumes one ingress IP per load balancer
+		}, []string{"ingress_ip", "namespace", "service_name", "uid"}) // assumes one ingress IP per load balancer
 		if _, disabled := disabledMetrics["kubecost_load_balancer_cost"]; !disabled {
 			toRegisterGV = append(toRegisterGV, lbCostGv)
 		}
@@ -430,8 +430,32 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 			log.Debugf("Recording prices...")
 			podlist := cmme.KubeClusterCache.GetAllPods()
 			podStatus := make(map[string]v1.PodPhase)
+			podUIDs := make(map[string]string)
 			for _, pod := range podlist {
 				podStatus[pod.Name] = pod.Status.Phase
+				podUIDs[pod.Name] = string(pod.UID)
+			}
+
+			// Create node UID lookup map
+			nodeList := cmme.KubeClusterCache.GetAllNodes()
+			nodeUIDs := make(map[string]string)
+			for _, node := range nodeList {
+				nodeUIDs[node.Name] = string(node.UID)
+			}
+
+			// Create PV UID lookup map
+			pvList := cmme.KubeClusterCache.GetAllPersistentVolumes()
+			pvUIDs := make(map[string]string)
+			for _, pv := range pvList {
+				pvUIDs[pv.Name] = string(pv.UID)
+			}
+
+			// Create service UID lookup map
+			serviceList := cmme.KubeClusterCache.GetAllServices()
+			serviceUIDs := make(map[string]string)
+			for _, service := range serviceList {
+				serviceKey := service.Namespace + "/" + service.Name
+				serviceUIDs[serviceKey] = string(service.UID)
 			}
 
 			cfg, _ := cmme.CloudProvider.GetConfig()
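A note on these lookup maps: they are plain map[string]string built from the cluster cache, so a miss returns the zero value "" and the metric is simply emitted with an empty uid label rather than failing. Services are keyed by namespace/name because service names are only unique within a namespace. A tiny illustration of that behavior (names and UIDs invented):

package main

import "fmt"

func main() {
	podUIDs := map[string]string{"web-0": "3e1c0f5a"}

	// Lookup of a pod that has vanished from the cache: no error, just "".
	fmt.Printf("uid=%q\n", podUIDs["web-1"]) // prints uid=""

	// Services use a composite key since names repeat across namespaces.
	serviceUIDs := map[string]string{"default/frontend": "9a2b41c7"}
	fmt.Println(serviceUIDs["default/frontend"])
}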
@@ -476,6 +500,9 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 				log.Warnf("Error getting Node cost: %s", err)
 			}
 			for nodeName, node := range nodes {
+				// Get node UID first
+				nodeUID := nodeUIDs[nodeName]
+
 				// Emit costs, guarding against NaN inputs for custom pricing.
 				cpuCost, _ := strconv.ParseFloat(node.VCPUCost, 64)
 				if math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
@@ -515,7 +542,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 
 				totalCost := cpu*cpuCost + ramCost*(ram/1024/1024/1024) + gpu*gpuCost
 
-				labelKey := getKeyFromLabelStrings(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType)
+				labelKey := getKeyFromLabelStrings(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID)
 
 				avgCosts, ok := nodeCostAverages[labelKey]
 
@@ -530,8 +557,8 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 					nodeCostAverages[labelKey] = avgCosts
 				}
 
-				cmme.GPUCountRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(gpu)
-				cmme.GPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(gpuCost)
+				cmme.GPUCountRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(gpu)
+				cmme.GPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(gpuCost)
 
 				const outlierFactor float64 = 30
 				// don't record cpuCost, ramCost, or gpuCost in the case of wild outliers
@@ -539,7 +566,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 				// https://github.com/opencost/opencost/issues/927
 				cpuOutlierCutoff := outlierFactor * avgCosts.CpuCostAverage
 				if cpuCost < cpuOutlierCutoff {
-					cmme.CPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(cpuCost)
+					cmme.CPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(cpuCost)
 					avgCosts.CpuCostAverage = (avgCosts.CpuCostAverage*avgCosts.NumCpuDataPoints + cpuCost) / (avgCosts.NumCpuDataPoints + 1)
 					avgCosts.NumCpuDataPoints += 1
 				} else {
@@ -547,7 +574,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 				}
 				ramOutlierCutoff := outlierFactor * avgCosts.RamCostAverage
 				if ramCost < ramOutlierCutoff {
-					cmme.RAMPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(ramCost)
+					cmme.RAMPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(ramCost)
 					avgCosts.RamCostAverage = (avgCosts.RamCostAverage*avgCosts.NumRamDataPoints + ramCost) / (avgCosts.NumRamDataPoints + 1)
 					avgCosts.NumRamDataPoints += 1
 				} else {
@@ -555,7 +582,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 				}
 				// skip recording totalCost if any constituent costs were outliers
 				if cpuCost < cpuOutlierCutoff && ramCost < ramOutlierCutoff {
-					cmme.NodeTotalPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(totalCost)
+					cmme.NodeTotalPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(totalCost)
 				} else {
 					log.Debugf("CPU and RAM outlier detected, not recording node %s total cost %f", nodeName, totalCost)
 				}
@@ -563,9 +590,9 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 				nodeCostAverages[labelKey] = avgCosts
 
 				if node.IsSpot() {
-					cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(1.0)
+					cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(1.0)
 				} else {
-					cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(0.0)
+					cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(0.0)
 				}
 				nodeSeen[labelKey] = true
 			}
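The cost bookkeeping in the hunks above is an incremental mean guarded by a fixed 30x outlier cutoff; nodeUID only extends the label key, not the math. As a standalone sketch (variable names and sample values are illustrative, not from the patch):

package main

import "fmt"

// updateMean folds a new sample into a running mean, mirroring the
// CpuCostAverage / NumCpuDataPoints bookkeeping above.
func updateMean(avg, n, x float64) (float64, float64) {
	return (avg*n + x) / (n + 1), n + 1
}

func main() {
	const outlierFactor = 30.0
	avg, n := 0.04, 5.0 // running average over 5 samples

	x := 0.05
	if x < outlierFactor*avg { // outlier gate: skip wildly large samples
		avg, n = updateMean(avg, n, x)
	}
	fmt.Printf("avg=%.4f n=%.0f\n", avg, n)
}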
@@ -583,9 +610,11 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 				if len(lb.IngressIPAddresses) > 0 {
 					ingressIP = lb.IngressIPAddresses[0] // assumes one ingress IP per load balancer
 				}
-				cmme.LBCostRecorder.WithLabelValues(ingressIP, namespace, serviceName).Set(lb.Cost)
+				serviceKey := namespace + "/" + serviceName
+				serviceUID := serviceUIDs[serviceKey]
+				cmme.LBCostRecorder.WithLabelValues(ingressIP, namespace, serviceName, serviceUID).Set(lb.Cost)
 
-				labelKey := getKeyFromLabelStrings(ingressIP, namespace, serviceName)
+				labelKey := getKeyFromLabelStrings(ingressIP, namespace, serviceName, serviceUID)
 				loadBalancerSeen[labelKey] = true
 			}
 
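The *Seen maps keyed by getKeyFromLabelStrings presumably drive a later sweep that deletes series no longer observed; that code and getKeyFromLabelStrings itself are outside this diff. The usual client_golang pattern looks roughly like this (joinKey is a hypothetical stand-in for the real key helper):

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

// joinKey stands in for getKeyFromLabelStrings; assume it produces a
// composite key that can be split back into its label values.
func joinKey(labels ...string) string { return strings.Join(labels, ",") }

func sweepStale(gv *prometheus.GaugeVec, seen map[string]bool) {
	for key, wasSeen := range seen {
		if !wasSeen {
			// Drop the series so a deleted object (or an old label set
			// without the new uid) stops being exported.
			gv.DeleteLabelValues(strings.Split(key, ",")...)
			delete(seen, key)
		} else {
			seen[key] = false // reset for the next tick
		}
	}
}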
@@ -603,18 +632,21 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 							if timesClaimed == 0 {
 								timesClaimed = 1 // unallocated PVs are unclaimed but have a full allocation
 							}
-							cmme.PVAllocationRecorder.WithLabelValues(namespace, podName, pvc.Claim, pvc.VolumeName).Set(pvc.Values[0].Value / float64(timesClaimed))
-							labelKey := getKeyFromLabelStrings(namespace, podName, pvc.Claim, pvc.VolumeName)
+							podUID := podUIDs[podName]
+							cmme.PVAllocationRecorder.WithLabelValues(namespace, podName, pvc.Claim, pvc.VolumeName, podUID).Set(pvc.Values[0].Value / float64(timesClaimed))
+							labelKey := getKeyFromLabelStrings(namespace, podName, pvc.Claim, pvc.VolumeName, podUID)
 							pvcSeen[labelKey] = true
 						}
 					}
 				}
 
 				if len(costs.RAMAllocation) > 0 {
-					cmme.RAMAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.RAMAllocation[0].Value)
+					podUID := podUIDs[podName]
+					cmme.RAMAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName, podUID).Set(costs.RAMAllocation[0].Value)
 				}
 				if len(costs.CPUAllocation) > 0 {
-					cmme.CPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.CPUAllocation[0].Value)
+					podUID := podUIDs[podName]
+					cmme.CPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName, podUID).Set(costs.CPUAllocation[0].Value)
 				}
 				if len(costs.GPUReq) > 0 {
 					// allocation here is set to the request because shared GPU usage not yet supported.
@@ -635,9 +667,11 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 						gpualloc = gpualloc * (gpu / vgpu)
 					}
 
-					cmme.GPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(gpualloc)
+					podUID := podUIDs[podName]
+					cmme.GPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName, podUID).Set(gpualloc)
 				}
-				labelKey := getKeyFromLabelStrings(namespace, podName, containerName, nodeName, nodeName)
+				podUID := podUIDs[podName]
+				labelKey := getKeyFromLabelStrings(namespace, podName, containerName, nodeName, nodeName, podUID)
 				if podStatus[podName] == v1.PodRunning { // Only report data for current pods
 					containerSeen[labelKey] = true
 				} else {
@@ -681,8 +715,9 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 
 				cmme.Model.GetPVCost(cacPv, pv, region)
 				c, _ := strconv.ParseFloat(cacPv.Cost, 64)
-				cmme.PersistentVolumePriceRecorder.WithLabelValues(pv.Name, pv.Name, cacPv.ProviderID).Set(c)
-				labelKey := getKeyFromLabelStrings(pv.Name, pv.Name, cacPv.ProviderID)
+				pvUID := pvUIDs[pv.Name]
+				cmme.PersistentVolumePriceRecorder.WithLabelValues(pv.Name, pv.Name, cacPv.ProviderID, pvUID).Set(c)
+				labelKey := getKeyFromLabelStrings(pv.Name, pv.Name, cacPv.ProviderID, pvUID)
 				pvSeen[labelKey] = true
 			}
 
+ 1 - 1
pkg/metrics/podlabelmetrics.go

@@ -100,7 +100,7 @@ func (kpmc KubePodLabelsCollector) Collect(ch chan<- prometheus.Metric) {
 		// Owner References
 		if _, disabled := disabledMetrics["kube_pod_owner"]; !disabled {
 			for _, owner := range pod.OwnerReferences {
-				ch <- newKubePodOwnerMetric("kube_pod_owner", podNS, podName, owner.Name, owner.Kind, owner.Controller != nil)
+				ch <- newKubePodOwnerMetric("kube_pod_owner", podNS, podName, podUID, owner.Name, owner.Kind, owner.Controller != nil)
 			}
 		}
 	}

+ 9 - 2
pkg/metrics/podmetrics.go

@@ -143,7 +143,7 @@ func (kpmc KubePodCollector) Collect(ch chan<- prometheus.Metric) {
 		// Owner References
 		if _, disabled := disabledMetrics["kube_pod_owner"]; !disabled {
 			for _, owner := range pod.OwnerReferences {
-				ch <- newKubePodOwnerMetric("kube_pod_owner", podNS, podName, owner.Name, owner.Kind, owner.Controller != nil)
+				ch <- newKubePodOwnerMetric("kube_pod_owner", podNS, podName, podUID, owner.Name, owner.Kind, owner.Controller != nil)
 			}
 		}
 
@@ -1017,18 +1017,20 @@ type KubePodOwnerMetric struct {
 	help              string
 	namespace         string
 	pod               string
+	uid               string
 	ownerIsController bool
 	ownerName         string
 	ownerKind         string
 }
 
 // Creates a new KubePodOwnerMetric, implementation of prometheus.Metric
-func newKubePodOwnerMetric(fqname, namespace, pod, ownerName, ownerKind string, ownerIsController bool) KubePodOwnerMetric {
+func newKubePodOwnerMetric(fqname, namespace, pod, uid, ownerName, ownerKind string, ownerIsController bool) KubePodOwnerMetric {
 	return KubePodOwnerMetric{
 		fqName:            fqname,
 		help:              "kube_pod_owner Information about the Pod's owner",
 		namespace:         namespace,
 		pod:               pod,
+		uid:               uid,
 		ownerName:         ownerName,
 		ownerKind:         ownerKind,
 		ownerIsController: ownerIsController,
@@ -1041,6 +1043,7 @@ func (kpo KubePodOwnerMetric) Desc() *prometheus.Desc {
 	l := prometheus.Labels{
 		"namespace":           kpo.namespace,
 		"pod":                 kpo.pod,
+		"uid":                 kpo.uid,
 		"owner_name":          kpo.ownerName,
 		"owner_kind":          kpo.ownerKind,
 		"owner_is_controller": fmt.Sprintf("%t", kpo.ownerIsController),
@@ -1065,6 +1068,10 @@ func (kpo KubePodOwnerMetric) Write(m *dto.Metric) error {
 			Name:  toStringPtr("pod"),
 			Value: &kpo.pod,
 		},
+		{
+			Name:  toStringPtr("uid"),
+			Value: &kpo.uid,
+		},
 		{
 			Name:  toStringPtr("owner_name"),
 			Value: &kpo.ownerName,

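With the uid field threaded through both Desc and Write above, a scrape of this collector should expose kube_pod_owner series shaped like the line below (all label values invented for illustration; the sample value follows the kube-state-metrics convention of 1 for info-style metrics):

kube_pod_owner{namespace="default",pod="web-0",uid="3e1c0f5a-77aa-4b1e-9f0d-0c2d4e5f6a7b",owner_name="web",owner_kind="StatefulSet",owner_is_controller="true"} 1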
+ 2 - 1
pkg/metrics/podmetrics_test.go

@@ -751,7 +751,7 @@ func TestKubePodContainerResourceLimitsMemoryBytesMetric(t *testing.T) {
 }
 
 func TestKubePodOwnerMetric(t *testing.T) {
-	metric := newKubePodOwnerMetric("kube_pod_owner", "default", "test-pod", "test-replicaset", "ReplicaSet", true)
+	metric := newKubePodOwnerMetric("kube_pod_owner", "default", "test-pod", "test-uid", "test-replicaset", "ReplicaSet", true)
 
 	var dtoMetric dto.Metric
 	err := metric.Write(&dtoMetric)
@@ -771,6 +771,7 @@ func TestKubePodOwnerMetric(t *testing.T) {
 	expectedLabels := map[string]string{
 		"namespace":           "default",
 		"pod":                 "test-pod",
+		"uid":                 "test-uid",
 		"owner_name":          "test-replicaset",
 		"owner_kind":          "ReplicaSet",
 		"owner_is_controller": "true",

Some files were not shown because too many files changed in this diff