// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.36.9
// 	protoc        v6.32.1
// source: kubemodel/gpu.proto

package kubemodel

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
	unsafe "unsafe"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// GPUDevice represents a GPU device with DCGM integration (provisioned resource)
// This tracks available GPU capacity on a node
type GPUDevice struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Identification
	ID     string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`         // GPU UUID (hardware identifier)
	NodeID string `protobuf:"bytes,2,opt,name=nodeID,proto3" json:"nodeID,omitempty"` // Node hosting this GPU device
	// Properties
	DeviceNumber int32  `protobuf:"varint,3,opt,name=deviceNumber,proto3" json:"deviceNumber,omitempty"`
	ModelName    string `protobuf:"bytes,4,opt,name=modelName,proto3" json:"modelName,omitempty"`
	// GPU sharing information
	IsShared        bool    `protobuf:"varint,6,opt,name=isShared,proto3" json:"isShared,omitempty"`
	SharePercentage float32 `protobuf:"fixed32,9,opt,name=sharePercentage,proto3" json:"sharePercentage,omitempty"`
	// Capacity metrics
	// GPU hours available
	GpuHours float32 `protobuf:"fixed32,10,opt,name=gpuHours,proto3" json:"gpuHours,omitempty"`
	// GPU request average percentage (0-100)
	GpuRequestAverage float32 `protobuf:"fixed32,11,opt,name=gpuRequestAverage,proto3" json:"gpuRequestAverage,omitempty"`
	// GPU usage average percentage (0-100)
	GpuUsageAverage float32 `protobuf:"fixed32,12,opt,name=gpuUsageAverage,proto3" json:"gpuUsageAverage,omitempty"`
	// GPU usage max percentage (0-100)
	GpuUsageMax float32 `protobuf:"fixed32,13,opt,name=gpuUsageMax,proto3" json:"gpuUsageMax,omitempty"`
	// GPU memory capacity in bytes
	MemoryBytes int64 `protobuf:"varint,14,opt,name=memoryBytes,proto3" json:"memoryBytes,omitempty"`
	// Diagnostic information about this resource
	// Oneof-wrapped (proto3 explicit presence): nil means "unset", distinct from an empty message.
	Diagnostic    *DiagnosticResult `protobuf:"bytes,99,opt,name=diagnostic,proto3,oneof" json:"diagnostic,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-associates it
// with this file's message info so the protobuf runtime can manage it.
func (x *GPUDevice) Reset() {
	*x = GPUDevice{}
	mi := &file_kubemodel_gpu_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text representation.
func (x *GPUDevice) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks GPUDevice as a protobuf message.
func (*GPUDevice) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily
// attaching the message info on first use; handles a nil receiver.
func (x *GPUDevice) ProtoReflect() protoreflect.Message {
	mi := &file_kubemodel_gpu_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GPUDevice.ProtoReflect.Descriptor instead.
func (*GPUDevice) Descriptor() ([]byte, []int) {
	return file_kubemodel_gpu_proto_rawDescGZIP(), []int{0}
}

// All GPUDevice getters below are nil-safe: calling them on a nil
// *GPUDevice returns the field's zero value instead of panicking.

func (x *GPUDevice) GetID() string {
	if x != nil {
		return x.ID
	}
	return ""
}

func (x *GPUDevice) GetNodeID() string {
	if x != nil {
		return x.NodeID
	}
	return ""
}

func (x *GPUDevice) GetDeviceNumber() int32 {
	if x != nil {
		return x.DeviceNumber
	}
	return 0
}

func (x *GPUDevice) GetModelName() string {
	if x != nil {
		return x.ModelName
	}
	return ""
}

func (x *GPUDevice) GetIsShared() bool {
	if x != nil {
		return x.IsShared
	}
	return false
}

func (x *GPUDevice) GetSharePercentage() float32 {
	if x != nil {
		return x.SharePercentage
	}
	return 0
}

func (x *GPUDevice) GetGpuHours() float32 {
	if x != nil {
		return x.GpuHours
	}
	return 0
}

func (x *GPUDevice) GetGpuRequestAverage() float32 {
	if x != nil {
		return x.GpuRequestAverage
	}
	return 0
}

func (x *GPUDevice) GetGpuUsageAverage() float32 {
	if x != nil {
		return x.GpuUsageAverage
	}
	return 0
}

func (x *GPUDevice) GetGpuUsageMax() float32 {
	if x != nil {
		return x.GpuUsageMax
	}
	return 0
}

func (x *GPUDevice) GetMemoryBytes() int64 {
	if x != nil {
		return x.MemoryBytes
	}
	return 0
}

func (x *GPUDevice) GetDiagnostic() *DiagnosticResult {
	if x != nil {
		return x.Diagnostic
	}
	return nil
}

// GPUUsage represents GPU resources consumed by a container (allocated resource)
// This tracks actual GPU usage by containers for cost analysis
type GPUUsage struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Identification
	ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"containerID,omitempty"` // Container consuming GPU resources
	GpuDeviceID string `protobuf:"bytes,2,opt,name=gpuDeviceID,proto3" json:"gpuDeviceID,omitempty"` // Reference to the GPU device being used
	// Usage metrics
	// GPU usage in device-hours consumed
	GpuHours float32 `protobuf:"fixed32,3,opt,name=gpuHours,proto3" json:"gpuHours,omitempty"`
	// GPU request in percentage (0-100)
	GpuRequestPercentage float32 `protobuf:"fixed32,4,opt,name=gpuRequestPercentage,proto3" json:"gpuRequestPercentage,omitempty"`
	// GPU usage average percentage (0-100)
	GpuUsageAverage float32 `protobuf:"fixed32,5,opt,name=gpuUsageAverage,proto3" json:"gpuUsageAverage,omitempty"`
	// GPU usage max percentage (0-100)
	GpuUsageMax float32 `protobuf:"fixed32,6,opt,name=gpuUsageMax,proto3" json:"gpuUsageMax,omitempty"`
	// GPU memory usage in bytes
	MemoryBytesUsed int64 `protobuf:"varint,7,opt,name=memoryBytesUsed,proto3" json:"memoryBytesUsed,omitempty"`
	// Diagnostic information about this resource
	// Oneof-wrapped (proto3 explicit presence): nil means "unset", distinct from an empty message.
	Diagnostic    *DiagnosticResult `protobuf:"bytes,99,opt,name=diagnostic,proto3,oneof" json:"diagnostic,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-associates it
// with this file's message info so the protobuf runtime can manage it.
func (x *GPUUsage) Reset() {
	*x = GPUUsage{}
	mi := &file_kubemodel_gpu_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text representation.
func (x *GPUUsage) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks GPUUsage as a protobuf message.
func (*GPUUsage) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily
// attaching the message info on first use; handles a nil receiver.
func (x *GPUUsage) ProtoReflect() protoreflect.Message {
	mi := &file_kubemodel_gpu_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GPUUsage.ProtoReflect.Descriptor instead.
func (*GPUUsage) Descriptor() ([]byte, []int) {
	return file_kubemodel_gpu_proto_rawDescGZIP(), []int{1}
}

// All GPUUsage getters below are nil-safe: calling them on a nil
// *GPUUsage returns the field's zero value instead of panicking.

func (x *GPUUsage) GetContainerID() string {
	if x != nil {
		return x.ContainerID
	}
	return ""
}

func (x *GPUUsage) GetGpuDeviceID() string {
	if x != nil {
		return x.GpuDeviceID
	}
	return ""
}

func (x *GPUUsage) GetGpuHours() float32 {
	if x != nil {
		return x.GpuHours
	}
	return 0
}

func (x *GPUUsage) GetGpuRequestPercentage() float32 {
	if x != nil {
		return x.GpuRequestPercentage
	}
	return 0
}

func (x *GPUUsage) GetGpuUsageAverage() float32 {
	if x != nil {
		return x.GpuUsageAverage
	}
	return 0
}

func (x *GPUUsage) GetGpuUsageMax() float32 {
	if x != nil {
		return x.GpuUsageMax
	}
	return 0
}

func (x *GPUUsage) GetMemoryBytesUsed() int64 {
	if x != nil {
		return x.MemoryBytesUsed
	}
	return 0
}

func (x *GPUUsage) GetDiagnostic() *DiagnosticResult {
	if x != nil {
		return x.Diagnostic
	}
	return nil
}

// File_kubemodel_gpu_proto is the compiled file descriptor for
// kubemodel/gpu.proto; it is populated by file_kubemodel_gpu_proto_init.
var File_kubemodel_gpu_proto protoreflect.FileDescriptor

// file_kubemodel_gpu_proto_rawDesc is the serialized FileDescriptorProto
// for kubemodel/gpu.proto. It must stay in sync with the generated Go
// types above; do not edit by hand.
const file_kubemodel_gpu_proto_rawDesc = "" +
	"\n" +
	"\x13kubemodel/gpu.proto\x12\tkubemodel\x1a\x1akubemodel/diagnostic.proto\"\xc4\x03\n" +
	"\tGPUDevice\x12\x0e\n" +
	"\x02ID\x18\x01 \x01(\tR\x02ID\x12\x16\n" +
	"\x06nodeID\x18\x02 \x01(\tR\x06nodeID\x12\"\n" +
	"\fdeviceNumber\x18\x03 \x01(\x05R\fdeviceNumber\x12\x1c\n" +
	"\tmodelName\x18\x04 \x01(\tR\tmodelName\x12\x1a\n" +
	"\bisShared\x18\x06 \x01(\bR\bisShared\x12(\n" +
	"\x0fsharePercentage\x18\t \x01(\x02R\x0fsharePercentage\x12\x1a\n" +
	"\bgpuHours\x18\n" +
	" \x01(\x02R\bgpuHours\x12,\n" +
	"\x11gpuRequestAverage\x18\v \x01(\x02R\x11gpuRequestAverage\x12(\n" +
	"\x0fgpuUsageAverage\x18\f \x01(\x02R\x0fgpuUsageAverage\x12 \n" +
	"\vgpuUsageMax\x18\r \x01(\x02R\vgpuUsageMax\x12 \n" +
	"\vmemoryBytes\x18\x0e \x01(\x03R\vmemoryBytes\x12@\n" +
	"\n" +
	"diagnostic\x18c \x01(\v2\x1b.kubemodel.DiagnosticResultH\x00R\n" +
	"diagnostic\x88\x01\x01B\r\n" +
	"\v_diagnostic\"\xe5\x02\n" +
	"\bGPUUsage\x12 \n" +
	"\vcontainerID\x18\x01 \x01(\tR\vcontainerID\x12 \n" +
	"\vgpuDeviceID\x18\x02 \x01(\tR\vgpuDeviceID\x12\x1a\n" +
	"\bgpuHours\x18\x03 \x01(\x02R\bgpuHours\x122\n" +
	"\x14gpuRequestPercentage\x18\x04 \x01(\x02R\x14gpuRequestPercentage\x12(\n" +
	"\x0fgpuUsageAverage\x18\x05 \x01(\x02R\x0fgpuUsageAverage\x12 \n" +
	"\vgpuUsageMax\x18\x06 \x01(\x02R\vgpuUsageMax\x12(\n" +
	"\x0fmemoryBytesUsed\x18\a \x01(\x03R\x0fmemoryBytesUsed\x12@\n" +
	"\n" +
	"diagnostic\x18c \x01(\v2\x1b.kubemodel.DiagnosticResultH\x00R\n" +
	"diagnostic\x88\x01\x01B\r\n" +
	"\v_diagnosticB:Z8github.com/opencost/opencost/core/pkg/model/pb/kubemodelb\x06proto3"

var (
	file_kubemodel_gpu_proto_rawDescOnce sync.Once
	file_kubemodel_gpu_proto_rawDescData []byte
)

// file_kubemodel_gpu_proto_rawDescGZIP gzip-compresses the raw descriptor
// exactly once (guarded by sync.Once) and returns the cached bytes.
// It backs the deprecated Descriptor() methods above.
func file_kubemodel_gpu_proto_rawDescGZIP() []byte {
	file_kubemodel_gpu_proto_rawDescOnce.Do(func() {
		file_kubemodel_gpu_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_kubemodel_gpu_proto_rawDesc), len(file_kubemodel_gpu_proto_rawDesc)))
	})
	return file_kubemodel_gpu_proto_rawDescData
}

var file_kubemodel_gpu_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_kubemodel_gpu_proto_goTypes = []any{
	(*GPUDevice)(nil),        // 0: kubemodel.GPUDevice
	(*GPUUsage)(nil),         // 1: kubemodel.GPUUsage
	(*DiagnosticResult)(nil), // 2: kubemodel.DiagnosticResult
}
var file_kubemodel_gpu_proto_depIdxs = []int32{
	2, // 0: kubemodel.GPUDevice.diagnostic:type_name -> kubemodel.DiagnosticResult
	2, // 1: kubemodel.GPUUsage.diagnostic:type_name -> kubemodel.DiagnosticResult
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}

func init() { file_kubemodel_gpu_proto_init() }

// file_kubemodel_gpu_proto_init builds the file descriptor and message
// type tables. It is idempotent: it returns immediately once
// File_kubemodel_gpu_proto has been set.
func file_kubemodel_gpu_proto_init() {
	if File_kubemodel_gpu_proto != nil {
		return
	}
	// Initialize the imported diagnostic.proto first so DiagnosticResult
	// is registered before this file's types reference it.
	file_kubemodel_diagnostic_proto_init()
	// Register the oneof wrappers for the optional `diagnostic` fields.
	file_kubemodel_gpu_proto_msgTypes[0].OneofWrappers = []any{}
	file_kubemodel_gpu_proto_msgTypes[1].OneofWrappers = []any{}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_kubemodel_gpu_proto_rawDesc), len(file_kubemodel_gpu_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_kubemodel_gpu_proto_goTypes,
		DependencyIndexes: file_kubemodel_gpu_proto_depIdxs,
		MessageInfos:      file_kubemodel_gpu_proto_msgTypes,
	}.Build()
	File_kubemodel_gpu_proto = out.File
	// Release build-time tables; the runtime retains what it needs.
	file_kubemodel_gpu_proto_goTypes = nil
	file_kubemodel_gpu_proto_depIdxs = nil
}