gpu.pb.go 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366
  1. // Code generated by protoc-gen-go. DO NOT EDIT.
  2. // versions:
  3. // protoc-gen-go v1.36.9
  4. // protoc v6.32.1
  5. // source: kubemodel/gpu.proto
  6. package kubemodel
  7. import (
  8. protoreflect "google.golang.org/protobuf/reflect/protoreflect"
  9. protoimpl "google.golang.org/protobuf/runtime/protoimpl"
  10. reflect "reflect"
  11. sync "sync"
  12. unsafe "unsafe"
  13. )
// Compile-time guards: the build fails here if this generated code (generated
// at protoimpl version 20) and the linked protobuf runtime are mutually
// incompatible, forcing a regeneration instead of a silent runtime mismatch.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// GPUDevice represents a GPU device with DCGM integration (provisioned resource)
// This tracks available GPU capacity on a node
//
// NOTE(review): field numbers skip 5 and 7-8 — presumably removed/reserved in
// the .proto; confirm against kubemodel/gpu.proto before reusing those numbers.
type GPUDevice struct {
	// state, unknownFields, and sizeCache are protobuf runtime bookkeeping;
	// they are managed by protoimpl and must not be touched by hand.
	state protoimpl.MessageState `protogen:"open.v1"`
	// Identification
	ID     string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`         // GPU UUID (hardware identifier)
	NodeID string `protobuf:"bytes,2,opt,name=nodeID,proto3" json:"nodeID,omitempty"` // Node hosting this GPU device
	// Properties
	DeviceNumber int32  `protobuf:"varint,3,opt,name=deviceNumber,proto3" json:"deviceNumber,omitempty"`
	ModelName    string `protobuf:"bytes,4,opt,name=modelName,proto3" json:"modelName,omitempty"`
	// GPU sharing information
	IsShared        bool    `protobuf:"varint,6,opt,name=isShared,proto3" json:"isShared,omitempty"`
	SharePercentage float32 `protobuf:"fixed32,9,opt,name=sharePercentage,proto3" json:"sharePercentage,omitempty"`
	// Capacity metrics
	// GPU hours available
	GpuHours float32 `protobuf:"fixed32,10,opt,name=gpuHours,proto3" json:"gpuHours,omitempty"`
	// GPU request average percentage (0-100)
	GpuRequestAverage float32 `protobuf:"fixed32,11,opt,name=gpuRequestAverage,proto3" json:"gpuRequestAverage,omitempty"`
	// GPU usage average percentage (0-100)
	GpuUsageAverage float32 `protobuf:"fixed32,12,opt,name=gpuUsageAverage,proto3" json:"gpuUsageAverage,omitempty"`
	// GPU usage max percentage (0-100)
	GpuUsageMax float32 `protobuf:"fixed32,13,opt,name=gpuUsageMax,proto3" json:"gpuUsageMax,omitempty"`
	// GPU memory capacity in bytes
	MemoryBytes int64 `protobuf:"varint,14,opt,name=memoryBytes,proto3" json:"memoryBytes,omitempty"`
	// Diagnostic information about this resource
	// Pointer + "oneof" in the tag: proto3 optional field, so nil means "unset".
	Diagnostic    *DiagnosticResult `protobuf:"bytes,99,opt,name=diagnostic,proto3,oneof" json:"diagnostic,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds it to the
// GPUDevice message-type info so reflection keeps working after the wipe.
func (x *GPUDevice) Reset() {
	*x = GPUDevice{}
	mi := &file_kubemodel_gpu_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the compact protobuf text format.
func (x *GPUDevice) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags GPUDevice as a protobuf message (marker method, no body).
func (*GPUDevice) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message-type info on first use for a given instance.
func (x *GPUDevice) ProtoReflect() protoreflect.Message {
	mi := &file_kubemodel_gpu_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	// nil receiver: fall back to a read-only view derived from the type info.
	return mi.MessageOf(x)
}

// Descriptor returns the gzip-compressed FileDescriptorProto bytes and the
// path ([]int{0}) of this message within that file.
// Deprecated: Use GPUDevice.ProtoReflect.Descriptor instead.
func (*GPUDevice) Descriptor() ([]byte, []int) {
	return file_kubemodel_gpu_proto_rawDescGZIP(), []int{0}
}
// Generated nil-safe getters: each returns the field value, or the field's
// zero value when the receiver is nil, so callers can chain through possibly
// nil messages without a nil check. (The Get prefix is the protobuf
// convention for generated code, not idiomatic hand-written Go.)

// GetID returns the GPU UUID, or "" if x is nil.
func (x *GPUDevice) GetID() string {
	if x != nil {
		return x.ID
	}
	return ""
}

// GetNodeID returns the hosting node ID, or "" if x is nil.
func (x *GPUDevice) GetNodeID() string {
	if x != nil {
		return x.NodeID
	}
	return ""
}

// GetDeviceNumber returns the device number, or 0 if x is nil.
func (x *GPUDevice) GetDeviceNumber() int32 {
	if x != nil {
		return x.DeviceNumber
	}
	return 0
}

// GetModelName returns the GPU model name, or "" if x is nil.
func (x *GPUDevice) GetModelName() string {
	if x != nil {
		return x.ModelName
	}
	return ""
}

// GetIsShared reports whether the GPU is shared, or false if x is nil.
func (x *GPUDevice) GetIsShared() bool {
	if x != nil {
		return x.IsShared
	}
	return false
}

// GetSharePercentage returns the share percentage, or 0 if x is nil.
func (x *GPUDevice) GetSharePercentage() float32 {
	if x != nil {
		return x.SharePercentage
	}
	return 0
}

// GetGpuHours returns the available GPU hours, or 0 if x is nil.
func (x *GPUDevice) GetGpuHours() float32 {
	if x != nil {
		return x.GpuHours
	}
	return 0
}

// GetGpuRequestAverage returns the average GPU request percentage, or 0 if x is nil.
func (x *GPUDevice) GetGpuRequestAverage() float32 {
	if x != nil {
		return x.GpuRequestAverage
	}
	return 0
}

// GetGpuUsageAverage returns the average GPU usage percentage, or 0 if x is nil.
func (x *GPUDevice) GetGpuUsageAverage() float32 {
	if x != nil {
		return x.GpuUsageAverage
	}
	return 0
}

// GetGpuUsageMax returns the max GPU usage percentage, or 0 if x is nil.
func (x *GPUDevice) GetGpuUsageMax() float32 {
	if x != nil {
		return x.GpuUsageMax
	}
	return 0
}

// GetMemoryBytes returns the GPU memory capacity in bytes, or 0 if x is nil.
func (x *GPUDevice) GetMemoryBytes() int64 {
	if x != nil {
		return x.MemoryBytes
	}
	return 0
}

// GetDiagnostic returns the diagnostic result, or nil if x is nil or the
// optional field is unset.
func (x *GPUDevice) GetDiagnostic() *DiagnosticResult {
	if x != nil {
		return x.Diagnostic
	}
	return nil
}
// GPUUsage represents GPU resources consumed by a container (allocated resource)
// This tracks actual GPU usage by containers for cost analysis
type GPUUsage struct {
	// state, unknownFields, and sizeCache are protobuf runtime bookkeeping;
	// they are managed by protoimpl and must not be touched by hand.
	state protoimpl.MessageState `protogen:"open.v1"`
	// Identification
	ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"containerID,omitempty"` // Container consuming GPU resources
	GpuDeviceID string `protobuf:"bytes,2,opt,name=gpuDeviceID,proto3" json:"gpuDeviceID,omitempty"` // Reference to the GPU device being used
	// Usage metrics
	// GPU usage in device-hours consumed
	GpuHours float32 `protobuf:"fixed32,3,opt,name=gpuHours,proto3" json:"gpuHours,omitempty"`
	// GPU request in percentage (0-100)
	GpuRequestPercentage float32 `protobuf:"fixed32,4,opt,name=gpuRequestPercentage,proto3" json:"gpuRequestPercentage,omitempty"`
	// GPU usage average percentage (0-100)
	GpuUsageAverage float32 `protobuf:"fixed32,5,opt,name=gpuUsageAverage,proto3" json:"gpuUsageAverage,omitempty"`
	// GPU usage max percentage (0-100)
	GpuUsageMax float32 `protobuf:"fixed32,6,opt,name=gpuUsageMax,proto3" json:"gpuUsageMax,omitempty"`
	// GPU memory usage in bytes
	MemoryBytesUsed int64 `protobuf:"varint,7,opt,name=memoryBytesUsed,proto3" json:"memoryBytesUsed,omitempty"`
	// Diagnostic information about this resource
	// Pointer + "oneof" in the tag: proto3 optional field, so nil means "unset".
	Diagnostic    *DiagnosticResult `protobuf:"bytes,99,opt,name=diagnostic,proto3,oneof" json:"diagnostic,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds it to the
// GPUUsage message-type info so reflection keeps working after the wipe.
func (x *GPUUsage) Reset() {
	*x = GPUUsage{}
	mi := &file_kubemodel_gpu_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the compact protobuf text format.
func (x *GPUUsage) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags GPUUsage as a protobuf message (marker method, no body).
func (*GPUUsage) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message-type info on first use for a given instance.
func (x *GPUUsage) ProtoReflect() protoreflect.Message {
	mi := &file_kubemodel_gpu_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	// nil receiver: fall back to a read-only view derived from the type info.
	return mi.MessageOf(x)
}

// Descriptor returns the gzip-compressed FileDescriptorProto bytes and the
// path ([]int{1}) of this message within that file.
// Deprecated: Use GPUUsage.ProtoReflect.Descriptor instead.
func (*GPUUsage) Descriptor() ([]byte, []int) {
	return file_kubemodel_gpu_proto_rawDescGZIP(), []int{1}
}
// Generated nil-safe getters: each returns the field value, or the field's
// zero value when the receiver is nil, so callers can chain through possibly
// nil messages without a nil check.

// GetContainerID returns the consuming container ID, or "" if x is nil.
func (x *GPUUsage) GetContainerID() string {
	if x != nil {
		return x.ContainerID
	}
	return ""
}

// GetGpuDeviceID returns the referenced GPU device ID, or "" if x is nil.
func (x *GPUUsage) GetGpuDeviceID() string {
	if x != nil {
		return x.GpuDeviceID
	}
	return ""
}

// GetGpuHours returns the device-hours consumed, or 0 if x is nil.
func (x *GPUUsage) GetGpuHours() float32 {
	if x != nil {
		return x.GpuHours
	}
	return 0
}

// GetGpuRequestPercentage returns the GPU request percentage, or 0 if x is nil.
func (x *GPUUsage) GetGpuRequestPercentage() float32 {
	if x != nil {
		return x.GpuRequestPercentage
	}
	return 0
}

// GetGpuUsageAverage returns the average GPU usage percentage, or 0 if x is nil.
func (x *GPUUsage) GetGpuUsageAverage() float32 {
	if x != nil {
		return x.GpuUsageAverage
	}
	return 0
}

// GetGpuUsageMax returns the max GPU usage percentage, or 0 if x is nil.
func (x *GPUUsage) GetGpuUsageMax() float32 {
	if x != nil {
		return x.GpuUsageMax
	}
	return 0
}

// GetMemoryBytesUsed returns the GPU memory used in bytes, or 0 if x is nil.
func (x *GPUUsage) GetMemoryBytesUsed() int64 {
	if x != nil {
		return x.MemoryBytesUsed
	}
	return 0
}

// GetDiagnostic returns the diagnostic result, or nil if x is nil or the
// optional field is unset.
func (x *GPUUsage) GetDiagnostic() *DiagnosticResult {
	if x != nil {
		return x.Diagnostic
	}
	return nil
}
// File_kubemodel_gpu_proto is the runtime file descriptor for
// kubemodel/gpu.proto; it is populated by file_kubemodel_gpu_proto_init.
var File_kubemodel_gpu_proto protoreflect.FileDescriptor

// file_kubemodel_gpu_proto_rawDesc is the wire-encoded FileDescriptorProto
// for kubemodel/gpu.proto. The bytes are generated by protoc and must not be
// edited by hand — every escape below is significant.
const file_kubemodel_gpu_proto_rawDesc = "" +
	"\n" +
	"\x13kubemodel/gpu.proto\x12\tkubemodel\x1a\x1akubemodel/diagnostic.proto\"\xc4\x03\n" +
	"\tGPUDevice\x12\x0e\n" +
	"\x02ID\x18\x01 \x01(\tR\x02ID\x12\x16\n" +
	"\x06nodeID\x18\x02 \x01(\tR\x06nodeID\x12\"\n" +
	"\fdeviceNumber\x18\x03 \x01(\x05R\fdeviceNumber\x12\x1c\n" +
	"\tmodelName\x18\x04 \x01(\tR\tmodelName\x12\x1a\n" +
	"\bisShared\x18\x06 \x01(\bR\bisShared\x12(\n" +
	"\x0fsharePercentage\x18\t \x01(\x02R\x0fsharePercentage\x12\x1a\n" +
	"\bgpuHours\x18\n" +
	" \x01(\x02R\bgpuHours\x12,\n" +
	"\x11gpuRequestAverage\x18\v \x01(\x02R\x11gpuRequestAverage\x12(\n" +
	"\x0fgpuUsageAverage\x18\f \x01(\x02R\x0fgpuUsageAverage\x12 \n" +
	"\vgpuUsageMax\x18\r \x01(\x02R\vgpuUsageMax\x12 \n" +
	"\vmemoryBytes\x18\x0e \x01(\x03R\vmemoryBytes\x12@\n" +
	"\n" +
	"diagnostic\x18c \x01(\v2\x1b.kubemodel.DiagnosticResultH\x00R\n" +
	"diagnostic\x88\x01\x01B\r\n" +
	"\v_diagnostic\"\xe5\x02\n" +
	"\bGPUUsage\x12 \n" +
	"\vcontainerID\x18\x01 \x01(\tR\vcontainerID\x12 \n" +
	"\vgpuDeviceID\x18\x02 \x01(\tR\vgpuDeviceID\x12\x1a\n" +
	"\bgpuHours\x18\x03 \x01(\x02R\bgpuHours\x122\n" +
	"\x14gpuRequestPercentage\x18\x04 \x01(\x02R\x14gpuRequestPercentage\x12(\n" +
	"\x0fgpuUsageAverage\x18\x05 \x01(\x02R\x0fgpuUsageAverage\x12 \n" +
	"\vgpuUsageMax\x18\x06 \x01(\x02R\vgpuUsageMax\x12(\n" +
	"\x0fmemoryBytesUsed\x18\a \x01(\x03R\x0fmemoryBytesUsed\x12@\n" +
	"\n" +
	"diagnostic\x18c \x01(\v2\x1b.kubemodel.DiagnosticResultH\x00R\n" +
	"diagnostic\x88\x01\x01B\r\n" +
	"\v_diagnosticB:Z8github.com/opencost/opencost/core/pkg/model/pb/kubemodelb\x06proto3"
// Lazily-built gzip copy of the raw descriptor, guarded by a sync.Once so
// the compression happens at most once per process.
var (
	file_kubemodel_gpu_proto_rawDescOnce sync.Once
	file_kubemodel_gpu_proto_rawDescData []byte
)

// file_kubemodel_gpu_proto_rawDescGZIP returns the gzip-compressed raw
// descriptor, compressing on first call and returning the cached bytes after.
// The unsafe.Slice/StringData pair views the descriptor string as []byte
// without copying; CompressGZIP only reads it.
func file_kubemodel_gpu_proto_rawDescGZIP() []byte {
	file_kubemodel_gpu_proto_rawDescOnce.Do(func() {
		file_kubemodel_gpu_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_kubemodel_gpu_proto_rawDesc), len(file_kubemodel_gpu_proto_rawDesc)))
	})
	return file_kubemodel_gpu_proto_rawDescData
}
// Per-message runtime type info, indexed in declaration order
// (0 = GPUDevice, 1 = GPUUsage); populated by the TypeBuilder in init.
var file_kubemodel_gpu_proto_msgTypes = make([]protoimpl.MessageInfo, 2)

// Go types referenced by this file's descriptor, in index order.
var file_kubemodel_gpu_proto_goTypes = []any{
	(*GPUDevice)(nil),        // 0: kubemodel.GPUDevice
	(*GPUUsage)(nil),         // 1: kubemodel.GPUUsage
	(*DiagnosticResult)(nil), // 2: kubemodel.DiagnosticResult
}

// Dependency index table: the leading entries map each message-typed field to
// its type's index in goTypes; the trailing sub-list bounds partition the
// table for methods/extensions (all empty here except field type_names [0:2]).
var file_kubemodel_gpu_proto_depIdxs = []int32{
	2, // 0: kubemodel.GPUDevice.diagnostic:type_name -> kubemodel.DiagnosticResult
	2, // 1: kubemodel.GPUUsage.diagnostic:type_name -> kubemodel.DiagnosticResult
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}
func init() { file_kubemodel_gpu_proto_init() }

// file_kubemodel_gpu_proto_init builds the file descriptor and message type
// info for kubemodel/gpu.proto. It is idempotent (guarded by the nil check)
// and first initializes the imported diagnostic proto it depends on.
func file_kubemodel_gpu_proto_init() {
	if File_kubemodel_gpu_proto != nil {
		return
	}
	// Ensure the dependency's descriptor exists before building ours.
	file_kubemodel_diagnostic_proto_init()
	// Register (empty) oneof wrapper lists for the proto3 optional
	// `diagnostic` fields on both messages.
	file_kubemodel_gpu_proto_msgTypes[0].OneofWrappers = []any{}
	file_kubemodel_gpu_proto_msgTypes[1].OneofWrappers = []any{}
	// Local type used only to recover this package's import path via reflection.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			// Zero-copy []byte view of the raw descriptor string.
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_kubemodel_gpu_proto_rawDesc), len(file_kubemodel_gpu_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_kubemodel_gpu_proto_goTypes,
		DependencyIndexes: file_kubemodel_gpu_proto_depIdxs,
		MessageInfos:      file_kubemodel_gpu_proto_msgTypes,
	}.Build()
	File_kubemodel_gpu_proto = out.File
	// Release construction-only tables so they can be garbage-collected.
	file_kubemodel_gpu_proto_goTypes = nil
	file_kubemodel_gpu_proto_depIdxs = nil
}