watchcontroller.go

package clustercache

import (
	"fmt"
	"reflect"
	"time"

	"github.com/opencost/opencost/core/pkg/log"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/fields"
	rt "k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

// WatchHandler is a type alias for a receiver func.
type WatchHandler = func(interface{})

// WatchController defines a contract for an object which watches a specific resource set for
// adds, updates, and removals.
type WatchController interface {
	// WarmUp initializes the cache.
	WarmUp(chan struct{})
	// Run starts the watching process.
	Run(int, chan struct{})
	// GetAll returns all of the resources.
	GetAll() []interface{}
	// SetUpdateHandler sets a specific handler for adding/updating individual resources.
	SetUpdateHandler(WatchHandler) WatchController
	// SetRemovedHandler sets a specific handler for removing individual resources.
	SetRemovedHandler(WatchHandler) WatchController
}

// CachingWatchController combines the watching behavior with a cache so that up-to-date copies
// of all resources are readily available.
type CachingWatchController struct {
	indexer       cache.Indexer
	queue         workqueue.RateLimitingInterface
	informer      cache.Controller
	resource      string
	resourceType  string
	updateHandler WatchHandler
	removeHandler WatchHandler
}
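
// NewCachingWatcher creates a WatchController for the given resource using the provided REST
// client, backed by a rate-limited work queue and an indexer whose transform strips managed
// fields from cached objects.
//
// An illustrative sketch of how a caller might wire up a Pod watcher, assuming a configured
// *kubernetes.Clientset named "client", a stop channel "stopCh", and the usual corev1/metav1
// imports (names here are hypothetical, not part of this package):
//
//	watcher := NewCachingWatcher(client.CoreV1().RESTClient(), "pods", &corev1.Pod{}, metav1.NamespaceAll, fields.Everything()).
//		SetUpdateHandler(func(obj interface{}) { /* react to added/updated pods */ }).
//		SetRemovedHandler(func(key interface{}) { /* react to removed pod keys */ })
//	watcher.WarmUp(stopCh)
//	go watcher.Run(2, stopCh)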
func NewCachingWatcher(restClient rest.Interface, resource string, resourceType rt.Object, namespace string, fieldSelector fields.Selector) WatchController {
	resourceCache := cache.NewListWatchFromClient(restClient, resource, namespace, fieldSelector)
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

	indexer, informer := cache.NewTransformingIndexerInformer(resourceCache, resourceType, 0, cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(obj)
			if err == nil {
				queue.Add(key)
			}
		},
		UpdateFunc: func(old interface{}, new interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(new)
			if err == nil {
				queue.Add(key)
			}
		},
		DeleteFunc: func(obj interface{}) {
			// IndexerInformer uses a delta queue, therefore for deletes we have to use this
			// key function.
			key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
			if err == nil {
				queue.Add(key)
			}
		},
	}, cache.Indexers{}, trimUnwantedFields)

	return &CachingWatchController{
		indexer:      indexer,
		queue:        queue,
		informer:     informer,
		resource:     resource,
		resourceType: reflect.TypeOf(resourceType).String(),
	}
}
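
// GetAll returns deep copies of all resources currently held in the index.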
func (c *CachingWatchController) GetAll() []interface{} {
	list := c.indexer.List()

	// Since the indexer returns pointers to the cached resources as-is, we deep copy each
	// resource so that callers can't corrupt the index.
	cloneList := make([]interface{}, 0, len(list))
	for _, v := range list {
		if deepCopyable, ok := v.(rt.Object); ok {
			cloneList = append(cloneList, deepCopyable.DeepCopyObject())
		}
	}

	return cloneList
}

func (c *CachingWatchController) SetUpdateHandler(handler WatchHandler) WatchController {
	c.updateHandler = handler
	return c
}

func (c *CachingWatchController) SetRemovedHandler(handler WatchHandler) WatchController {
	c.removeHandler = handler
	return c
}
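
// processNextItem pulls the next key off of the work queue, hands it to the business logic,
// and reports whether the queue is still accepting work.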
func (c *CachingWatchController) processNextItem() bool {
	// Wait until there is a new item in the working queue
	key, quit := c.queue.Get()
	if quit {
		return false
	}

	// Tell the queue that we are done with processing this key. This unblocks the key for other
	// workers and allows safe parallel processing, because two events for the same key are never
	// processed in parallel.
	defer c.queue.Done(key)

	// Invoke the method containing the business logic
	err := c.handle(key.(string))

	// Handle the error if something went wrong during the execution of the business logic
	c.handleErr(err, key)
	return true
}

// handle is the business logic of the controller. It looks the key up in the indexer and
// dispatches to the remove handler (if the resource no longer exists) or the update handler.
func (c *CachingWatchController) handle(key string) error {
	obj, exists, err := c.indexer.GetByKey(key)
	if err != nil {
		log.Errorf("Fetching %s with key %s from store failed with %v", c.resourceType, key, err)
		return err
	}

	if !exists {
		if c.removeHandler != nil {
			c.removeHandler(key)
		}
	} else {
		if c.updateHandler != nil {
			c.updateHandler(obj)
		}
	}

	return nil
}

// handleErr checks if an error happened and makes sure we will retry later.
func (c *CachingWatchController) handleErr(err error, key interface{}) {
	if err == nil {
		// Forget about the AddRateLimited history of the key on every successful synchronization.
		// This ensures that future processing of updates for this key is not delayed because of
		// an outdated error history.
		c.queue.Forget(key)
		return
	}

	// This controller retries 5 times if something goes wrong. After that, it stops trying.
	if c.queue.NumRequeues(key) < 5 {
		log.Errorf("Error syncing %s %v: %v", c.resourceType, key, err)

		// Re-enqueue the key rate limited. Based on the rate limiter on the queue and the
		// re-enqueue history, the key will be processed later again.
		c.queue.AddRateLimited(key)
		return
	}

	c.queue.Forget(key)

	// Report to an external entity that, even after several retries, we could not successfully
	// process this key.
	runtime.HandleError(err)
	log.Infof("Dropping %s %q out of the queue: %v", c.resourceType, key, err)
}
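
// WarmUp starts the informer and blocks until its cache has synced or the cancel channel is
// closed, so that callers see a fully populated cache before workers start.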
func (c *CachingWatchController) WarmUp(cancelCh chan struct{}) {
	go c.informer.Run(cancelCh)

	// Wait for all involved caches to be synced before processing items from the queue is started.
	if !cache.WaitForCacheSync(cancelCh, c.informer.HasSynced) {
		runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
		return
	}
}
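
// Run starts the requested number of worker goroutines and blocks until stopCh is closed, at
// which point the work queue is shut down and the workers exit.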
func (c *CachingWatchController) Run(threadiness int, stopCh chan struct{}) {
	defer runtime.HandleCrash()

	// Let the workers stop when we are done
	defer c.queue.ShutDown()

	log.Infof("Starting %s controller", c.resourceType)

	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}

	<-stopCh
	log.Infof("Stopping %s controller", c.resourceType)
}
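
// runWorker processes items from the work queue until the queue is shut down.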
func (c *CachingWatchController) runWorker() {
	for c.processNextItem() {
	}
}

// trimUnwantedFields removes unwanted fields from the object before it is cached:
//   - managedFields, as this metadata can be quite large
func trimUnwantedFields(obj interface{}) (interface{}, error) {
	if accessor, err := meta.Accessor(obj); err == nil {
		accessor.SetManagedFields(nil)
	}
	return obj, nil
}