| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
7771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847 |
- package costmodel
- import (
- "context"
- "encoding/base64"
- "fmt"
- "io"
- "net/http"
- "os"
- "path"
- "reflect"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "time"
- "github.com/microcosm-cc/bluemonday"
- "github.com/opencost/opencost/core/pkg/opencost"
- "github.com/opencost/opencost/core/pkg/util/httputil"
- "github.com/opencost/opencost/core/pkg/util/timeutil"
- "github.com/opencost/opencost/core/pkg/util/watcher"
- "github.com/opencost/opencost/core/pkg/version"
- "github.com/opencost/opencost/pkg/cloud/aws"
- cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
- "github.com/opencost/opencost/pkg/cloud/gcp"
- "github.com/opencost/opencost/pkg/cloud/provider"
- "github.com/opencost/opencost/pkg/cloudcost"
- "github.com/opencost/opencost/pkg/config"
- clustermap "github.com/opencost/opencost/pkg/costmodel/clusters"
- "github.com/opencost/opencost/pkg/customcost"
- "github.com/opencost/opencost/pkg/kubeconfig"
- "github.com/opencost/opencost/pkg/metrics"
- "github.com/opencost/opencost/pkg/services"
- "github.com/spf13/viper"
- v1 "k8s.io/api/core/v1"
- "github.com/julienschmidt/httprouter"
- "github.com/getsentry/sentry-go"
- "github.com/opencost/opencost/core/pkg/clusters"
- sysenv "github.com/opencost/opencost/core/pkg/env"
- "github.com/opencost/opencost/core/pkg/log"
- "github.com/opencost/opencost/core/pkg/util/json"
- "github.com/opencost/opencost/pkg/cloud/azure"
- "github.com/opencost/opencost/pkg/cloud/models"
- "github.com/opencost/opencost/pkg/cloud/utils"
- "github.com/opencost/opencost/pkg/clustercache"
- "github.com/opencost/opencost/pkg/env"
- "github.com/opencost/opencost/pkg/errors"
- "github.com/opencost/opencost/pkg/prom"
- "github.com/opencost/opencost/pkg/thanos"
- prometheus "github.com/prometheus/client_golang/api"
- prometheusAPI "github.com/prometheus/client_golang/api/prometheus/v1"
- appsv1 "k8s.io/api/apps/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/patrickmn/go-cache"
- "k8s.io/client-go/kubernetes"
- )
// sanitizePolicy strips unsafe HTML from user-supplied strings using
// bluemonday's user-generated-content policy.
var sanitizePolicy = bluemonday.UGCPolicy()

const (
	// RFC3339Milli is the timestamp layout used by the HTTP API
	// (RFC3339 with millisecond precision, UTC "Z" suffix).
	RFC3339Milli = "2006-01-02T15:04:05.000Z"

	// maxCacheMinutes* bound cache lifetimes per query window.
	// NOTE(review): the exact unit/semantics of these values is applied
	// elsewhere in the package — confirm before relying on them.
	maxCacheMinutes1d  = 11
	maxCacheMinutes2d  = 17
	maxCacheMinutes7d  = 37
	maxCacheMinutes30d = 137

	// Settings keys used by the settings pub/sub machinery.
	CustomPricingSetting = "CustomPricing"
	DiscountSetting      = "Discount"

	// epRules is the Prometheus rules proxy endpoint path.
	epRules = apiPrefix + "/rules"

	// LogSeparator is a visual divider emitted between log sections.
	LogSeparator = "+-------------------------------------------------------------------------------------"
)

var (
	// gitCommit is set by the build system
	gitCommit string

	// ANSIRegex matches ANSI escape and colors https://en.wikipedia.org/wiki/ANSI_escape_code
	ANSIRegex = regexp.MustCompile("\x1b\\[[0-9;]*m")
)
// Accesses defines a singleton application instance, providing access to
// Prometheus, Kubernetes, the cloud provider, and caches.
type Accesses struct {
	// PrometheusClient queries the default Prometheus instance;
	// ThanosClient (may be nil when Thanos is disabled) serves
	// remote/multi-cluster queries.
	PrometheusClient    prometheus.Client
	ThanosClient        prometheus.Client
	KubeClientSet       kubernetes.Interface
	ClusterCache        clustercache.ClusterCache
	ClusterMap          clusters.ClusterMap
	CloudProvider       models.Provider
	ConfigFileManager   *config.ConfigFileManager
	ClusterInfoProvider clusters.ClusterInfoProvider
	Model               *CostModel
	MetricsEmitter      *CostModelMetricsEmitter

	// Result caches keyed by query parameters; expirations are governed
	// by CacheExpiration (see GetCacheExpiration / GetCacheRefresh).
	OutOfClusterCache *cache.Cache
	AggregateCache    *cache.Cache
	CostDataCache     *cache.Cache
	ClusterCostsCache *cache.Cache
	CacheExpiration   map[time.Duration]time.Duration
	AggAPI            Aggregator

	// SettingsCache stores current state of app settings
	SettingsCache *cache.Cache

	// settingsSubscribers tracks channels through which changes to different
	// settings will be published in a pub/sub model
	settingsSubscribers map[string][]chan string
	// settingsMutex guards settingsSubscribers.
	settingsMutex sync.Mutex

	// registered http service instances
	httpServices services.HTTPServices
}
- // GetPrometheusClient decides whether the default Prometheus client or the Thanos client
- // should be used.
- func (a *Accesses) GetPrometheusClient(remote bool) prometheus.Client {
- // Use Thanos Client if it exists (enabled) and remote flag set
- var pc prometheus.Client
- if remote && a.ThanosClient != nil {
- pc = a.ThanosClient
- } else {
- pc = a.PrometheusClient
- }
- return pc
- }
- // GetCacheExpiration looks up and returns custom cache expiration for the given duration.
- // If one does not exists, it returns the default cache expiration, which is defined by
- // the particular cache.
- func (a *Accesses) GetCacheExpiration(dur time.Duration) time.Duration {
- if expiration, ok := a.CacheExpiration[dur]; ok {
- return expiration
- }
- return cache.DefaultExpiration
- }
- // GetCacheRefresh determines how long to wait before refreshing the cache for the given duration,
- // which is done 1 minute before we expect the cache to expire, or 1 minute if expiration is
- // not found or is less than 2 minutes.
- func (a *Accesses) GetCacheRefresh(dur time.Duration) time.Duration {
- expiry := a.GetCacheExpiration(dur).Minutes()
- if expiry <= 2.0 {
- return time.Minute
- }
- mins := time.Duration(expiry/2.0) * time.Minute
- return mins
- }
- func (a *Accesses) ClusterCostsFromCacheHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- duration := 24 * time.Hour
- offset := time.Minute
- durationHrs := "24h"
- fmtOffset := "1m"
- pClient := a.GetPrometheusClient(true)
- key := fmt.Sprintf("%s:%s", durationHrs, fmtOffset)
- if data, valid := a.ClusterCostsCache.Get(key); valid {
- clusterCosts := data.(map[string]*ClusterCosts)
- w.Write(WrapDataWithMessage(clusterCosts, nil, "clusterCosts cache hit"))
- } else {
- data, err := a.ComputeClusterCosts(pClient, a.CloudProvider, duration, offset, true)
- w.Write(WrapDataWithMessage(data, err, fmt.Sprintf("clusterCosts cache miss: %s", key)))
- }
- }
// Response is the standard JSON envelope returned by this package's HTTP
// endpoints (see WrapData and its variants).
type Response struct {
	Code    int         `json:"code"`              // HTTP status code mirrored in the body
	Status  string      `json:"status"`            // "success" or "error"
	Data    interface{} `json:"data"`              // endpoint-specific payload
	Message string      `json:"message,omitempty"` // error text or informational note
	Warning string      `json:"warning,omitempty"` // non-fatal warning surfaced to clients
}
// FilterFunc is a filter that returns true iff the given CostData should be
// filtered out, and the environment that was used as the filter criteria, if
// it was an aggregate.
type FilterFunc func(*CostData) (bool, string)
- // FilterCostData allows through only CostData that matches all the given filter functions
- func FilterCostData(data map[string]*CostData, retains []FilterFunc, filters []FilterFunc) (map[string]*CostData, int, map[string]int) {
- result := make(map[string]*CostData)
- filteredEnvironments := make(map[string]int)
- filteredContainers := 0
- DataLoop:
- for key, datum := range data {
- for _, rf := range retains {
- if ok, _ := rf(datum); ok {
- result[key] = datum
- // if any retain function passes, the data is retained and move on
- continue DataLoop
- }
- }
- for _, ff := range filters {
- if ok, environment := ff(datum); !ok {
- if environment != "" {
- filteredEnvironments[environment]++
- }
- filteredContainers++
- // if any filter function check fails, move on to the next datum
- continue DataLoop
- }
- }
- result[key] = datum
- }
- return result, filteredContainers, filteredEnvironments
- }
// filterFields returns a copy of data in which every struct field named in
// the comma-separated fields list has been zeroed out. Field names are
// matched case-insensitively via reflection against CostData's field names.
func filterFields(fields string, data map[string]*CostData) map[string]CostData {
	fs := strings.Split(fields, ",")
	fmap := make(map[string]bool)
	for _, f := range fs {
		fieldNameLower := strings.ToLower(f) // lowercase for case-insensitive match against field names
		log.Debugf("to delete: %s", fieldNameLower)
		fmap[fieldNameLower] = true
	}
	filteredData := make(map[string]CostData)
	for cname, costdata := range data {
		s := reflect.TypeOf(*costdata)
		val := reflect.ValueOf(*costdata)
		// Build a fresh zero-valued CostData and copy over only the
		// fields that were NOT requested for deletion.
		costdata2 := CostData{}
		cd2 := reflect.New(reflect.Indirect(reflect.ValueOf(costdata2)).Type()).Elem()
		n := s.NumField()
		for i := 0; i < n; i++ {
			field := s.Field(i)
			value := val.Field(i)
			value2 := cd2.Field(i)
			if _, ok := fmap[strings.ToLower(field.Name)]; !ok {
				value2.Set(reflect.Value(value))
			}
		}
		filteredData[cname] = cd2.Interface().(CostData)
	}
	return filteredData
}
// normalizeTimeParam converts a day-suffixed duration string into the
// equivalent hour-based string (e.g. "2d" -> "48h"); other non-empty values
// are returned unchanged. An empty string is an error.
func normalizeTimeParam(param string) (string, error) {
	if param == "" {
		return "", fmt.Errorf("invalid time param")
	}
	if !strings.HasSuffix(param, "d") {
		return param, nil
	}
	days, err := strconv.ParseInt(strings.TrimSuffix(param, "d"), 10, 64)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%dh", days*24), nil
}
// ParsePercentString takes a string of expected format "N%" and returns a
// floating point 0.0N. If the "%" symbol is missing, it just returns 0.0N.
// Empty string is interpreted as "0%" and returns 0.0.
func ParsePercentString(percentStr string) (float64, error) {
	if percentStr == "" {
		return 0.0, nil
	}
	trimmed := strings.TrimSuffix(percentStr, "%")
	pct, err := strconv.ParseFloat(trimmed, 64)
	if err != nil {
		return 0.0, err
	}
	return pct * 0.01, nil
}
- func WrapData(data interface{}, err error) []byte {
- var resp []byte
- if err != nil {
- log.Errorf("Error returned to client: %s", err.Error())
- resp, _ = json.Marshal(&Response{
- Code: http.StatusInternalServerError,
- Status: "error",
- Message: err.Error(),
- Data: data,
- })
- } else {
- resp, err = json.Marshal(&Response{
- Code: http.StatusOK,
- Status: "success",
- Data: data,
- })
- if err != nil {
- log.Errorf("error marshaling response json: %s", err.Error())
- }
- }
- return resp
- }
- func WrapDataWithMessage(data interface{}, err error, message string) []byte {
- var resp []byte
- if err != nil {
- log.Errorf("Error returned to client: %s", err.Error())
- resp, _ = json.Marshal(&Response{
- Code: http.StatusInternalServerError,
- Status: "error",
- Message: err.Error(),
- Data: data,
- })
- } else {
- resp, _ = json.Marshal(&Response{
- Code: http.StatusOK,
- Status: "success",
- Data: data,
- Message: message,
- })
- }
- return resp
- }
- func WrapDataWithWarning(data interface{}, err error, warning string) []byte {
- var resp []byte
- if err != nil {
- log.Errorf("Error returned to client: %s", err.Error())
- resp, _ = json.Marshal(&Response{
- Code: http.StatusInternalServerError,
- Status: "error",
- Message: err.Error(),
- Warning: warning,
- Data: data,
- })
- } else {
- resp, _ = json.Marshal(&Response{
- Code: http.StatusOK,
- Status: "success",
- Data: data,
- Warning: warning,
- })
- }
- return resp
- }
- func WrapDataWithMessageAndWarning(data interface{}, err error, message, warning string) []byte {
- var resp []byte
- if err != nil {
- log.Errorf("Error returned to client: %s", err.Error())
- resp, _ = json.Marshal(&Response{
- Code: http.StatusInternalServerError,
- Status: "error",
- Message: err.Error(),
- Warning: warning,
- Data: data,
- })
- } else {
- resp, _ = json.Marshal(&Response{
- Code: http.StatusOK,
- Status: "success",
- Data: data,
- Message: message,
- Warning: warning,
- })
- }
- return resp
- }
// wrapAsObjectItems wraps a slice of items into an object containing a single
// "items" list, letting the k8s proxy methods emulate a List() response.
func wrapAsObjectItems(items interface{}) map[string]interface{} {
	wrapped := make(map[string]interface{}, 1)
	wrapped["items"] = items
	return wrapped
}
- // RefreshPricingData needs to be called when a new node joins the fleet, since we cache the relevant subsets of pricing data to avoid storing the whole thing.
- func (a *Accesses) RefreshPricingData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- err := a.CloudProvider.DownloadPricingData()
- if err != nil {
- log.Errorf("Error refreshing pricing data: %s", err.Error())
- }
- w.Write(WrapData(nil, err))
- }
- func (a *Accesses) CostDataModel(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- window := r.URL.Query().Get("timeWindow")
- offset := r.URL.Query().Get("offset")
- fields := r.URL.Query().Get("filterFields")
- namespace := r.URL.Query().Get("namespace")
- if offset != "" {
- offset = "offset " + offset
- }
- data, err := a.Model.ComputeCostData(a.PrometheusClient, a.CloudProvider, window, offset, namespace)
- if fields != "" {
- filteredData := filterFields(fields, data)
- w.Write(WrapData(filteredData, err))
- } else {
- w.Write(WrapData(data, err))
- }
- }
- func (a *Accesses) ClusterCosts(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- window := r.URL.Query().Get("window")
- offset := r.URL.Query().Get("offset")
- if window == "" {
- w.Write(WrapData(nil, fmt.Errorf("missing window argument")))
- return
- }
- windowDur, err := timeutil.ParseDuration(window)
- if err != nil {
- w.Write(WrapData(nil, fmt.Errorf("error parsing window (%s): %s", window, err)))
- return
- }
- // offset is not a required parameter
- var offsetDur time.Duration
- if offset != "" {
- offsetDur, err = timeutil.ParseDuration(offset)
- if err != nil {
- w.Write(WrapData(nil, fmt.Errorf("error parsing offset (%s): %s", offset, err)))
- return
- }
- }
- useThanos, _ := strconv.ParseBool(r.URL.Query().Get("multi"))
- if useThanos && !thanos.IsEnabled() {
- w.Write(WrapData(nil, fmt.Errorf("Multi=true while Thanos is not enabled.")))
- return
- }
- var client prometheus.Client
- if useThanos {
- client = a.ThanosClient
- offsetDur = thanos.OffsetDuration()
- } else {
- client = a.PrometheusClient
- }
- data, err := a.ComputeClusterCosts(client, a.CloudProvider, windowDur, offsetDur, true)
- w.Write(WrapData(data, err))
- }
- func (a *Accesses) ClusterCostsOverTime(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- start := r.URL.Query().Get("start")
- end := r.URL.Query().Get("end")
- window := r.URL.Query().Get("window")
- offset := r.URL.Query().Get("offset")
- if window == "" {
- w.Write(WrapData(nil, fmt.Errorf("missing window argument")))
- return
- }
- windowDur, err := timeutil.ParseDuration(window)
- if err != nil {
- w.Write(WrapData(nil, fmt.Errorf("error parsing window (%s): %s", window, err)))
- return
- }
- // offset is not a required parameter
- var offsetDur time.Duration
- if offset != "" {
- offsetDur, err = timeutil.ParseDuration(offset)
- if err != nil {
- w.Write(WrapData(nil, fmt.Errorf("error parsing offset (%s): %s", offset, err)))
- return
- }
- }
- data, err := ClusterCostsOverTime(a.PrometheusClient, a.CloudProvider, start, end, windowDur, offsetDur)
- w.Write(WrapData(data, err))
- }
- func (a *Accesses) CostDataModelRange(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- startStr := r.URL.Query().Get("start")
- endStr := r.URL.Query().Get("end")
- windowStr := r.URL.Query().Get("window")
- fields := r.URL.Query().Get("filterFields")
- namespace := r.URL.Query().Get("namespace")
- cluster := r.URL.Query().Get("cluster")
- remote := r.URL.Query().Get("remote")
- remoteEnabled := env.IsRemoteEnabled() && remote != "false"
- layout := "2006-01-02T15:04:05.000Z"
- start, err := time.Parse(layout, startStr)
- if err != nil {
- w.Write(WrapDataWithMessage(nil, fmt.Errorf("invalid start date: %s", startStr), fmt.Sprintf("invalid start date: %s", startStr)))
- return
- }
- end, err := time.Parse(layout, endStr)
- if err != nil {
- w.Write(WrapDataWithMessage(nil, fmt.Errorf("invalid end date: %s", endStr), fmt.Sprintf("invalid end date: %s", endStr)))
- return
- }
- window := opencost.NewWindow(&start, &end)
- if window.IsOpen() || !window.HasDuration() || window.IsNegative() {
- w.Write(WrapDataWithMessage(nil, fmt.Errorf("invalid date range: %s", window), fmt.Sprintf("invalid date range: %s", window)))
- return
- }
- resolution := time.Hour
- if resDur, err := time.ParseDuration(windowStr); err == nil {
- resolution = resDur
- }
- // Use Thanos Client if it exists (enabled) and remote flag set
- var pClient prometheus.Client
- if remote != "false" && a.ThanosClient != nil {
- pClient = a.ThanosClient
- } else {
- pClient = a.PrometheusClient
- }
- data, err := a.Model.ComputeCostDataRange(pClient, a.CloudProvider, window, resolution, namespace, cluster, remoteEnabled)
- if err != nil {
- w.Write(WrapData(nil, err))
- }
- if fields != "" {
- filteredData := filterFields(fields, data)
- w.Write(WrapData(filteredData, err))
- } else {
- w.Write(WrapData(data, err))
- }
- }
// parseAggregations resolves the aggregation key, the list of aggregation
// labels, and the filter label. A non-empty customAggregation is used as-is;
// otherwise each aggregator entry and the filter type get the "kubernetes_"
// label prefix.
func parseAggregations(customAggregation, aggregator, filterType string) (string, []string, string) {
	if customAggregation != "" {
		return customAggregation, strings.Split(customAggregation, ","), filterType
	}
	aggregations := strings.Split(aggregator, ",")
	for i := range aggregations {
		aggregations[i] = "kubernetes_" + aggregations[i]
	}
	return strings.Join(aggregations, ","), aggregations, "kubernetes_" + filterType
}
- func (a *Accesses) GetAllNodePricing(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data, err := a.CloudProvider.AllNodePricing()
- w.Write(WrapData(data, err))
- }
- func (a *Accesses) GetConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data, err := a.CloudProvider.GetConfig()
- w.Write(WrapData(data, err))
- }
- func (a *Accesses) UpdateSpotInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data, err := a.CloudProvider.UpdateConfig(r.Body, aws.SpotInfoUpdateType)
- if err != nil {
- w.Write(WrapData(data, err))
- return
- }
- w.Write(WrapData(data, err))
- err = a.CloudProvider.DownloadPricingData()
- if err != nil {
- log.Errorf("Error redownloading data on config update: %s", err.Error())
- }
- return
- }
- func (a *Accesses) UpdateAthenaInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data, err := a.CloudProvider.UpdateConfig(r.Body, aws.AthenaInfoUpdateType)
- if err != nil {
- w.Write(WrapData(data, err))
- return
- }
- w.Write(WrapData(data, err))
- return
- }
- func (a *Accesses) UpdateBigQueryInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data, err := a.CloudProvider.UpdateConfig(r.Body, gcp.BigqueryUpdateType)
- if err != nil {
- w.Write(WrapData(data, err))
- return
- }
- w.Write(WrapData(data, err))
- return
- }
- func (a *Accesses) UpdateAzureStorageConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data, err := a.CloudProvider.UpdateConfig(r.Body, azure.AzureStorageUpdateType)
- if err != nil {
- w.Write(WrapData(data, err))
- return
- }
- w.Write(WrapData(data, err))
- return
- }
- func (a *Accesses) UpdateConfigByKey(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data, err := a.CloudProvider.UpdateConfig(r.Body, "")
- if err != nil {
- w.Write(WrapData(data, err))
- return
- }
- w.Write(WrapData(data, err))
- return
- }
- func (a *Accesses) ManagementPlatform(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data, err := a.CloudProvider.GetManagementPlatform()
- if err != nil {
- w.Write(WrapData(data, err))
- return
- }
- w.Write(WrapData(data, err))
- return
- }
- func (a *Accesses) ClusterInfo(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data := a.ClusterInfoProvider.GetClusterInfo()
- w.Write(WrapData(data, nil))
- }
- func (a *Accesses) GetClusterInfoMap(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data := a.ClusterMap.AsMap()
- w.Write(WrapData(data, nil))
- }
- func (a *Accesses) GetServiceAccountStatus(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- w.Write(WrapData(a.CloudProvider.ServiceAccountStatus(), nil))
- }
- func (a *Accesses) GetPricingSourceStatus(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- w.Write(WrapData(a.CloudProvider.PricingSourceStatus(), nil))
- }
- func (a *Accesses) GetPricingSourceCounts(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- w.Write(WrapData(a.Model.GetPricingSourceCounts()))
- }
- func (a *Accesses) GetPricingSourceSummary(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- data := a.CloudProvider.PricingSourceSummary()
- w.Write(WrapData(data, nil))
- }
- func (a *Accesses) GetPrometheusMetadata(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- w.Write(WrapData(prom.Validate(a.PrometheusClient)))
- }
- func (a *Accesses) PrometheusQuery(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- qp := httputil.NewQueryParams(r.URL.Query())
- query := qp.Get("query", "")
- if query == "" {
- w.Write(WrapData(nil, fmt.Errorf("Query Parameter 'query' is unset'")))
- return
- }
- // Attempt to parse time as either a unix timestamp or as an RFC3339 value
- var timeVal time.Time
- timeStr := qp.Get("time", "")
- if len(timeStr) > 0 {
- if t, err := strconv.ParseInt(timeStr, 10, 64); err == nil {
- timeVal = time.Unix(t, 0)
- } else if t, err := time.Parse(time.RFC3339, timeStr); err == nil {
- timeVal = t
- }
- // If time is given, but not parse-able, return an error
- if timeVal.IsZero() {
- http.Error(w, fmt.Sprintf("time must be a unix timestamp or RFC3339 value; illegal value given: %s", timeStr), http.StatusBadRequest)
- }
- }
- ctx := prom.NewNamedContext(a.PrometheusClient, prom.FrontendContextName)
- body, err := ctx.RawQuery(query, timeVal)
- if err != nil {
- w.Write(WrapData(nil, fmt.Errorf("Error running query %s. Error: %s", query, err)))
- return
- }
- w.Write(body)
- }
- func (a *Accesses) PrometheusQueryRange(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- qp := httputil.NewQueryParams(r.URL.Query())
- query := qp.Get("query", "")
- if query == "" {
- fmt.Fprintf(w, "Error parsing query from request parameters.")
- return
- }
- start, end, duration, err := toStartEndStep(qp)
- if err != nil {
- fmt.Fprintf(w, err.Error())
- return
- }
- ctx := prom.NewNamedContext(a.PrometheusClient, prom.FrontendContextName)
- body, err := ctx.RawQueryRange(query, start, end, duration)
- if err != nil {
- fmt.Fprintf(w, "Error running query %s. Error: %s", query, err)
- return
- }
- w.Write(body)
- }
- func (a *Accesses) ThanosQuery(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- if !thanos.IsEnabled() {
- w.Write(WrapData(nil, fmt.Errorf("ThanosDisabled")))
- return
- }
- qp := httputil.NewQueryParams(r.URL.Query())
- query := qp.Get("query", "")
- if query == "" {
- w.Write(WrapData(nil, fmt.Errorf("Query Parameter 'query' is unset'")))
- return
- }
- // Attempt to parse time as either a unix timestamp or as an RFC3339 value
- var timeVal time.Time
- timeStr := qp.Get("time", "")
- if len(timeStr) > 0 {
- if t, err := strconv.ParseInt(timeStr, 10, 64); err == nil {
- timeVal = time.Unix(t, 0)
- } else if t, err := time.Parse(time.RFC3339, timeStr); err == nil {
- timeVal = t
- }
- // If time is given, but not parse-able, return an error
- if timeVal.IsZero() {
- http.Error(w, fmt.Sprintf("time must be a unix timestamp or RFC3339 value; illegal value given: %s", timeStr), http.StatusBadRequest)
- }
- }
- ctx := prom.NewNamedContext(a.ThanosClient, prom.FrontendContextName)
- body, err := ctx.RawQuery(query, timeVal)
- if err != nil {
- w.Write(WrapData(nil, fmt.Errorf("Error running query %s. Error: %s", query, err)))
- return
- }
- w.Write(body)
- }
- func (a *Accesses) ThanosQueryRange(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- if !thanos.IsEnabled() {
- w.Write(WrapData(nil, fmt.Errorf("ThanosDisabled")))
- return
- }
- qp := httputil.NewQueryParams(r.URL.Query())
- query := qp.Get("query", "")
- if query == "" {
- fmt.Fprintf(w, "Error parsing query from request parameters.")
- return
- }
- start, end, duration, err := toStartEndStep(qp)
- if err != nil {
- fmt.Fprintf(w, err.Error())
- return
- }
- ctx := prom.NewNamedContext(a.ThanosClient, prom.FrontendContextName)
- body, err := ctx.RawQueryRange(query, start, end, duration)
- if err != nil {
- fmt.Fprintf(w, "Error running query %s. Error: %s", query, err)
- return
- }
- w.Write(body)
- }
- // helper for query range proxy requests
- func toStartEndStep(qp httputil.QueryParams) (start, end time.Time, step time.Duration, err error) {
- var e error
- ss := qp.Get("start", "")
- es := qp.Get("end", "")
- ds := qp.Get("duration", "")
- layout := "2006-01-02T15:04:05.000Z"
- start, e = time.Parse(layout, ss)
- if e != nil {
- err = fmt.Errorf("Error parsing time %s. Error: %s", ss, err)
- return
- }
- end, e = time.Parse(layout, es)
- if e != nil {
- err = fmt.Errorf("Error parsing time %s. Error: %s", es, err)
- return
- }
- step, e = time.ParseDuration(ds)
- if e != nil {
- err = fmt.Errorf("Error parsing duration %s. Error: %s", ds, err)
- return
- }
- err = nil
- return
- }
- func (a *Accesses) GetPrometheusQueueState(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- promQueueState, err := prom.GetPrometheusQueueState(a.PrometheusClient)
- if err != nil {
- w.Write(WrapData(nil, err))
- return
- }
- result := map[string]*prom.PrometheusQueueState{
- "prometheus": promQueueState,
- }
- if thanos.IsEnabled() {
- thanosQueueState, err := prom.GetPrometheusQueueState(a.ThanosClient)
- if err != nil {
- log.Warnf("Error getting Thanos queue state: %s", err)
- } else {
- result["thanos"] = thanosQueueState
- }
- }
- w.Write(WrapData(result, nil))
- }
- // GetPrometheusMetrics retrieves availability of Prometheus and Thanos metrics
- func (a *Accesses) GetPrometheusMetrics(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- promMetrics := prom.GetPrometheusMetrics(a.PrometheusClient, "")
- result := map[string][]*prom.PrometheusDiagnostic{
- "prometheus": promMetrics,
- }
- if thanos.IsEnabled() {
- thanosMetrics := prom.GetPrometheusMetrics(a.ThanosClient, thanos.QueryOffset())
- result["thanos"] = thanosMetrics
- }
- w.Write(WrapData(result, nil))
- }
- func (a *Accesses) GetAllPersistentVolumes(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- pvList := a.ClusterCache.GetAllPersistentVolumes()
- body, err := json.Marshal(wrapAsObjectItems(pvList))
- if err != nil {
- fmt.Fprintf(w, "Error decoding persistent volumes: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetAllDeployments(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- qp := httputil.NewQueryParams(r.URL.Query())
- namespace := qp.Get("namespace", "")
- deploymentsList := a.ClusterCache.GetAllDeployments()
- // filter for provided namespace
- var deployments []*appsv1.Deployment
- if namespace == "" {
- deployments = deploymentsList
- } else {
- deployments = []*appsv1.Deployment{}
- for _, d := range deploymentsList {
- if d.Namespace == namespace {
- deployments = append(deployments, d)
- }
- }
- }
- body, err := json.Marshal(wrapAsObjectItems(deployments))
- if err != nil {
- fmt.Fprintf(w, "Error decoding deployment: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetAllStorageClasses(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- scList := a.ClusterCache.GetAllStorageClasses()
- body, err := json.Marshal(wrapAsObjectItems(scList))
- if err != nil {
- fmt.Fprintf(w, "Error decoding storageclasses: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetAllStatefulSets(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- qp := httputil.NewQueryParams(r.URL.Query())
- namespace := qp.Get("namespace", "")
- statefulSetsList := a.ClusterCache.GetAllStatefulSets()
- // filter for provided namespace
- var statefulSets []*appsv1.StatefulSet
- if namespace == "" {
- statefulSets = statefulSetsList
- } else {
- statefulSets = []*appsv1.StatefulSet{}
- for _, ss := range statefulSetsList {
- if ss.Namespace == namespace {
- statefulSets = append(statefulSets, ss)
- }
- }
- }
- body, err := json.Marshal(wrapAsObjectItems(statefulSets))
- if err != nil {
- fmt.Fprintf(w, "Error decoding deployment: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetAllNodes(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- nodeList := a.ClusterCache.GetAllNodes()
- body, err := json.Marshal(wrapAsObjectItems(nodeList))
- if err != nil {
- fmt.Fprintf(w, "Error decoding nodes: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetAllPods(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- podlist := a.ClusterCache.GetAllPods()
- body, err := json.Marshal(wrapAsObjectItems(podlist))
- if err != nil {
- fmt.Fprintf(w, "Error decoding pods: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetAllNamespaces(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- namespaces := a.ClusterCache.GetAllNamespaces()
- body, err := json.Marshal(wrapAsObjectItems(namespaces))
- if err != nil {
- fmt.Fprintf(w, "Error decoding deployment: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetAllDaemonSets(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- daemonSets := a.ClusterCache.GetAllDaemonSets()
- body, err := json.Marshal(wrapAsObjectItems(daemonSets))
- if err != nil {
- fmt.Fprintf(w, "Error decoding daemon set: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetPod(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- podName := ps.ByName("name")
- podNamespace := ps.ByName("namespace")
- // TODO: ClusterCache API could probably afford to have some better filtering
- allPods := a.ClusterCache.GetAllPods()
- for _, pod := range allPods {
- for _, container := range pod.Spec.Containers {
- container.Env = make([]v1.EnvVar, 0)
- }
- if pod.Namespace == podNamespace && pod.Name == podName {
- body, err := json.Marshal(pod)
- if err != nil {
- fmt.Fprintf(w, "Error decoding pod: "+err.Error())
- } else {
- w.Write(body)
- }
- return
- }
- }
- fmt.Fprintf(w, "Pod not found\n")
- }
- func (a *Accesses) PrometheusRecordingRules(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- u := a.PrometheusClient.URL(epRules, nil)
- req, err := http.NewRequest(http.MethodGet, u.String(), nil)
- if err != nil {
- fmt.Fprintf(w, "Error creating Prometheus rule request: "+err.Error())
- }
- _, body, err := a.PrometheusClient.Do(r.Context(), req)
- if err != nil {
- fmt.Fprintf(w, "Error making Prometheus rule request: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) PrometheusConfig(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- pConfig := map[string]string{
- "address": env.GetPrometheusServerEndpoint(),
- }
- body, err := json.Marshal(pConfig)
- if err != nil {
- fmt.Fprintf(w, "Error marshalling prometheus config")
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) PrometheusTargets(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- u := a.PrometheusClient.URL(epTargets, nil)
- req, err := http.NewRequest(http.MethodGet, u.String(), nil)
- if err != nil {
- fmt.Fprintf(w, "Error creating Prometheus rule request: "+err.Error())
- }
- _, body, err := a.PrometheusClient.Do(r.Context(), req)
- if err != nil {
- fmt.Fprintf(w, "Error making Prometheus rule request: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetOrphanedPods(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- podlist := a.ClusterCache.GetAllPods()
- var lonePods []*v1.Pod
- for _, pod := range podlist {
- if len(pod.OwnerReferences) == 0 {
- lonePods = append(lonePods, pod)
- }
- }
- body, err := json.Marshal(lonePods)
- if err != nil {
- fmt.Fprintf(w, "Error decoding pod: "+err.Error())
- } else {
- w.Write(body)
- }
- }
- func (a *Accesses) GetInstallNamespace(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- ns := env.GetKubecostNamespace()
- w.Write([]byte(ns))
- }
// InstallInfo describes a running kubecost installation; it is the response
// payload of the /installInfo endpoint (see GetInstallInfo).
type InstallInfo struct {
	// Containers lists the containers of the cost-analyzer pod.
	Containers []ContainerInfo `json:"containers"`
	// ClusterInfo holds coarse cluster statistics; GetInstallInfo populates
	// "nodeCount" and "podCount".
	ClusterInfo map[string]string `json:"clusterInfo"`
	// Version is the kubecost release version string.
	Version string `json:"version"`
}
// ContainerInfo identifies one container of the kubecost pod, as reported by
// GetKubecostContainers.
type ContainerInfo struct {
	ContainerName string `json:"containerName"`
	Image         string `json:"image"`
	// StartTime is the owning pod's start time rendered as a string.
	StartTime string `json:"startTime"`
}
- func (a *Accesses) GetInstallInfo(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- containers, err := GetKubecostContainers(a.KubeClientSet)
- if err != nil {
- writeErrorResponse(w, 500, fmt.Sprintf("Unable to list pods: %s", err.Error()))
- return
- }
- info := InstallInfo{
- Containers: containers,
- ClusterInfo: make(map[string]string),
- Version: version.FriendlyVersion(),
- }
- nodes := a.ClusterCache.GetAllNodes()
- cachePods := a.ClusterCache.GetAllPods()
- info.ClusterInfo["nodeCount"] = strconv.Itoa(len(nodes))
- info.ClusterInfo["podCount"] = strconv.Itoa(len(cachePods))
- body, err := json.Marshal(info)
- if err != nil {
- writeErrorResponse(w, 500, fmt.Sprintf("Error decoding pod: %s", err.Error()))
- return
- }
- w.Write(body)
- }
- func GetKubecostContainers(kubeClientSet kubernetes.Interface) ([]ContainerInfo, error) {
- pods, err := kubeClientSet.CoreV1().Pods(env.GetKubecostNamespace()).List(context.Background(), metav1.ListOptions{
- LabelSelector: "app=cost-analyzer",
- FieldSelector: "status.phase=Running",
- Limit: 1,
- })
- if err != nil {
- return nil, fmt.Errorf("failed to query kubernetes client for kubecost pods: %s", err)
- }
- // If we have zero pods either something is weird with the install since the app selector is not exposed in the helm
- // chart or more likely we are running locally - in either case Images field will return as null
- var containers []ContainerInfo
- if len(pods.Items) > 0 {
- for _, pod := range pods.Items {
- for _, container := range pod.Spec.Containers {
- c := ContainerInfo{
- ContainerName: container.Name,
- Image: container.Image,
- StartTime: pod.Status.StartTime.String(),
- }
- containers = append(containers, c)
- }
- }
- }
- return containers, nil
- }
- // logsFor pulls the logs for a specific pod, namespace, and container
- func logsFor(c kubernetes.Interface, namespace string, pod string, container string, dur time.Duration, ctx context.Context) (string, error) {
- since := time.Now().UTC().Add(-dur)
- logOpts := v1.PodLogOptions{
- SinceTime: &metav1.Time{Time: since},
- }
- if container != "" {
- logOpts.Container = container
- }
- req := c.CoreV1().Pods(namespace).GetLogs(pod, &logOpts)
- reader, err := req.Stream(ctx)
- if err != nil {
- return "", err
- }
- podLogs, err := io.ReadAll(reader)
- if err != nil {
- return "", err
- }
- // If color is already disabled then we don't need to process the logs
- // to drop ANSI colors
- if !viper.GetBool("disable-log-color") {
- podLogs = ANSIRegex.ReplaceAll(podLogs, []byte{})
- }
- return string(podLogs), nil
- }
// GetPodLogs returns pod logs as separator-framed plain-text sections.
//
// Query parameters:
//   - namespace: defaults to the kubecost install namespace
//   - pod:       fetch logs for this single pod (takes precedence)
//   - container: optional container of that pod; must exist on it
//   - selector:  label selector; fetch logs for every container of every
//     matching pod (used only when "pod" is empty)
//   - since:     Go duration string bounding log age, default "24h"
func (a *Accesses) GetPodLogs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	qp := httputil.NewQueryParams(r.URL.Query())
	ns := qp.Get("namespace", env.GetKubecostNamespace())
	pod := qp.Get("pod", "")
	selector := qp.Get("selector", "")
	container := qp.Get("container", "")
	since := qp.Get("since", "24h")
	sinceDuration, err := time.ParseDuration(since)
	if err != nil {
		fmt.Fprintf(w, "Invalid Duration String: "+err.Error())
		return
	}
	// Accumulates one framed section per (ns, pod, container) whose logs are
	// non-empty; empty log bodies are silently skipped.
	var logResult string
	appendLog := func(ns string, pod string, container string, l string) {
		if l == "" {
			return
		}
		logResult += fmt.Sprintf("%s\n| %s:%s:%s\n%s\n%s\n\n", LogSeparator, ns, pod, container, LogSeparator, l)
	}
	// Single-pod mode.
	if pod != "" {
		pd, err := a.KubeClientSet.CoreV1().Pods(ns).Get(r.Context(), pod, metav1.GetOptions{})
		if err != nil {
			fmt.Fprintf(w, "Error Finding Pod: "+err.Error())
			return
		}
		// If a specific container was requested, verify it exists on the pod
		// (case-insensitively) before asking the API server for its logs.
		if container != "" {
			var foundContainer bool
			for _, cont := range pd.Spec.Containers {
				if strings.EqualFold(cont.Name, container) {
					foundContainer = true
					break
				}
			}
			if !foundContainer {
				fmt.Fprintf(w, "Could not find container: "+container)
				return
			}
		}
		logs, err := logsFor(a.KubeClientSet, ns, pod, container, sinceDuration, r.Context())
		if err != nil {
			fmt.Fprintf(w, "Error Getting Logs: "+err.Error())
			return
		}
		appendLog(ns, pod, container, logs)
		w.Write([]byte(logResult))
		return
	}
	// Selector mode: per-container log failures are skipped rather than
	// failing the whole request.
	if selector != "" {
		pods, err := a.KubeClientSet.CoreV1().Pods(ns).List(r.Context(), metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			fmt.Fprintf(w, "Error Finding Pod: "+err.Error())
			return
		}
		for _, pd := range pods.Items {
			for _, cont := range pd.Spec.Containers {
				logs, err := logsFor(a.KubeClientSet, ns, pd.Name, cont.Name, sinceDuration, r.Context())
				if err != nil {
					continue
				}
				appendLog(ns, pd.Name, cont.Name, logs)
			}
		}
	}
	// Neither pod nor selector given: writes an empty body.
	w.Write([]byte(logResult))
}
- func (a *Accesses) AddServiceKey(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- r.ParseForm()
- key := r.PostForm.Get("key")
- k := []byte(key)
- err := os.WriteFile(path.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), "key.json"), k, 0644)
- if err != nil {
- fmt.Fprintf(w, "Error writing service key: "+err.Error())
- }
- w.WriteHeader(http.StatusOK)
- }
- func (a *Accesses) GetHelmValues(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- encodedValues := sysenv.Get("HELM_VALUES", "")
- if encodedValues == "" {
- fmt.Fprintf(w, "Values reporting disabled")
- return
- }
- result, err := base64.StdEncoding.DecodeString(encodedValues)
- if err != nil {
- fmt.Fprintf(w, "Failed to decode encoded values: %s", err)
- return
- }
- w.Write(result)
- }
- func (a *Accesses) Status(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- promServer := env.GetPrometheusServerEndpoint()
- api := prometheusAPI.NewAPI(a.PrometheusClient)
- result, err := api.Buildinfo(r.Context())
- if err != nil {
- fmt.Fprintf(w, "Using Prometheus at "+promServer+". Error: "+err.Error())
- } else {
- fmt.Fprintf(w, "Using Prometheus at "+promServer+". Version: "+result.Version)
- }
- }
- // captures the panic event in sentry
- func capturePanicEvent(err string, stack string) {
- msg := fmt.Sprintf("Panic: %s\nStackTrace: %s\n", err, stack)
- log.Infof(msg)
- sentry.CurrentHub().CaptureEvent(&sentry.Event{
- Level: sentry.LevelError,
- Message: msg,
- })
- sentry.Flush(5 * time.Second)
- }
- // handle any panics reported by the errors package
- func handlePanic(p errors.Panic) bool {
- err := p.Error
- if err != nil {
- if err, ok := err.(error); ok {
- capturePanicEvent(err.Error(), p.Stack)
- }
- if err, ok := err.(string); ok {
- capturePanicEvent(err, p.Stack)
- }
- }
- // Return true to recover iff the type is http, otherwise allow kubernetes
- // to recover.
- return p.Type == errors.PanicTypeHTTP
- }
// Initialize builds the cost-model API: it constructs the Prometheus (and
// optionally Thanos) clients, the Kubernetes client and cluster cache, the
// cloud provider, the cost model and its caches, registers every HTTP
// endpoint on the supplied router, and returns the assembled Accesses.
// Callers may pass extra ConfigMap watchers to hook into config updates.
func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses {
	configWatchers := watcher.NewConfigMapWatchers(additionalConfigWatchers...)

	var err error

	// Optionally wire panics through sentry for error reporting; failures
	// here are logged and otherwise ignored.
	if errorReportingEnabled {
		err = sentry.Init(sentry.ClientOptions{Release: version.FriendlyVersion()})
		if err != nil {
			log.Infof("Failed to initialize sentry for error reporting")
		} else {
			err = errors.SetPanicHandler(handlePanic)
			if err != nil {
				log.Infof("Failed to set panic handler: %s", err)
			}
		}
	}

	// A Prometheus server endpoint is mandatory; abort without one.
	address := env.GetPrometheusServerEndpoint()
	if address == "" {
		log.Fatalf("No address for prometheus set in $%s. Aborting.", env.PrometheusServerEndpointEnvVar)
	}

	queryConcurrency := env.GetMaxQueryConcurrency()
	log.Infof("Prometheus/Thanos Client Max Concurrency set to %d", queryConcurrency)

	timeout := 120 * time.Second
	keepAlive := 120 * time.Second
	tlsHandshakeTimeout := 10 * time.Second
	scrapeInterval := env.GetKubecostScrapeInterval()

	// Optional retry behavior for rate-limited (429) Prometheus responses.
	var rateLimitRetryOpts *prom.RateLimitRetryOpts = nil
	if env.IsPrometheusRetryOnRateLimitResponse() {
		rateLimitRetryOpts = &prom.RateLimitRetryOpts{
			MaxRetries:       env.GetPrometheusRetryOnRateLimitMaxRetries(),
			DefaultRetryWait: env.GetPrometheusRetryOnRateLimitDefaultWait(),
		}
	}

	promCli, err := prom.NewPrometheusClient(address, &prom.PrometheusClientConfig{
		Timeout:               timeout,
		KeepAlive:             keepAlive,
		TLSHandshakeTimeout:   tlsHandshakeTimeout,
		TLSInsecureSkipVerify: env.GetInsecureSkipVerify(),
		RateLimitRetryOpts:    rateLimitRetryOpts,
		Auth: &prom.ClientAuth{
			Username:    env.GetDBBasicAuthUsername(),
			Password:    env.GetDBBasicAuthUserPassword(),
			BearerToken: env.GetDBBearerToken(),
		},
		QueryConcurrency:  queryConcurrency,
		QueryLogFile:      "",
		HeaderXScopeOrgId: env.GetPrometheusHeaderXScopeOrgId(),
	})
	if err != nil {
		log.Fatalf("Failed to create prometheus client, Error: %v", err)
	}

	// Sanity-check connectivity with the 'up' query; failures are logged but
	// not fatal, so startup proceeds even with a flaky Prometheus.
	m, err := prom.Validate(promCli)
	if err != nil || !m.Running {
		if err != nil {
			log.Errorf("Failed to query prometheus at %s. Error: %s . Troubleshooting help available at: %s", address, err.Error(), prom.PrometheusTroubleshootingURL)
		} else if !m.Running {
			log.Errorf("Prometheus at %s is not running. Troubleshooting help available at: %s", address, prom.PrometheusTroubleshootingURL)
		}
	} else {
		log.Infof("Success: retrieved the 'up' query against prometheus at: " + address)
	}

	// Build info is advisory only; cortex/mimir/thanos do not serve it.
	api := prometheusAPI.NewAPI(promCli)
	_, err = api.Buildinfo(context.Background())
	if err != nil {
		log.Infof("No valid prometheus config file at %s. Error: %s . Troubleshooting help available at: %s. Ignore if using cortex/mimir/thanos here.", address, err.Error(), prom.PrometheusTroubleshootingURL)
	} else {
		log.Infof("Retrieved a prometheus config file from: %s", address)
	}

	// When no scrape interval is configured, default to one minute and try
	// to detect the actual interval of the kubecost scrape job.
	if scrapeInterval == 0 {
		scrapeInterval = time.Minute

		// Lookup scrape interval for kubecost job, update if found
		si, err := prom.ScrapeIntervalFor(promCli, env.GetKubecostJobName())
		if err == nil {
			scrapeInterval = si
		}
	}

	log.Infof("Using scrape interval of %f", scrapeInterval.Seconds())

	// Kubernetes API setup
	kubeClientset, err := kubeconfig.LoadKubeClient("")
	if err != nil {
		log.Fatalf("Failed to build Kubernetes client: %s", err.Error())
	}

	// Create ConfigFileManager for synchronization of shared configuration
	confManager := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
		BucketStoreConfig: env.GetKubecostConfigBucket(),
		LocalConfigPath:   "/",
	})

	configPrefix := env.GetConfigPathWithDefault("/var/configs/")

	// Create Kubernetes Cluster Cache + Watchers: either replayed from an
	// exported cache file or built live against the API server.
	var k8sCache clustercache.ClusterCache
	if env.IsClusterCacheFileEnabled() {
		importLocation := confManager.ConfigFileAt(path.Join(configPrefix, "cluster-cache.json"))
		k8sCache = clustercache.NewClusterImporter(importLocation)
	} else {
		k8sCache = clustercache.NewKubernetesClusterCache(kubeClientset)
	}
	k8sCache.Run()

	cloudProviderKey := env.GetCloudProviderAPIKey()
	cloudProvider, err := provider.NewProvider(k8sCache, cloudProviderKey, confManager)
	if err != nil {
		panic(err.Error())
	}

	// Append the pricing config watcher
	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
	configWatchers.AddWatcher(metrics.GetMetricsConfigWatcher())

	watchConfigFunc := configWatchers.ToWatchFunc()
	watchedConfigs := configWatchers.GetWatchedConfigs()

	kubecostNamespace := env.GetKubecostNamespace()

	// We need an initial invocation because the init of the cache has happened before we had access to the provider.
	for _, cw := range watchedConfigs {
		configs, err := kubeClientset.CoreV1().ConfigMaps(kubecostNamespace).Get(context.Background(), cw, metav1.GetOptions{})
		if err != nil {
			log.Infof("No %s configmap found at install time, using existing configs: %s", cw, err.Error())
		} else {
			log.Infof("Found configmap %s, watching...", configs.Name)
			watchConfigFunc(configs)
		}
	}

	k8sCache.SetConfigMapUpdateFunc(watchConfigFunc)

	// Record this cluster's identity in durable storage when remote writes
	// are enabled.
	remoteEnabled := env.IsRemoteEnabled()
	if remoteEnabled {
		info, err := cloudProvider.ClusterInfo()
		// NOTE(review): info is read before err is checked; if ClusterInfo
		// can return a nil map on error, the lookups below yield empty
		// strings — confirm this is intended.
		log.Infof("Saving cluster with id:'%s', and name:'%s' to durable storage", info["id"], info["name"])
		if err != nil {
			log.Infof("Error saving cluster id %s", err.Error())
		}
		_, _, err = utils.GetOrCreateClusterMeta(info["id"], info["name"])
		if err != nil {
			log.Infof("Unable to set cluster id '%s' for cluster '%s', %s", info["id"], info["name"], err.Error())
		}
	}

	// Thanos Client
	var thanosClient prometheus.Client
	if thanos.IsEnabled() {
		thanosAddress := thanos.QueryURL()

		if thanosAddress != "" {
			thanosCli, _ := thanos.NewThanosClient(thanosAddress, &prom.PrometheusClientConfig{
				Timeout:               timeout,
				KeepAlive:             keepAlive,
				TLSHandshakeTimeout:   tlsHandshakeTimeout,
				TLSInsecureSkipVerify: env.GetInsecureSkipVerify(),
				RateLimitRetryOpts:    rateLimitRetryOpts,
				Auth: &prom.ClientAuth{
					Username:    env.GetMultiClusterBasicAuthUsername(),
					Password:    env.GetMultiClusterBasicAuthPassword(),
					BearerToken: env.GetMultiClusterBearerToken(),
				},
				QueryConcurrency: queryConcurrency,
				QueryLogFile:     env.GetQueryLoggingFile(),
			})

			// The client is assigned in both branches: a validation failure
			// is a warning, not a reason to run without Thanos.
			_, err = prom.Validate(thanosCli)
			if err != nil {
				log.Warnf("Failed to query Thanos at %s. Error: %s.", thanosAddress, err.Error())
				thanosClient = thanosCli
			} else {
				log.Infof("Success: retrieved the 'up' query against Thanos at: " + thanosAddress)
				thanosClient = thanosCli
			}
		} else {
			log.Infof("Error resolving environment variable: $%s", env.ThanosQueryUrlEnvVar)
		}
	}

	// ClusterInfo Provider to provide the cluster map with local and remote cluster data
	var clusterInfoProvider clusters.ClusterInfoProvider
	if env.IsClusterInfoFileEnabled() {
		clusterInfoFile := confManager.ConfigFileAt(path.Join(configPrefix, "cluster-info.json"))
		clusterInfoProvider = NewConfiguredClusterInfoProvider(clusterInfoFile)
	} else {
		clusterInfoProvider = NewLocalClusterInfoProvider(kubeClientset, cloudProvider)
	}

	// Initialize ClusterMap for maintaining ClusterInfo by ClusterID
	var clusterMap clusters.ClusterMap
	if thanosClient != nil {
		clusterMap = clustermap.NewClusterMap(thanosClient, clusterInfoProvider, 10*time.Minute)
	} else {
		clusterMap = clustermap.NewClusterMap(promCli, clusterInfoProvider, 5*time.Minute)
	}

	// cache responses from model and aggregation for a default of 10 minutes;
	// clear expired responses every 20 minutes
	aggregateCache := cache.New(time.Minute*10, time.Minute*20)
	costDataCache := cache.New(time.Minute*10, time.Minute*20)
	clusterCostsCache := cache.New(cache.NoExpiration, cache.NoExpiration)
	outOfClusterCache := cache.New(time.Minute*5, time.Minute*10)
	settingsCache := cache.New(cache.NoExpiration, cache.NoExpiration)

	// query durations that should be cached longer should be registered here
	// use relatively prime numbers to minimize likelihood of synchronized
	// attempts at cache warming
	day := 24 * time.Hour
	cacheExpiration := map[time.Duration]time.Duration{
		day:      maxCacheMinutes1d * time.Minute,
		2 * day:  maxCacheMinutes2d * time.Minute,
		7 * day:  maxCacheMinutes7d * time.Minute,
		30 * day: maxCacheMinutes30d * time.Minute,
	}

	// The cost model queries Thanos when it is configured, else Prometheus.
	var pc prometheus.Client
	if thanosClient != nil {
		pc = thanosClient
	} else {
		pc = promCli
	}
	costModel := NewCostModel(pc, cloudProvider, k8sCache, clusterMap, scrapeInterval)
	metricsEmitter := NewCostModelMetricsEmitter(promCli, k8sCache, cloudProvider, clusterInfoProvider, costModel)

	a := &Accesses{
		httpServices:        services.NewCostModelServices(),
		PrometheusClient:    promCli,
		ThanosClient:        thanosClient,
		KubeClientSet:       kubeClientset,
		ClusterCache:        k8sCache,
		ClusterMap:          clusterMap,
		CloudProvider:       cloudProvider,
		ConfigFileManager:   confManager,
		ClusterInfoProvider: clusterInfoProvider,
		Model:               costModel,
		MetricsEmitter:      metricsEmitter,
		AggregateCache:      aggregateCache,
		CostDataCache:       costDataCache,
		ClusterCostsCache:   clusterCostsCache,
		OutOfClusterCache:   outOfClusterCache,
		SettingsCache:       settingsCache,
		CacheExpiration:     cacheExpiration,
	}

	// Use the Accesses instance, itself, as the CostModelAggregator. This is
	// confusing and unconventional, but necessary so that we can swap it
	// out for the ETL-adapted version elsewhere.
	// TODO clean this up once ETL is open-sourced.
	a.AggAPI = a

	// Initialize mechanism for subscribing to settings changes
	a.InitializeSettingsPubSub()

	err = a.CloudProvider.DownloadPricingData()
	if err != nil {
		log.Infof("Failed to download pricing data: " + err.Error())
	}

	// Warm the aggregate cache unless explicitly set to false
	if env.IsCacheWarmingEnabled() {
		log.Infof("Init: AggregateCostModel cache warming enabled")
		a.warmAggregateCostModelCache()
	} else {
		log.Infof("Init: AggregateCostModel cache warming disabled")
	}

	if !env.IsKubecostMetricsPodEnabled() {
		a.MetricsEmitter.Start()
	}

	a.httpServices.RegisterAll(router)

	// Cost-model endpoints.
	router.GET("/costDataModel", a.CostDataModel)
	router.GET("/costDataModelRange", a.CostDataModelRange)
	router.GET("/aggregatedCostModel", a.AggregateCostModelHandler)
	router.GET("/allocation/compute", a.ComputeAllocationHandler)
	router.GET("/allocation/compute/summary", a.ComputeAllocationHandlerSummary)
	router.GET("/allNodePricing", a.GetAllNodePricing)
	router.POST("/refreshPricing", a.RefreshPricingData)
	router.GET("/clusterCostsOverTime", a.ClusterCostsOverTime)
	router.GET("/clusterCosts", a.ClusterCosts)
	router.GET("/clusterCostsFromCache", a.ClusterCostsFromCacheHandler)
	router.GET("/validatePrometheus", a.GetPrometheusMetadata)
	router.GET("/managementPlatform", a.ManagementPlatform)
	router.GET("/clusterInfo", a.ClusterInfo)
	router.GET("/clusterInfoMap", a.GetClusterInfoMap)
	router.GET("/serviceAccountStatus", a.GetServiceAccountStatus)
	router.GET("/pricingSourceStatus", a.GetPricingSourceStatus)
	router.GET("/pricingSourceSummary", a.GetPricingSourceSummary)
	router.GET("/pricingSourceCounts", a.GetPricingSourceCounts)

	// endpoints migrated from server
	router.GET("/allPersistentVolumes", a.GetAllPersistentVolumes)
	router.GET("/allDeployments", a.GetAllDeployments)
	router.GET("/allStorageClasses", a.GetAllStorageClasses)
	router.GET("/allStatefulSets", a.GetAllStatefulSets)
	router.GET("/allNodes", a.GetAllNodes)
	router.GET("/allPods", a.GetAllPods)
	router.GET("/allNamespaces", a.GetAllNamespaces)
	router.GET("/allDaemonSets", a.GetAllDaemonSets)
	router.GET("/pod/:namespace/:name", a.GetPod)
	router.GET("/prometheusRecordingRules", a.PrometheusRecordingRules)
	router.GET("/prometheusConfig", a.PrometheusConfig)
	router.GET("/prometheusTargets", a.PrometheusTargets)
	router.GET("/orphanedPods", a.GetOrphanedPods)
	router.GET("/installNamespace", a.GetInstallNamespace)
	router.GET("/installInfo", a.GetInstallInfo)
	router.GET("/podLogs", a.GetPodLogs)
	router.POST("/serviceKey", a.AddServiceKey)
	router.GET("/helmValues", a.GetHelmValues)
	router.GET("/status", a.Status)

	// prom query proxies
	router.GET("/prometheusQuery", a.PrometheusQuery)
	router.GET("/prometheusQueryRange", a.PrometheusQueryRange)
	router.GET("/thanosQuery", a.ThanosQuery)
	router.GET("/thanosQueryRange", a.ThanosQueryRange)

	// diagnostics
	router.GET("/diagnostics/requestQueue", a.GetPrometheusQueueState)
	router.GET("/diagnostics/prometheusMetrics", a.GetPrometheusMetrics)

	return a
}
- // InitializeCloudCost Initializes Cloud Cost pipeline and querier and registers endpoints
- func InitializeCloudCost(router *httprouter.Router, providerConfig models.ProviderConfig) {
- log.Debugf("Cloud Cost config path: %s", env.GetCloudCostConfigPath())
- cloudConfigController := cloudconfig.NewMemoryController(providerConfig)
- repo := cloudcost.NewMemoryRepository()
- cloudCostPipelineService := cloudcost.NewPipelineService(repo, cloudConfigController, cloudcost.DefaultIngestorConfiguration())
- repoQuerier := cloudcost.NewRepositoryQuerier(repo)
- cloudCostQueryService := cloudcost.NewQueryService(repoQuerier, repoQuerier)
- router.GET("/cloud/config/export", cloudConfigController.GetExportConfigHandler())
- router.GET("/cloud/config/enable", cloudConfigController.GetEnableConfigHandler())
- router.GET("/cloud/config/disable", cloudConfigController.GetDisableConfigHandler())
- router.GET("/cloud/config/delete", cloudConfigController.GetDeleteConfigHandler())
- router.GET("/cloudCost", cloudCostQueryService.GetCloudCostHandler())
- router.GET("/cloudCost/view/graph", cloudCostQueryService.GetCloudCostViewGraphHandler())
- router.GET("/cloudCost/view/totals", cloudCostQueryService.GetCloudCostViewTotalsHandler())
- router.GET("/cloudCost/view/table", cloudCostQueryService.GetCloudCostViewTableHandler())
- router.GET("/cloudCost/status", cloudCostPipelineService.GetCloudCostStatusHandler())
- router.GET("/cloudCost/rebuild", cloudCostPipelineService.GetCloudCostRebuildHandler())
- router.GET("/cloudCost/repair", cloudCostPipelineService.GetCloudCostRepairHandler())
- }
- func InitializeCustomCost(router *httprouter.Router) *customcost.PipelineService {
- hourlyRepo := customcost.NewMemoryRepository()
- dailyRepo := customcost.NewMemoryRepository()
- ingConfig := customcost.DefaultIngestorConfiguration()
- var err error
- customCostPipelineService, err := customcost.NewPipelineService(hourlyRepo, dailyRepo, ingConfig)
- if err != nil {
- log.Errorf("error instantiating custom cost pipeline service: %v", err)
- return nil
- }
- customCostQuerier := customcost.NewRepositoryQuerier(hourlyRepo, dailyRepo, ingConfig.HourlyDuration, ingConfig.DailyDuration)
- customCostQueryService := customcost.NewQueryService(customCostQuerier)
- router.GET("/customCost/total", customCostQueryService.GetCustomCostTotalHandler())
- router.GET("/customCost/timeseries", customCostQueryService.GetCustomCostTimeseriesHandler())
- return customCostPipelineService
- }
- func writeErrorResponse(w http.ResponseWriter, code int, message string) {
- out := map[string]string{
- "message": message,
- }
- bytes, err := json.Marshal(out)
- if err != nil {
- w.Header().Set("Content-Type", "text/plain")
- w.WriteHeader(500)
- fmt.Fprint(w, "unable to marshall json for error")
- log.Warnf("Failed to marshall JSON for error response: %s", err.Error())
- return
- }
- w.WriteHeader(code)
- fmt.Fprint(w, string(bytes))
- }
|