Quellcode durchsuchen

Merge branch 'develop' into fix-custom-provider

Jan Lauber vor 2 Jahren
Ursprung
Commit
1bed77aaa3

+ 2 - 1
MAINTAINERS.md

@@ -7,6 +7,7 @@ Official list of [OpenCost Maintainers](https://github.com/orgs/opencost/teams/o
 | Maintainer | GitHub ID | Affiliation | Email |
 | --------------- | --------- | ----------- | ----------- |
 | Ajay Tripathy | @AjayTripathy | Kubecost | <Ajay@kubecost.com> |
+| Alex Meijer | @ameijer | Kubecost | <ameijer@kubecost.com> |
 | Artur Khantimirov | @r2k1 | Microsoft | |
 | Matt Bolt | @mbolt35 | Kubecost | <matt@kubecost.com> |
 | Matt Ray | @mattray | Kubecost | <mattray@kubecost.com> |
@@ -18,4 +19,4 @@ Official list of [OpenCost Maintainers](https://github.com/orgs/opencost/teams/o
 We would like to acknowledge previous committers and their huge contributions to our collective success:
 | Maintainer | GitHub ID | Affiliation | Email |
 | --------------- | --------- | ----------- | ----------- |
-| Michael Dresser | @michaelmdresser | Kubecost | <michaelmdresser@gmail.com> |
+| Michael Dresser | @michaelmdresser | Kubecost | <michaelmdresser@gmail.com> |

+ 4 - 0
core/pkg/util/buffer.go

@@ -114,6 +114,10 @@ func (b *Buffer) WriteFloat64(i float64) {
 // WriteString writes the string's length as a uint16 followed by the string contents.
 func (b *Buffer) WriteString(i string) {
 	s := stringToBytes(i)
+	// string lengths are limited to uint16 - See ReadString()
+	if len(s) > math.MaxUint16 {
+		s = s[:math.MaxUint16]
+	}
 	write(b.b, uint16(len(s)))
 	b.b.Write(s)
 }

+ 274 - 0
core/pkg/util/buffer_test.go

@@ -0,0 +1,274 @@
+package util
+
+import (
+	"bytes"
+	"math"
+	"math/rand"
+	"testing"
+)
+
+func TestBufferReadWrite(t *testing.T) {
+	buf := NewBuffer()
+
+	buf.WriteBool(true)
+	buf.WriteInt(42)
+	buf.WriteFloat64(3.14)
+	buf.WriteString("Testing, 1, 2, 3!")
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+
+	boolVal := readBuf.ReadBool()
+	intVal := readBuf.ReadInt()
+	floatVal := readBuf.ReadFloat64()
+	stringVal := readBuf.ReadString()
+
+	if boolVal != true {
+		t.Errorf("Expected bool value to be true, got %v", boolVal)
+	}
+	if intVal != 42 {
+		t.Errorf("Expected int value to be 42, got %v", intVal)
+	}
+	if floatVal != 3.14 {
+		t.Errorf("Expected float value to be 3.14, got %v", floatVal)
+	}
+	if stringVal != "Testing, 1, 2, 3!" {
+		t.Errorf("Expected string value to be 'Testing, 1, 2, 3!', got %v", stringVal)
+	}
+}
+
+func TestBufferWriteReadBytes(t *testing.T) {
+	buf := NewBuffer()
+
+	bytesToWrite := []byte{0x01, 0x02, 0x03, 0x04}
+	buf.WriteBytes(bytesToWrite)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readBytes := readBuf.ReadBytes(len(bytesToWrite))
+
+	if !bytes.Equal(readBytes, bytesToWrite) {
+		t.Errorf("Expected bytes to be %v, got %v", bytesToWrite, readBytes)
+	}
+}
+
+func TestBufferWriteReadUInt64(t *testing.T) {
+	buf := NewBuffer()
+
+	uint64Val := uint64(1234567890)
+	buf.WriteUInt64(uint64Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readUInt64 := readBuf.ReadUInt64()
+
+	if readUInt64 != uint64Val {
+		t.Errorf("Expected uint64 value to be %v, got %v", uint64Val, readUInt64)
+	}
+}
+
+func TestBufferWriteReadFloat32(t *testing.T) {
+	buf := NewBuffer()
+
+	float32Val := float32(3.14159)
+	buf.WriteFloat32(float32Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readFloat32 := readBuf.ReadFloat32()
+
+	if readFloat32 != float32Val {
+		t.Errorf("Expected float32 value to be %v, got %v", float32Val, readFloat32)
+	}
+}
+
+func TestBufferWriteReadInt8(t *testing.T) {
+	buf := NewBuffer()
+
+	int8Val := int8(-42)
+	buf.WriteInt8(int8Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readInt8 := readBuf.ReadInt8()
+
+	if readInt8 != int8Val {
+		t.Errorf("Expected int8 value to be %v, got %v", int8Val, readInt8)
+	}
+}
+
+func TestBufferWriteReadUInt16(t *testing.T) {
+	buf := NewBuffer()
+
+	uint16Val := uint16(65535)
+	buf.WriteUInt16(uint16Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readUInt16 := readBuf.ReadUInt16()
+
+	if readUInt16 != uint16Val {
+		t.Errorf("Expected uint16 value to be %v, got %v", uint16Val, readUInt16)
+	}
+}
+
+func TestBufferWriteReadInt32(t *testing.T) {
+	buf := NewBuffer()
+
+	int32Val := int32(-1234567890)
+	buf.WriteInt32(int32Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readInt32 := readBuf.ReadInt32()
+
+	if readInt32 != int32Val {
+		t.Errorf("Expected int32 value to be %v, got %v", int32Val, readInt32)
+	}
+}
+
+func TestBufferWriteReadUInt8(t *testing.T) {
+	buf := NewBuffer()
+
+	uint8Val := uint8(255)
+	buf.WriteUInt8(uint8Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readUInt8 := readBuf.ReadUInt8()
+
+	if readUInt8 != uint8Val {
+		t.Errorf("Expected uint8 value to be %v, got %v", uint8Val, readUInt8)
+	}
+}
+
+func TestBufferWriteReadInt16(t *testing.T) {
+	buf := NewBuffer()
+
+	int16Val := int16(-32768)
+	buf.WriteInt16(int16Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readInt16 := readBuf.ReadInt16()
+
+	if readInt16 != int16Val {
+		t.Errorf("Expected int16 value to be %v, got %v", int16Val, readInt16)
+	}
+}
+
+func TestBufferWriteReadUInt32(t *testing.T) {
+	buf := NewBuffer()
+
+	uint32Val := uint32(4294967295)
+	buf.WriteUInt32(uint32Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readUInt32 := readBuf.ReadUInt32()
+
+	if readUInt32 != uint32Val {
+		t.Errorf("Expected uint32 value to be %v, got %v", uint32Val, readUInt32)
+	}
+}
+
+func TestBufferWriteReadInt64(t *testing.T) {
+	buf := NewBuffer()
+
+	int64Val := int64(-9223372036854775808)
+	buf.WriteInt64(int64Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readInt64 := readBuf.ReadInt64()
+
+	if readInt64 != int64Val {
+		t.Errorf("Expected int64 value to be %v, got %v", int64Val, readInt64)
+	}
+}
+
+func TestBufferBytes(t *testing.T) {
+	buf := NewBuffer()
+
+	buf.WriteInt(42)
+	buf.WriteFloat64(3.14)
+
+	unreadBytes := buf.Bytes()
+
+	newBuf := NewBufferFromBytes(unreadBytes)
+
+	intVal := newBuf.ReadInt()
+	floatVal := newBuf.ReadFloat64()
+
+	if intVal != 42 {
+		t.Errorf("Expected int value to be 42, got %v", intVal)
+	}
+	if floatVal != 3.14 {
+		t.Errorf("Expected float value to be 3.14, got %v", floatVal)
+	}
+}
+
+func TestBufferNewBufferFrom(t *testing.T) {
+	buf := NewBuffer()
+
+	buf.WriteInt(42)
+	buf.WriteFloat64(3.14)
+
+	newBuf := NewBufferFrom(buf)
+
+	intVal := newBuf.ReadInt()
+	floatVal := newBuf.ReadFloat64()
+
+	if intVal != 42 {
+		t.Errorf("Expected int value to be 42, got %v", intVal)
+	}
+	if floatVal != 3.14 {
+		t.Errorf("Expected float value to be 3.14, got %v", floatVal)
+	}
+}
+
+const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+func generateRandomString(ln int) string {
+	b := make([]byte, ln)
+	for i := range b {
+		b[i] = letters[rand.Intn(len(letters))]
+	}
+	return string(b)
+}
+
+func TestTooLargeStringTruncate(t *testing.T) {
+	normalStr := generateRandomString(100)
+	bigStr := generateRandomString(math.MaxUint16 + (math.MaxUint16 / 2))
+	expectedBigStrRead := bigStr[:math.MaxUint16]
+
+	otherBigStr := generateRandomString(math.MaxUint16)
+	plusOne := generateRandomString(math.MaxUint16 + 1)
+	expectedPlusOne := plusOne[:math.MaxUint16]
+
+	buf := NewBuffer()
+
+	buf.WriteInt(42)
+	buf.WriteFloat64(3.14)
+	buf.WriteString(normalStr)
+	buf.WriteString(bigStr)
+	buf.WriteString(otherBigStr)
+	buf.WriteString(plusOne)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+
+	intVal := readBuf.ReadInt()
+	floatVal := readBuf.ReadFloat64()
+	normalStrRead := readBuf.ReadString()
+	bigStrRead := readBuf.ReadString()
+	otherBigStrRead := readBuf.ReadString()
+	plusOneRead := readBuf.ReadString()
+
+	if intVal != 42 {
+		t.Errorf("Expected int value to be 42, got %v", intVal)
+	}
+	if floatVal != 3.14 {
+		t.Errorf("Expected float value to be 3.14, got %v", floatVal)
+	}
+	if normalStrRead != normalStr {
+		t.Errorf("Expected string value to be %v, got %v", normalStr, normalStrRead)
+	}
+	if bigStrRead != expectedBigStrRead {
+		t.Errorf("Expected large string values to be equivalent!")
+	}
+	if otherBigStrRead != otherBigStr {
+		t.Errorf("Expected large string values to be equivalent!")
+	}
+	if plusOneRead != expectedPlusOne {
+		t.Errorf("Expected large string values to be equivalent!")
+	}
+}

+ 4 - 0
pkg/cloud/aws/athenaquerier.go

@@ -208,6 +208,10 @@ func SelectAWSCategory(providerID, usageType, service string) string {
 	// The node and volume conditions are mutually exclusive.
 	// Provider ID has prefix "i-"
 	if strings.HasPrefix(providerID, "i-") {
+		// GuardDuty has a ProviderID prefix of "i-", but should not be categorized as compute
+		if strings.ToUpper(service) == "AMAZONGUARDDUTY" {
+			return opencost.OtherCategory
+		}
 		return opencost.ComputeCategory
 	}
 	// Provider ID has prefix "vol-"

+ 2 - 3
pkg/cloud/aws/authorizer.go

@@ -15,7 +15,7 @@ import (
 const AccessKeyAuthorizerType = "AWSAccessKey"
 const ServiceAccountAuthorizerType = "AWSServiceAccount"
 const AssumeRoleAuthorizerType = "AWSAssumeRole"
-const WebIdentityAuthorizerType = "WebIdentity"
+const WebIdentityAuthorizerType = "AWSWebIdentity"
 
 // Authorizer implementations provide aws.Config for AWS SDK calls
 type Authorizer interface {
@@ -269,7 +269,7 @@ func (wea *WebIdentity) CreateAWSConfig(region string) (aws.Config, error) {
 }
 
 func (wea *WebIdentity) MarshalJSON() ([]byte, error) {
-	fmap := make(map[string]any, 1)
+	fmap := make(map[string]any, 4)
 	fmap[cloud.AuthorizerTypeProperty] = WebIdentityAuthorizerType
 	fmap["roleARN"] = wea.RoleARN
 	fmap["identityProvider"] = wea.IdentityProvider
@@ -314,7 +314,6 @@ func (wea *WebIdentity) UnmarshalJSON(b []byte) error {
 	switch idp {
 	case "Google":
 		tokenRetriever = &GoogleIDTokenRetriever{}
-
 	}
 
 	err = json.Unmarshal(trb, &tokenRetriever)

+ 1 - 1
pkg/cloud/azure/billingexportparser.go

@@ -285,7 +285,7 @@ func AzureSetProviderID(abv *BillingRowValues) (providerID string, isVMSSShared
 }
 
 func SelectAzureCategory(meterCategory string) string {
-	if meterCategory == "Virtual Machines" {
+	if meterCategory == "Virtual Machines" || meterCategory == "Virtual Machines Licenses" {
 		return opencost.ComputeCategory
 	} else if meterCategory == "Storage" {
 		return opencost.StorageCategory

+ 79 - 35
pkg/cmd/costmodel/costmodel.go

@@ -8,7 +8,9 @@ import (
 	"time"
 
 	"github.com/julienschmidt/httprouter"
-	"github.com/opencost/opencost/pkg/cloudcost"
+	"github.com/opencost/opencost/core/pkg/util/json"
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/customcost"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/rs/cors"
 
@@ -36,59 +38,60 @@ func Execute(opts *CostModelOpts) error {
 	log.Infof("Starting cost-model version %s", version.FriendlyVersion())
 	log.Infof("Kubernetes enabled: %t", env.IsKubernetesEnabled())
 
+	router := httprouter.New()
 	var a *costmodel.Accesses
-
+	var cp models.Provider
 	if env.IsKubernetesEnabled() {
-		a = costmodel.Initialize()
+		a = costmodel.Initialize(router)
 		err := StartExportWorker(context.Background(), a.Model)
 		if err != nil {
 			log.Errorf("couldn't start CSV export worker: %v", err)
 		}
-	} else {
-		a = costmodel.InitializeWithoutKubernetes()
-		log.Debugf("Cloud Cost config path: %s", env.GetCloudCostConfigPath())
+
+		// Register OpenCost Specific Endpoints
+		router.GET("/allocation", a.ComputeAllocationHandler)
+		router.GET("/allocation/summary", a.ComputeAllocationHandlerSummary)
+		router.GET("/assets", a.ComputeAssetsHandler)
+		if env.IsCarbonEstimatesEnabled() {
+			router.GET("/assets/carbon", a.ComputeAssetsCarbonHandler)
+		}
+
+		// set cloud provider for cloud cost
+		cp = a.CloudProvider
 	}
 
 	log.Infof("Cloud Costs enabled: %t", env.IsCloudCostEnabled())
 	if env.IsCloudCostEnabled() {
-		repo := cloudcost.NewMemoryRepository()
-		a.CloudCostPipelineService = cloudcost.NewPipelineService(repo, a.CloudConfigController, cloudcost.DefaultIngestorConfiguration())
-		repoQuerier := cloudcost.NewRepositoryQuerier(repo)
-		a.CloudCostQueryService = cloudcost.NewQueryService(repoQuerier, repoQuerier)
+		costmodel.InitializeCloudCost(router, cp)
 	}
 
-	rootMux := http.NewServeMux()
-	a.Router.GET("/healthz", Healthz)
-
-	if env.IsKubernetesEnabled() {
-		a.Router.GET("/allocation", a.ComputeAllocationHandler)
-		a.Router.GET("/allocation/summary", a.ComputeAllocationHandlerSummary)
-		a.Router.GET("/assets", a.ComputeAssetsHandler)
-		if env.IsCarbonEstimatesEnabled() {
-			a.Router.GET("/assets/carbon", a.ComputeAssetsCarbonHandler)
-		}
+	log.Infof("Custom Costs enabled: %t", env.IsCustomCostEnabled())
+	var customCostPipelineService *customcost.PipelineService
+	if env.IsCustomCostEnabled() {
+		customCostPipelineService = costmodel.InitializeCustomCost(router)
 	}
 
-	a.Router.GET("/cloudCost", a.CloudCostQueryService.GetCloudCostHandler())
-	a.Router.GET("/cloudCost/view/graph", a.CloudCostQueryService.GetCloudCostViewGraphHandler())
-	a.Router.GET("/cloudCost/view/totals", a.CloudCostQueryService.GetCloudCostViewTotalsHandler())
-	a.Router.GET("/cloudCost/view/table", a.CloudCostQueryService.GetCloudCostViewTableHandler())
+	// this endpoint is intentionally left out of the "if env.IsCustomCostEnabled()" conditional; in the handler, it is
+	// valid for CustomCostPipelineService to be nil
+	router.GET("/customCost/status", customCostPipelineService.GetCustomCostStatusHandler())
 
-	a.Router.GET("/cloudCost/status", a.CloudCostPipelineService.GetCloudCostStatusHandler())
-	a.Router.GET("/cloudCost/rebuild", a.CloudCostPipelineService.GetCloudCostRebuildHandler())
-	a.Router.GET("/cloudCost/repair", a.CloudCostPipelineService.GetCloudCostRepairHandler())
+	router.GET("/healthz", Healthz)
+
+	router.GET("/logs/level", GetLogLevel)
+	router.POST("/logs/level", SetLogLevel)
 
 	if env.IsPProfEnabled() {
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/", pprof.Index)
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/cmdline", pprof.Cmdline)
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/profile", pprof.Profile)
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/symbol", pprof.Symbol)
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/trace", pprof.Trace)
-		a.Router.Handler(http.MethodGet, "/debug/pprof/goroutine", pprof.Handler("goroutine"))
-		a.Router.Handler(http.MethodGet, "/debug/pprof/heap", pprof.Handler("heap"))
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/", pprof.Index)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/cmdline", pprof.Cmdline)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/profile", pprof.Profile)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/symbol", pprof.Symbol)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/trace", pprof.Trace)
+		router.Handler(http.MethodGet, "/debug/pprof/goroutine", pprof.Handler("goroutine"))
+		router.Handler(http.MethodGet, "/debug/pprof/heap", pprof.Handler("heap"))
 	}
 
-	rootMux.Handle("/", a.Router)
+	rootMux := http.NewServeMux()
+	rootMux.Handle("/", router)
 	rootMux.Handle("/metrics", promhttp.Handler())
 	telemetryHandler := metrics.ResponseMetricMiddleware(rootMux)
 	handler := cors.AllowAll().Handler(telemetryHandler)
@@ -130,3 +133,44 @@ func StartExportWorker(ctx context.Context, model costmodel.AllocationModel) err
 	}()
 	return nil
 }
+
+type LogLevelRequestResponse struct {
+	Level string `json:"level"`
+}
+
+func GetLogLevel(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	level := log.GetLogLevel()
+	llrr := LogLevelRequestResponse{
+		Level: level,
+	}
+
+	body, err := json.Marshal(llrr)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to retrieve log level"), http.StatusInternalServerError)
+		return
+	}
+	_, err = w.Write(body)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to write response: %s", body), http.StatusInternalServerError)
+		return
+	}
+}
+
+func SetLogLevel(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	params := LogLevelRequestResponse{}
+	err := json.NewDecoder(r.Body).Decode(&params)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to decode request body, error: %s", err), http.StatusBadRequest)
+		return
+	}
+
+	err = log.SetLogLevel(params.Level)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("level must be a valid log level according to zerolog; level given: %s, error: %s", params.Level, err), http.StatusBadRequest)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+}

+ 113 - 178
pkg/costmodel/router.go

@@ -86,28 +86,22 @@ var (
 // Accesses defines a singleton application instance, providing access to
 // Prometheus, Kubernetes, the cloud provider, and caches.
 type Accesses struct {
-	Router                    *httprouter.Router
-	PrometheusClient          prometheus.Client
-	ThanosClient              prometheus.Client
-	KubeClientSet             kubernetes.Interface
-	ClusterCache              clustercache.ClusterCache
-	ClusterMap                clusters.ClusterMap
-	CloudProvider             models.Provider
-	ConfigFileManager         *config.ConfigFileManager
-	CloudConfigController     *cloudconfig.Controller
-	CloudCostPipelineService  *cloudcost.PipelineService
-	CloudCostQueryService     *cloudcost.QueryService
-	CustomCostQueryService    *customcost.QueryService
-	CustomCostPipelineService *customcost.PipelineService
-	ClusterInfoProvider       clusters.ClusterInfoProvider
-	Model                     *CostModel
-	MetricsEmitter            *CostModelMetricsEmitter
-	OutOfClusterCache         *cache.Cache
-	AggregateCache            *cache.Cache
-	CostDataCache             *cache.Cache
-	ClusterCostsCache         *cache.Cache
-	CacheExpiration           map[time.Duration]time.Duration
-	AggAPI                    Aggregator
+	PrometheusClient    prometheus.Client
+	ThanosClient        prometheus.Client
+	KubeClientSet       kubernetes.Interface
+	ClusterCache        clustercache.ClusterCache
+	ClusterMap          clusters.ClusterMap
+	CloudProvider       models.Provider
+	ConfigFileManager   *config.ConfigFileManager
+	ClusterInfoProvider clusters.ClusterInfoProvider
+	Model               *CostModel
+	MetricsEmitter      *CostModelMetricsEmitter
+	OutOfClusterCache   *cache.Cache
+	AggregateCache      *cache.Cache
+	CostDataCache       *cache.Cache
+	ClusterCostsCache   *cache.Cache
+	CacheExpiration     map[time.Duration]time.Duration
+	AggAPI              Aggregator
 	// SettingsCache stores current state of app settings
 	SettingsCache *cache.Cache
 	// settingsSubscribers tracks channels through which changes to different
@@ -1430,47 +1424,6 @@ func (a *Accesses) Status(w http.ResponseWriter, r *http.Request, _ httprouter.P
 	}
 }
 
-type LogLevelRequestResponse struct {
-	Level string `json:"level"`
-}
-
-func (a *Accesses) GetLogLevel(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	level := log.GetLogLevel()
-	llrr := LogLevelRequestResponse{
-		Level: level,
-	}
-
-	body, err := json.Marshal(llrr)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to retrive log level"), http.StatusInternalServerError)
-		return
-	}
-	_, err = w.Write(body)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to write response: %s", body), http.StatusInternalServerError)
-		return
-	}
-}
-
-func (a *Accesses) SetLogLevel(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
-	params := LogLevelRequestResponse{}
-	err := json.NewDecoder(r.Body).Decode(&params)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to decode request body, error: %s", err), http.StatusBadRequest)
-		return
-	}
-
-	err = log.SetLogLevel(params.Level)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("level must be a valid log level according to zerolog; level given: %s, error: %s", params.Level, err), http.StatusBadRequest)
-		return
-	}
-	w.WriteHeader(http.StatusOK)
-}
-
 // captures the panic event in sentry
 func capturePanicEvent(err string, stack string) {
 	msg := fmt.Sprintf("Panic: %s\nStackTrace: %s\n", err, stack)
@@ -1501,7 +1454,7 @@ func handlePanic(p errors.Panic) bool {
 	return p.Type == errors.PanicTypeHTTP
 }
 
-func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses {
+func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses {
 	configWatchers := watcher.NewConfigMapWatchers(additionalConfigWatchers...)
 
 	var err error
@@ -1733,25 +1686,23 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	metricsEmitter := NewCostModelMetricsEmitter(promCli, k8sCache, cloudProvider, clusterInfoProvider, costModel)
 
 	a := &Accesses{
-		Router:                httprouter.New(),
-		PrometheusClient:      promCli,
-		ThanosClient:          thanosClient,
-		KubeClientSet:         kubeClientset,
-		ClusterCache:          k8sCache,
-		ClusterMap:            clusterMap,
-		CloudProvider:         cloudProvider,
-		CloudConfigController: cloudconfig.NewController(cloudProvider),
-		ConfigFileManager:     confManager,
-		ClusterInfoProvider:   clusterInfoProvider,
-		Model:                 costModel,
-		MetricsEmitter:        metricsEmitter,
-		AggregateCache:        aggregateCache,
-		CostDataCache:         costDataCache,
-		ClusterCostsCache:     clusterCostsCache,
-		OutOfClusterCache:     outOfClusterCache,
-		SettingsCache:         settingsCache,
-		CacheExpiration:       cacheExpiration,
-		httpServices:          services.NewCostModelServices(),
+		httpServices:        services.NewCostModelServices(),
+		PrometheusClient:    promCli,
+		ThanosClient:        thanosClient,
+		KubeClientSet:       kubeClientset,
+		ClusterCache:        k8sCache,
+		ClusterMap:          clusterMap,
+		CloudProvider:       cloudProvider,
+		ConfigFileManager:   confManager,
+		ClusterInfoProvider: clusterInfoProvider,
+		Model:               costModel,
+		MetricsEmitter:      metricsEmitter,
+		AggregateCache:      aggregateCache,
+		CostDataCache:       costDataCache,
+		ClusterCostsCache:   clusterCostsCache,
+		OutOfClusterCache:   outOfClusterCache,
+		SettingsCache:       settingsCache,
+		CacheExpiration:     cacheExpiration,
 	}
 
 	// Use the Accesses instance, itself, as the CostModelAggregator. This is
@@ -1779,120 +1730,104 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 		a.MetricsEmitter.Start()
 	}
 
-	log.Infof("Custom Costs enabled: %t", env.IsCustomCostEnabled())
-	if env.IsCustomCostEnabled() {
-		hourlyRepo := customcost.NewMemoryRepository()
-		dailyRepo := customcost.NewMemoryRepository()
-		ingConfig := customcost.DefaultIngestorConfiguration()
-		var err error
-		a.CustomCostPipelineService, err = customcost.NewPipelineService(hourlyRepo, dailyRepo, ingConfig)
-		if err != nil {
-			log.Errorf("error instantiating custom cost pipeline service: %v", err)
-			return nil
-		}
-
-		customCostQuerier := customcost.NewRepositoryQuerier(hourlyRepo, dailyRepo, ingConfig.HourlyDuration, ingConfig.DailyDuration)
-		a.CustomCostQueryService = customcost.NewQueryService(customCostQuerier)
-	}
-
-	a.Router.GET("/costDataModel", a.CostDataModel)
-	a.Router.GET("/costDataModelRange", a.CostDataModelRange)
-	a.Router.GET("/aggregatedCostModel", a.AggregateCostModelHandler)
-	a.Router.GET("/allocation/compute", a.ComputeAllocationHandler)
-	a.Router.GET("/allocation/compute/summary", a.ComputeAllocationHandlerSummary)
-	a.Router.GET("/allNodePricing", a.GetAllNodePricing)
-	a.Router.POST("/refreshPricing", a.RefreshPricingData)
-	a.Router.GET("/clusterCostsOverTime", a.ClusterCostsOverTime)
-	a.Router.GET("/clusterCosts", a.ClusterCosts)
-	a.Router.GET("/clusterCostsFromCache", a.ClusterCostsFromCacheHandler)
-	a.Router.GET("/validatePrometheus", a.GetPrometheusMetadata)
-	a.Router.GET("/managementPlatform", a.ManagementPlatform)
-	a.Router.GET("/clusterInfo", a.ClusterInfo)
-	a.Router.GET("/clusterInfoMap", a.GetClusterInfoMap)
-	a.Router.GET("/serviceAccountStatus", a.GetServiceAccountStatus)
-	a.Router.GET("/pricingSourceStatus", a.GetPricingSourceStatus)
-	a.Router.GET("/pricingSourceSummary", a.GetPricingSourceSummary)
-	a.Router.GET("/pricingSourceCounts", a.GetPricingSourceCounts)
+	a.httpServices.RegisterAll(router)
+
+	router.GET("/costDataModel", a.CostDataModel)
+	router.GET("/costDataModelRange", a.CostDataModelRange)
+	router.GET("/aggregatedCostModel", a.AggregateCostModelHandler)
+	router.GET("/allocation/compute", a.ComputeAllocationHandler)
+	router.GET("/allocation/compute/summary", a.ComputeAllocationHandlerSummary)
+	router.GET("/allNodePricing", a.GetAllNodePricing)
+	router.POST("/refreshPricing", a.RefreshPricingData)
+	router.GET("/clusterCostsOverTime", a.ClusterCostsOverTime)
+	router.GET("/clusterCosts", a.ClusterCosts)
+	router.GET("/clusterCostsFromCache", a.ClusterCostsFromCacheHandler)
+	router.GET("/validatePrometheus", a.GetPrometheusMetadata)
+	router.GET("/managementPlatform", a.ManagementPlatform)
+	router.GET("/clusterInfo", a.ClusterInfo)
+	router.GET("/clusterInfoMap", a.GetClusterInfoMap)
+	router.GET("/serviceAccountStatus", a.GetServiceAccountStatus)
+	router.GET("/pricingSourceStatus", a.GetPricingSourceStatus)
+	router.GET("/pricingSourceSummary", a.GetPricingSourceSummary)
+	router.GET("/pricingSourceCounts", a.GetPricingSourceCounts)
 
 	// endpoints migrated from server
-	a.Router.GET("/allPersistentVolumes", a.GetAllPersistentVolumes)
-	a.Router.GET("/allDeployments", a.GetAllDeployments)
-	a.Router.GET("/allStorageClasses", a.GetAllStorageClasses)
-	a.Router.GET("/allStatefulSets", a.GetAllStatefulSets)
-	a.Router.GET("/allNodes", a.GetAllNodes)
-	a.Router.GET("/allPods", a.GetAllPods)
-	a.Router.GET("/allNamespaces", a.GetAllNamespaces)
-	a.Router.GET("/allDaemonSets", a.GetAllDaemonSets)
-	a.Router.GET("/pod/:namespace/:name", a.GetPod)
-	a.Router.GET("/prometheusRecordingRules", a.PrometheusRecordingRules)
-	a.Router.GET("/prometheusConfig", a.PrometheusConfig)
-	a.Router.GET("/prometheusTargets", a.PrometheusTargets)
-	a.Router.GET("/orphanedPods", a.GetOrphanedPods)
-	a.Router.GET("/installNamespace", a.GetInstallNamespace)
-	a.Router.GET("/installInfo", a.GetInstallInfo)
-	a.Router.GET("/podLogs", a.GetPodLogs)
-	a.Router.POST("/serviceKey", a.AddServiceKey)
-	a.Router.GET("/helmValues", a.GetHelmValues)
-	a.Router.GET("/status", a.Status)
+	router.GET("/allPersistentVolumes", a.GetAllPersistentVolumes)
+	router.GET("/allDeployments", a.GetAllDeployments)
+	router.GET("/allStorageClasses", a.GetAllStorageClasses)
+	router.GET("/allStatefulSets", a.GetAllStatefulSets)
+	router.GET("/allNodes", a.GetAllNodes)
+	router.GET("/allPods", a.GetAllPods)
+	router.GET("/allNamespaces", a.GetAllNamespaces)
+	router.GET("/allDaemonSets", a.GetAllDaemonSets)
+	router.GET("/pod/:namespace/:name", a.GetPod)
+	router.GET("/prometheusRecordingRules", a.PrometheusRecordingRules)
+	router.GET("/prometheusConfig", a.PrometheusConfig)
+	router.GET("/prometheusTargets", a.PrometheusTargets)
+	router.GET("/orphanedPods", a.GetOrphanedPods)
+	router.GET("/installNamespace", a.GetInstallNamespace)
+	router.GET("/installInfo", a.GetInstallInfo)
+	router.GET("/podLogs", a.GetPodLogs)
+	router.POST("/serviceKey", a.AddServiceKey)
+	router.GET("/helmValues", a.GetHelmValues)
+	router.GET("/status", a.Status)
 
 	// prom query proxies
-	a.Router.GET("/prometheusQuery", a.PrometheusQuery)
-	a.Router.GET("/prometheusQueryRange", a.PrometheusQueryRange)
-	a.Router.GET("/thanosQuery", a.ThanosQuery)
-	a.Router.GET("/thanosQueryRange", a.ThanosQueryRange)
+	router.GET("/prometheusQuery", a.PrometheusQuery)
+	router.GET("/prometheusQueryRange", a.PrometheusQueryRange)
+	router.GET("/thanosQuery", a.ThanosQuery)
+	router.GET("/thanosQueryRange", a.ThanosQueryRange)
 
 	// diagnostics
-	a.Router.GET("/diagnostics/requestQueue", a.GetPrometheusQueueState)
-	a.Router.GET("/diagnostics/prometheusMetrics", a.GetPrometheusMetrics)
+	router.GET("/diagnostics/requestQueue", a.GetPrometheusQueueState)
+	router.GET("/diagnostics/prometheusMetrics", a.GetPrometheusMetrics)
 
-	a.Router.GET("/logs/level", a.GetLogLevel)
-	a.Router.POST("/logs/level", a.SetLogLevel)
+	return a
+}
 
-	a.Router.GET("/cloud/config/export", a.CloudConfigController.GetExportConfigHandler())
-	a.Router.GET("/cloud/config/enable", a.CloudConfigController.GetEnableConfigHandler())
-	a.Router.GET("/cloud/config/disable", a.CloudConfigController.GetDisableConfigHandler())
-	a.Router.GET("/cloud/config/delete", a.CloudConfigController.GetDeleteConfigHandler())
+// InitializeCloudCost Initializes Cloud Cost pipeline and querier and registers endpoints
+func InitializeCloudCost(router *httprouter.Router, cp models.Provider) {
+	log.Debugf("Cloud Cost config path: %s", env.GetCloudCostConfigPath())
+	cloudConfigController := cloudconfig.NewController(cp)
 
-	if env.IsCustomCostEnabled() {
-		a.Router.GET("/customCost/total", a.CustomCostQueryService.GetCustomCostTotalHandler())
-		a.Router.GET("/customCost/timeseries", a.CustomCostQueryService.GetCustomCostTimeseriesHandler())
-	}
+	repo := cloudcost.NewMemoryRepository()
+	cloudCostPipelineService := cloudcost.NewPipelineService(repo, cloudConfigController, cloudcost.DefaultIngestorConfiguration())
+	repoQuerier := cloudcost.NewRepositoryQuerier(repo)
+	cloudCostQueryService := cloudcost.NewQueryService(repoQuerier, repoQuerier)
 
-	// this endpoint is intentionally left out of the "if env.IsCustomCostEnabled()" conditional; in the handler, it is
-	// valid for CustomCostPipelineService to be nil
-	a.Router.GET("/customCost/status", a.CustomCostPipelineService.GetCustomCostStatusHandler())
+	router.GET("/cloud/config/export", cloudConfigController.GetExportConfigHandler())
+	router.GET("/cloud/config/enable", cloudConfigController.GetEnableConfigHandler())
+	router.GET("/cloud/config/disable", cloudConfigController.GetDisableConfigHandler())
+	router.GET("/cloud/config/delete", cloudConfigController.GetDeleteConfigHandler())
 
-	a.httpServices.RegisterAll(a.Router)
+	router.GET("/cloudCost", cloudCostQueryService.GetCloudCostHandler())
+	router.GET("/cloudCost/view/graph", cloudCostQueryService.GetCloudCostViewGraphHandler())
+	router.GET("/cloudCost/view/totals", cloudCostQueryService.GetCloudCostViewTotalsHandler())
+	router.GET("/cloudCost/view/table", cloudCostQueryService.GetCloudCostViewTableHandler())
 
-	return a
+	router.GET("/cloudCost/status", cloudCostPipelineService.GetCloudCostStatusHandler())
+	router.GET("/cloudCost/rebuild", cloudCostPipelineService.GetCloudCostRebuildHandler())
+	router.GET("/cloudCost/repair", cloudCostPipelineService.GetCloudCostRepairHandler())
 }
 
-func InitializeWithoutKubernetes() *Accesses {
+func InitializeCustomCost(router *httprouter.Router) *customcost.PipelineService {
+	hourlyRepo := customcost.NewMemoryRepository()
+	dailyRepo := customcost.NewMemoryRepository()
+	ingConfig := customcost.DefaultIngestorConfiguration()
 	var err error
-	if errorReportingEnabled {
-		err = sentry.Init(sentry.ClientOptions{Release: version.FriendlyVersion()})
-		if err != nil {
-			log.Infof("Failed to initialize sentry for error reporting")
-		} else {
-			err = errors.SetPanicHandler(handlePanic)
-			if err != nil {
-				log.Infof("Failed to set panic handler: %s", err)
-			}
-		}
-	}
-
-	a := &Accesses{
-		Router:                httprouter.New(),
-		CloudConfigController: cloudconfig.NewController(nil),
-		httpServices:          services.NewCostModelServices(),
+	customCostPipelineService, err := customcost.NewPipelineService(hourlyRepo, dailyRepo, ingConfig)
+	if err != nil {
+		log.Errorf("error instantiating custom cost pipeline service: %v", err)
+		return nil
 	}
 
-	a.Router.GET("/logs/level", a.GetLogLevel)
-	a.Router.POST("/logs/level", a.SetLogLevel)
+	customCostQuerier := customcost.NewRepositoryQuerier(hourlyRepo, dailyRepo, ingConfig.HourlyDuration, ingConfig.DailyDuration)
+	customCostQueryService := customcost.NewQueryService(customCostQuerier)
 
-	a.httpServices.RegisterAll(a.Router)
+	router.GET("/customCost/total", customCostQueryService.GetCustomCostTotalHandler())
+	router.GET("/customCost/timeseries", customCostQueryService.GetCustomCostTimeseriesHandler())
 
-	return a
+	return customCostPipelineService
 }
 
 func writeErrorResponse(w http.ResponseWriter, code int, message string) {

+ 1 - 1
pkg/storage/prefixedbucketstorage.go

@@ -32,7 +32,7 @@ func validPrefix(prefix string) bool {
 }
 
 func conditionalPrefix(prefix, name string) string {
-	if len(name) > 0 {
+	if len(name) > 0 && !strings.HasPrefix(name, prefix) {
 		return withPrefix(prefix, name)
 	}
 

+ 7 - 7
pkg/storage/s3storage.go

@@ -347,7 +347,7 @@ func (s3 *S3Storage) FullPath(name string) string {
 func (s3 *S3Storage) Read(name string) ([]byte, error) {
 	name = trimLeading(name)
 
-	log.Debugf("S3Storage::Read(%s)", name)
+	log.Tracef("S3Storage::Read(%s)", name)
 	ctx := context.Background()
 
 	return s3.getRange(ctx, name, 0, -1)
@@ -357,7 +357,7 @@ func (s3 *S3Storage) Read(name string) ([]byte, error) {
 // Exists checks if the given object exists.
 func (s3 *S3Storage) Exists(name string) (bool, error) {
 	name = trimLeading(name)
-	//log.Debugf("S3Storage::Exists(%s)", name)
+	log.Tracef("S3Storage::Exists(%s)", name)
 
 	ctx := context.Background()
 
@@ -376,7 +376,7 @@ func (s3 *S3Storage) Exists(name string) (bool, error) {
 func (s3 *S3Storage) Write(name string, data []byte) error {
 	name = trimLeading(name)
 
-	log.Debugf("S3Storage::Write(%s)", name)
+	log.Tracef("S3Storage::Write(%s)", name)
 
 	ctx := context.Background()
 	sse, err := s3.getServerSideEncryption(ctx)
@@ -410,7 +410,7 @@ func (s3 *S3Storage) Write(name string, data []byte) error {
 func (s3 *S3Storage) Stat(name string) (*StorageInfo, error) {
 	name = trimLeading(name)
 
-	//log.Debugf("S3Storage::Stat(%s)", name)
+	log.Tracef("S3Storage::Stat(%s)", name)
 	ctx := context.Background()
 
 	objInfo, err := s3.client.StatObject(ctx, s3.name, name, minio.StatObjectOptions{})
@@ -432,7 +432,7 @@ func (s3 *S3Storage) Stat(name string) (*StorageInfo, error) {
 func (s3 *S3Storage) Remove(name string) error {
 	name = trimLeading(name)
 
-	log.Debugf("S3Storage::Remove(%s)", name)
+	log.Tracef("S3Storage::Remove(%s)", name)
 	ctx := context.Background()
 
 	return s3.client.RemoveObject(ctx, s3.name, name, minio.RemoveObjectOptions{})
@@ -441,7 +441,7 @@ func (s3 *S3Storage) Remove(name string) error {
 func (s3 *S3Storage) List(path string) ([]*StorageInfo, error) {
 	path = trimLeading(path)
 
-	log.Debugf("S3Storage::List(%s)", path)
+	log.Tracef("S3Storage::List(%s)", path)
 	ctx := context.Background()
 
 	// Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the
@@ -488,7 +488,7 @@ func (s3 *S3Storage) List(path string) ([]*StorageInfo, error) {
 func (s3 *S3Storage) ListDirectories(path string) ([]*StorageInfo, error) {
 	path = trimLeading(path)
 
-	log.Debugf("S3Storage::List(%s)", path)
+	log.Tracef("S3Storage::List(%s)", path)
 	ctx := context.Background()
 
 	if path != "" {

+ 3 - 0
pkg/storage/storage.go

@@ -4,6 +4,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/pkg/errors"
 )
 
@@ -63,6 +64,8 @@ func Validate(storage Storage) error {
 	const testPath = "tmp/test.txt"
 	const testContent = "test"
 
+	log.Debug("validating storage")
+
 	// attempt to read a path
 	_, err := storage.Exists(testPath)
 	if err != nil {