Selaa lähdekoodia

Merge pull request #1178 from kubecost/ramich-logging

Drop klog and migrate to logging package
Dan Ramich 4 vuotta sitten
vanhempi
sitoutus
98da7cde21
41 muutettua tiedostoa jossa 540 lisäystä ja 511 poistoa
  1. 2 5
      cmd/costmodel/main.go
  2. 15 7
      go.mod
  3. 21 21
      go.sum
  4. 40 43
      pkg/cloud/awsprovider.go
  5. 14 16
      pkg/cloud/azureprovider.go
  6. 40 37
      pkg/cloud/gcpprovider.go
  7. 6 8
      pkg/cloud/provider.go
  8. 6 8
      pkg/cloud/providerconfig.go
  9. 3 3
      pkg/clustercache/clustercache.go
  10. 2 2
      pkg/clustercache/clusterexporter.go
  11. 3 3
      pkg/clustercache/clusterimporter.go
  12. 6 6
      pkg/clustercache/watchcontroller.go
  13. 3 4
      pkg/cmd/agent/agent.go
  14. 17 11
      pkg/cmd/commands.go
  15. 2 2
      pkg/config/configfile.go
  16. 2 2
      pkg/config/configmanager.go
  17. 31 32
      pkg/costmodel/aggregation.go
  18. 37 37
      pkg/costmodel/allocation.go
  19. 25 26
      pkg/costmodel/cluster.go
  20. 17 17
      pkg/costmodel/cluster_helpers.go
  21. 5 6
      pkg/costmodel/clusterinfo.go
  22. 2 2
      pkg/costmodel/clusters/clustermap.go
  23. 89 90
      pkg/costmodel/costmodel.go
  24. 22 24
      pkg/costmodel/metrics.go
  25. 1 1
      pkg/costmodel/promparsers.go
  26. 31 32
      pkg/costmodel/router.go
  27. 4 5
      pkg/costmodel/settings.go
  28. 2 3
      pkg/costmodel/sql.go
  29. 1 1
      pkg/env/costmodelenv.go
  30. 12 12
      pkg/kubecost/allocation.go
  31. 1 1
      pkg/kubecost/allocationprops.go
  32. 15 15
      pkg/kubecost/asset.go
  33. 4 4
      pkg/kubecost/summaryallocation.go
  34. 43 9
      pkg/log/log.go
  35. 2 2
      pkg/prom/query.go
  36. 2 2
      pkg/prom/result.go
  37. 5 5
      pkg/services/clusters/clustermanager.go
  38. 2 2
      pkg/services/clusters/clustersendpoints.go
  39. 2 2
      pkg/services/clusterservice.go
  40. 1 1
      pkg/services/services.go
  41. 2 2
      pkg/util/watcher/configwatchers.go

+ 2 - 5
cmd/costmodel/main.go

@@ -1,17 +1,14 @@
 package main
 
 import (
-	"os"
-
 	"github.com/kubecost/cost-model/pkg/cmd"
-	"k8s.io/klog"
+	"github.com/rs/zerolog/log"
 )
 
 func main() {
 	// runs the appropriate application mode using the default cost-model command
 	// see: github.com/kubecost/cost-model/pkg/cmd package for details
 	if err := cmd.Execute(nil); err != nil {
-		klog.Fatal(err)
-		os.Exit(1)
+		log.Fatal().Err(err).Msg("cost-model command failed")
 	}
 }

+ 15 - 7
go.mod

@@ -32,8 +32,9 @@ require (
 	github.com/prometheus/client_golang v1.0.0
 	github.com/prometheus/client_model v0.2.0
 	github.com/rs/cors v1.7.0
+	github.com/rs/zerolog v1.26.1
 	github.com/spf13/cobra v1.2.1
-	github.com/spf13/pflag v1.0.5
+	github.com/spf13/viper v1.8.1
 	go.etcd.io/bbolt v1.3.5
 	golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
@@ -42,7 +43,6 @@ require (
 	k8s.io/api v0.20.4
 	k8s.io/apimachinery v0.20.4
 	k8s.io/client-go v0.20.4
-	k8s.io/klog v0.4.0
 	sigs.k8s.io/yaml v1.2.0
 )
 
@@ -51,7 +51,6 @@ require (
 	github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
 	github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
 	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
-	github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect
 	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
 	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
@@ -70,7 +69,7 @@ require (
 	github.com/beorn7/perks v1.0.0 // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
-	github.com/form3tech-oss/jwt-go v3.2.2+incompatible // indirect
+	github.com/fsnotify/fsnotify v1.4.9 // indirect
 	github.com/go-logr/logr v0.2.0 // indirect
 	github.com/gofrs/uuid v4.2.0+incompatible // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
@@ -83,34 +82,43 @@ require (
 	github.com/googleapis/gnostic v0.4.1 // indirect
 	github.com/gorilla/css v1.0.0 // indirect
 	github.com/hashicorp/golang-lru v0.5.1 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/imdario/mergo v0.3.5 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/jstemmer/go-junit-report v0.9.1 // indirect
 	github.com/klauspost/compress v1.13.5 // indirect
 	github.com/klauspost/cpuid v1.3.1 // indirect
+	github.com/magiconair/properties v1.8.5 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
 	github.com/minio/md5-simd v1.1.0 // indirect
 	github.com/minio/sha256-simd v0.1.1 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
+	github.com/mitchellh/mapstructure v1.4.1 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
+	github.com/pelletier/go-toml v1.9.3 // indirect
 	github.com/prometheus/common v0.4.1 // indirect
 	github.com/prometheus/procfs v0.0.2 // indirect
-	github.com/rs/xid v1.2.1 // indirect
+	github.com/rs/xid v1.3.0 // indirect
 	github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
 	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/spf13/afero v1.6.0 // indirect
+	github.com/spf13/cast v1.3.1 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.2.0 // indirect
 	go.opencensus.io v0.23.0 // indirect
 	golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
 	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
 	golang.org/x/mod v0.4.2 // indirect
 	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
-	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
+	golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect
 	golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
 	golang.org/x/text v0.3.6 // indirect
 	golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
-	golang.org/x/tools v0.1.2 // indirect
+	golang.org/x/tools v0.1.7 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect

+ 21 - 21
go.sum

@@ -46,29 +46,20 @@ github.com/Azure/azure-sdk-for-go v61.6.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest v0.11.17 h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM=
-github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
 github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
 github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
 github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
 github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
 github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI=
-github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
 github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 h1:cgiBtUxatlt/e3qY6fQJioqbocWHr5osz259MomF5M0=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.6/go.mod h1:nYlP+G+n8MhD5CjIi6W8nFTIJn/PnTHes5nUbK6BxD0=
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
 github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
 github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
 github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
 github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
@@ -76,7 +67,6 @@ github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+X
 github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
 github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
 github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
-github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
@@ -169,7 +159,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
@@ -192,9 +181,9 @@ github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
 github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
 github.com/getsentry/sentry-go v0.6.1 h1:K84dY1/57OtWhdyr5lbU78Q/+qgzkEyGc/ud+Sipi5k=
@@ -340,6 +329,7 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
@@ -410,6 +400,7 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
 github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
 github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -443,6 +434,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -471,6 +463,7 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
 github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
 github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
 github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
@@ -500,8 +493,11 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
-github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4=
+github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
+github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -520,19 +516,23 @@ github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIK
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
 github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
 github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
 github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -544,6 +544,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
@@ -567,6 +568,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
@@ -596,10 +598,9 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
-golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -681,8 +682,8 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -762,10 +763,10 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -837,8 +838,9 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
 golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -992,8 +994,6 @@ k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp
 k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4=
 k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ=
-k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
 k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=

+ 40 - 43
pkg/cloud/awsprovider.go

@@ -6,7 +6,6 @@ import (
 	"context"
 	"encoding/csv"
 	"fmt"
-
 	"io"
 	"io/ioutil"
 	"net/http"
@@ -16,8 +15,6 @@ import (
 	"sync"
 	"time"
 
-	"k8s.io/klog"
-
 	"github.com/kubecost/cost-model/pkg/clustercache"
 	"github.com/kubecost/cost-model/pkg/env"
 	"github.com/kubecost/cost-model/pkg/errors"
@@ -615,7 +612,7 @@ func (k *awsKey) ID() string {
 			return group
 		}
 	}
-	klog.V(3).Infof("Could not find instance ID in \"%s\"", k.ProviderID)
+	log.Warnf("Could not find instance ID in \"%s\"", k.ProviderID)
 	return ""
 }
 
@@ -640,7 +637,7 @@ func (k *awsKey) Features() string {
 func (aws *AWS) PVPricing(pvk PVKey) (*PV, error) {
 	pricing, ok := aws.Pricing[pvk.Features()]
 	if !ok {
-		klog.V(4).Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+		log.Debugf("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
 		return &PV{}, nil
 	}
 	return pricing.PV, nil
@@ -694,7 +691,7 @@ func (key *awsPVKey) Features() string {
 	}
 	class, ok := volTypes[storageClass]
 	if !ok {
-		klog.V(4).Infof("No voltype mapping for %s's storageClass: %s", key.Name, storageClass)
+		log.Debugf("No voltype mapping for %s's storageClass: %s", key.Name, storageClass)
 	}
 	return region + "," + class
 }
@@ -755,10 +752,10 @@ func (aws *AWS) getRegionPricing(nodeList []*v1.Node) (*http.Response, string, e
 
 	pricingURL += "index.json"
 
-	klog.V(2).Infof("starting download of \"%s\", which is quite large ...", pricingURL)
+	log.Infof("starting download of \"%s\", which is quite large ...", pricingURL)
 	resp, err := http.Get(pricingURL)
 	if err != nil {
-		klog.V(2).Infof("Bogus fetch of \"%s\": %v", pricingURL, err)
+		log.Errorf("Bogus fetch of \"%s\": %v", pricingURL, err)
 		return nil, pricingURL, err
 	}
 	return resp, pricingURL, err
@@ -776,7 +773,7 @@ func (aws *AWS) DownloadPricingData() error {
 	defer aws.DownloadPricingDataLock.Unlock()
 	c, err := aws.Config.GetCustomPricingData()
 	if err != nil {
-		klog.V(1).Infof("Error downloading default pricing data: %s", err.Error())
+		log.Errorf("Error downloading default pricing data: %s", err.Error())
 	}
 	aws.BaseCPUPrice = c.CPU
 	aws.BaseRAMPrice = c.RAM
@@ -794,7 +791,7 @@ func (aws *AWS) DownloadPricingData() error {
 	aws.ConfigureAuthWith(c) // load aws authentication from configuration or secret
 
 	if len(aws.SpotDataBucket) != 0 && len(aws.ProjectID) == 0 {
-		klog.V(1).Infof("using SpotDataBucket \"%s\" without ProjectID will not end well", aws.SpotDataBucket)
+		log.Warnf("using SpotDataBucket \"%s\" without ProjectID will not end well", aws.SpotDataBucket)
 	}
 	nodeList := aws.Clientset.GetAllNodes()
 
@@ -829,7 +826,7 @@ func (aws *AWS) DownloadPricingData() error {
 	for _, pv := range pvList {
 		params, ok := storageClassMap[pv.Spec.StorageClassName]
 		if !ok {
-			klog.V(2).Infof("Unable to find params for storageClassName %s, falling back to default pricing", pv.Spec.StorageClassName)
+			log.Infof("Unable to find params for storageClassName %s, falling back to default pricing", pv.Spec.StorageClassName)
 			continue
 		}
 		key := aws.GetPVKey(pv, params, "")
@@ -841,18 +838,18 @@ func (aws *AWS) DownloadPricingData() error {
 	if !aws.RIDataRunning {
 		err = aws.GetReservationDataFromAthena() // Block until one run has completed.
 		if err != nil {
-			klog.V(1).Infof("Failed to lookup reserved instance data: %s", err.Error())
+			log.Errorf("Failed to lookup reserved instance data: %s", err.Error())
 		} else { // If we make one successful run, check on new reservation data every hour
 			go func() {
 				defer errors.HandlePanic()
 				aws.RIDataRunning = true
 
 				for {
-					klog.Infof("Reserved Instance watcher running... next update in 1h")
+					log.Infof("Reserved Instance watcher running... next update in 1h")
 					time.Sleep(time.Hour)
 					err := aws.GetReservationDataFromAthena()
 					if err != nil {
-						klog.Infof("Error updating RI data: %s", err.Error())
+						log.Infof("Error updating RI data: %s", err.Error())
 					}
 				}
 			}()
@@ -861,17 +858,17 @@ func (aws *AWS) DownloadPricingData() error {
 	if !aws.SavingsPlanDataRunning {
 		err = aws.GetSavingsPlanDataFromAthena()
 		if err != nil {
-			klog.V(1).Infof("Failed to lookup savings plan data: %s", err.Error())
+			log.Errorf("Failed to lookup savings plan data: %s", err.Error())
 		} else {
 			go func() {
 				defer errors.HandlePanic()
 				aws.SavingsPlanDataRunning = true
 				for {
-					klog.Infof("Savings Plan watcher running... next update in 1h")
+					log.Infof("Savings Plan watcher running... next update in 1h")
 					time.Sleep(time.Hour)
 					err := aws.GetSavingsPlanDataFromAthena()
 					if err != nil {
-						klog.Infof("Error updating Savings Plan data: %s", err.Error())
+						log.Infof("Error updating Savings Plan data: %s", err.Error())
 					}
 				}
 			}()
@@ -890,10 +887,10 @@ func (aws *AWS) DownloadPricingData() error {
 	for {
 		t, err := dec.Token()
 		if err == io.EOF {
-			klog.V(2).Infof("done loading \"%s\"\n", pricingURL)
+			log.Infof("done loading \"%s\"\n", pricingURL)
 			break
 		} else if err != nil {
-			klog.V(2).Infof("error parsing response json %v", resp.Body)
+			log.Errorf("error parsing response json %v", resp.Body)
 			break
 		}
 		if t == "products" {
@@ -910,7 +907,7 @@ func (aws *AWS) DownloadPricingData() error {
 
 				err = dec.Decode(&product)
 				if err != nil {
-					klog.V(1).Infof("Error parsing response from \"%s\": %v", pricingURL, err.Error())
+					log.Errorf("Error parsing response from \"%s\": %v", pricingURL, err.Error())
 					break
 				}
 
@@ -987,7 +984,7 @@ func (aws *AWS) DownloadPricingData() error {
 					offerTerm := &AWSOfferTerm{}
 					err = dec.Decode(&offerTerm)
 					if err != nil {
-						klog.V(1).Infof("Error decoding AWS Offer Term: " + err.Error())
+						log.Errorf("Error decoding AWS Offer Term: %s", err.Error())
 					}
 
 					key, ok := skusToKeys[sku.(string)]
@@ -1028,7 +1025,7 @@ func (aws *AWS) DownloadPricingData() error {
 			}
 		}
 	}
-	klog.V(2).Infof("Finished downloading \"%s\"", pricingURL)
+	log.Infof("Finished downloading \"%s\"", pricingURL)
 
 	if !aws.SpotRefreshEnabled() {
 		return nil
@@ -1045,7 +1042,7 @@ func (aws *AWS) DownloadPricingData() error {
 			defer errors.HandlePanic()
 
 			for {
-				klog.Infof("Spot Pricing Refresh scheduled in %.2f minutes.", SpotRefreshDuration.Minutes())
+				log.Infof("Spot Pricing Refresh scheduled in %.2f minutes.", SpotRefreshDuration.Minutes())
 				time.Sleep(SpotRefreshDuration)
 
 				// Reoccurring refresh checks update times
@@ -1071,7 +1068,7 @@ func (aws *AWS) refreshSpotPricing(force bool) {
 
 	sp, err := aws.parseSpotData(aws.SpotDataBucket, aws.SpotDataPrefix, aws.ProjectID, aws.SpotDataRegion)
 	if err != nil {
-		klog.V(1).Infof("Skipping AWS spot data download: %s", err.Error())
+		log.Warnf("Skipping AWS spot data download: %s", err.Error())
 		aws.SpotPricingError = err
 		return
 	}
@@ -1168,7 +1165,7 @@ func (aws *AWS) createNode(terms *AWSProductTerms, usageType string, k Key) (*No
 		if len(arr) == 2 {
 			spotcost = arr[0]
 		} else {
-			klog.V(2).Infof("Spot data for node %s is missing", k.ID())
+			log.Infof("Spot data for node %s is missing", k.ID())
 		}
 		return &Node{
 			Cost:         spotcost,
@@ -1324,11 +1321,11 @@ func (awsProvider *AWS) ClusterInfo() (map[string]string, error) {
 
 	maybeClusterId := env.GetAWSClusterID()
 	if len(maybeClusterId) != 0 {
-		klog.V(2).Infof("Returning \"%s\" as ClusterName", maybeClusterId)
+		log.Infof("Returning \"%s\" as ClusterName", maybeClusterId)
 		return makeStructure(maybeClusterId)
 	}
 
-	klog.V(2).Infof("Unable to sniff out cluster ID, perhaps set $%s to force one", env.AWSClusterIDEnvVar)
+	log.Infof("Unable to sniff out cluster ID, perhaps set $%s to force one", env.AWSClusterIDEnvVar)
 	return makeStructure(defaultClusterName)
 }
 
@@ -1336,7 +1333,7 @@ func (awsProvider *AWS) ClusterInfo() (map[string]string, error) {
 func (aws *AWS) ConfigureAuth() error {
 	c, err := aws.Config.GetCustomPricingData()
 	if err != nil {
-		klog.V(1).Infof("Error downloading default pricing data: %s", err.Error())
+		log.Errorf("Error downloading default pricing data: %s", err.Error())
 	}
 	return aws.ConfigureAuthWith(c)
 }
@@ -1740,7 +1737,7 @@ func (aws *AWS) GetSavingsPlanDataFromAthena() error {
 			}
 			cost, err := strconv.ParseFloat(*r.Data[3].VarCharValue, 64)
 			if err != nil {
-				klog.Infof("Error converting `%s` from float ", *r.Data[3].VarCharValue)
+				log.Infof("Error converting `%s` from float ", *r.Data[3].VarCharValue)
 			}
 			r := &SavingsPlanData{
 				ResourceID:     *r.Data[2].VarCharValue,
@@ -1750,7 +1747,7 @@ func (aws *AWS) GetSavingsPlanDataFromAthena() error {
 			}
 			aws.SavingsPlanDataByInstanceID[r.ResourceID] = r
 		}
-		klog.V(1).Infof("Found %d savings plan applied instances", len(aws.SavingsPlanDataByInstanceID))
+		log.Debugf("Found %d savings plan applied instances", len(aws.SavingsPlanDataByInstanceID))
 		for k, r := range aws.SavingsPlanDataByInstanceID {
 			log.DedupedInfof(5, "Savings Plan Instance Data found for node %s : %f at time %s", k, r.EffectiveCost, r.MostRecentDate)
 		}
@@ -1760,7 +1757,7 @@ func (aws *AWS) GetSavingsPlanDataFromAthena() error {
 
 	query := fmt.Sprintf(q, cfg.AthenaTable, start, end)
 
-	klog.V(3).Infof("Running Query: %s", query)
+	log.Debugf("Running Query: %s", query)
 
 	err = aws.QueryAthenaPaginated(context.TODO(), query, processResults)
 	if err != nil {
@@ -1842,7 +1839,7 @@ func (aws *AWS) GetReservationDataFromAthena() error {
 			}
 			cost, err := strconv.ParseFloat(*r.Data[3].VarCharValue, 64)
 			if err != nil {
-				klog.Infof("Error converting `%s` from float ", *r.Data[3].VarCharValue)
+				log.Infof("Error converting `%s` from float ", *r.Data[3].VarCharValue)
 			}
 			r := &RIData{
 				ResourceID:     *r.Data[2].VarCharValue,
@@ -1852,7 +1849,7 @@ func (aws *AWS) GetReservationDataFromAthena() error {
 			}
 			aws.RIPricingByInstanceID[r.ResourceID] = r
 		}
-		klog.V(1).Infof("Found %d reserved instances", len(aws.RIPricingByInstanceID))
+		log.Debugf("Found %d reserved instances", len(aws.RIPricingByInstanceID))
 		for k, r := range aws.RIPricingByInstanceID {
 			log.DedupedInfof(5, "Reserved Instance Data found for node %s : %f at time %s", k, r.EffectiveCost, r.MostRecentDate)
 		}
@@ -1862,7 +1859,7 @@ func (aws *AWS) GetReservationDataFromAthena() error {
 
 	query := fmt.Sprintf(q, cfg.AthenaTable, start, end)
 
-	klog.V(3).Infof("Running Query: %s", query)
+	log.Debugf("Running Query: %s", query)
 
 	err = aws.QueryAthenaPaginated(context.TODO(), query, processResults)
 	if err != nil {
@@ -1973,18 +1970,18 @@ func (aws *AWS) parseSpotData(bucket string, prefix string, projectID string, re
 		})
 	}
 	lsoLen := len(lso.Contents)
-	klog.V(2).Infof("Found %d spot data files from yesterday", lsoLen)
+	log.Debugf("Found %d spot data files from yesterday", lsoLen)
 	if lsoLen == 0 {
-		klog.V(5).Infof("ListObjects \"s3://%s/%s\" produced no keys", *ls.Bucket, *ls.Prefix)
+		log.Debugf("ListObjects \"s3://%s/%s\" produced no keys", *ls.Bucket, *ls.Prefix)
 	}
 	lso2, err := cli.ListObjects(context.TODO(), ls2)
 	if err != nil {
 		return nil, err
 	}
 	lso2Len := len(lso2.Contents)
-	klog.V(2).Infof("Found %d spot data files from today", lso2Len)
+	log.Debugf("Found %d spot data files from today", lso2Len)
 	if lso2Len == 0 {
-		klog.V(5).Infof("ListObjects \"s3://%s/%s\" produced no keys", *ls2.Bucket, *ls2.Prefix)
+		log.Debugf("ListObjects \"s3://%s/%s\" produced no keys", *ls2.Bucket, *ls2.Prefix)
 	}
 
 	// TODO: Worth it to use LastModifiedDate to determine if we should reparse the spot data?
@@ -2056,17 +2053,17 @@ func (aws *AWS) parseSpotData(bucket string, prefix string, projectID string, re
 				// the first of which is "#Version"
 				// the second of which is "#Fields: "
 				if len(rec) != 1 {
-					klog.V(2).Infof("Expected %d spot info fields but received %d: %s", fieldsPerRecord, len(rec), rec)
+					log.Infof("Expected %d spot info fields but received %d: %s", fieldsPerRecord, len(rec), rec)
 					continue
 				}
 				if len(foundVersion) == 0 {
 					spotFeedVersion := rec[0]
-					klog.V(4).Infof("Spot feed version is \"%s\"", spotFeedVersion)
+					log.Debugf("Spot feed version is \"%s\"", spotFeedVersion)
 					matches := versionRx.FindStringSubmatch(spotFeedVersion)
 					if matches != nil {
 						foundVersion = matches[1]
 						if foundVersion != supportedSpotFeedVersion {
-							klog.V(2).Infof("Unsupported spot info feed version: wanted \"%s\" got \"%s\"", supportedSpotFeedVersion, foundVersion)
+							log.Infof("Unsupported spot info feed version: wanted \"%s\" got \"%s\"", supportedSpotFeedVersion, foundVersion)
 							break
 						}
 					}
@@ -2074,11 +2071,11 @@ func (aws *AWS) parseSpotData(bucket string, prefix string, projectID string, re
 				} else if strings.Index(rec[0], "#") == 0 {
 					continue
 				} else {
-					klog.V(3).Infof("skipping non-TSV line: %s", rec)
+					log.Infof("skipping non-TSV line: %s", rec)
 					continue
 				}
 			} else if err != nil {
-				klog.V(2).Infof("Error during spot info decode: %+v", err)
+				log.Warnf("Error during spot info decode: %+v", err)
 				continue
 			}
 

+ 14 - 16
pkg/cloud/azureprovider.go

@@ -13,10 +13,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/kubecost/cost-model/pkg/log"
-
 	"github.com/kubecost/cost-model/pkg/clustercache"
 	"github.com/kubecost/cost-model/pkg/env"
+	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/kubecost/cost-model/pkg/util"
 	"github.com/kubecost/cost-model/pkg/util/fileutil"
 	"github.com/kubecost/cost-model/pkg/util/json"
@@ -28,7 +27,6 @@ import (
 	"github.com/Azure/go-autorest/autorest/azure"
 	"github.com/Azure/go-autorest/autorest/azure/auth"
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/klog"
 )
 
 const (
@@ -196,7 +194,7 @@ func getRegions(service string, subscriptionsClient subscriptions.Client, provid
 						if loc, ok := allLocations[displName]; ok {
 							supLocations[loc] = displName
 						} else {
-							klog.V(1).Infof("unsupported cloud region %s", loc)
+							log.Warnf("unsupported cloud region %s", loc)
 						}
 					}
 					break
@@ -214,7 +212,7 @@ func getRegions(service string, subscriptionsClient subscriptions.Client, provid
 						if loc, ok := allLocations[displName]; ok {
 							supLocations[loc] = displName
 						} else {
-							klog.V(1).Infof("unsupported cloud region %s", loc)
+							log.Warnf("unsupported cloud region %s", loc)
 						}
 					}
 					break
@@ -628,7 +626,7 @@ func (az *Azure) loadAzureStorageConfig(force bool) (*AzureStorageConfig, error)
 func (az *Azure) GetKey(labels map[string]string, n *v1.Node) Key {
 	cfg, err := az.GetConfig()
 	if err != nil {
-		klog.Infof("Error loading azure custom pricing information")
+		log.Infof("Error loading azure custom pricing information")
 	}
 	// azure defaults, see https://docs.microsoft.com/en-us/azure/aks/gpu-cluster
 	gpuLabel := "accelerator"
@@ -786,17 +784,17 @@ func (az *Azure) DownloadPricingData() error {
 
 	rateCardFilter := fmt.Sprintf("OfferDurableId eq '%s' and Currency eq '%s' and Locale eq 'en-US' and RegionInfo eq '%s'", config.AzureOfferDurableID, config.CurrencyCode, config.AzureBillingRegion)
 
-	klog.Infof("Using ratecard query %s", rateCardFilter)
+	log.Infof("Using ratecard query %s", rateCardFilter)
 	result, err := rcClient.Get(context.TODO(), rateCardFilter)
 	if err != nil {
-		klog.Warningf("Error in pricing download query from API")
+		log.Warnf("Error in pricing download query from API")
 		az.RateCardPricingError = err
 		return err
 	}
 
 	regions, err := getRegions("compute", sClient, providersClient, config.AzureSubscriptionID)
 	if err != nil {
-		klog.Warningf("Error in pricing download regions from API")
+		log.Warnf("Error in pricing download regions from API")
 		az.RateCardPricingError = err
 		return err
 	}
@@ -834,7 +832,7 @@ func (az *Azure) DownloadPricingData() error {
 						var priceInUsd float64
 
 						if len(v.MeterRates) < 1 {
-							klog.V(1).Infof("missing rate info %+v", map[string]interface{}{"MeterSubCategory": *v.MeterSubCategory, "region": region})
+							log.Warnf("missing rate info %+v", map[string]interface{}{"MeterSubCategory": *v.MeterSubCategory, "region": region})
 							continue
 						}
 						for _, rate := range v.MeterRates {
@@ -845,7 +843,7 @@ func (az *Azure) DownloadPricingData() error {
 						priceStr := fmt.Sprintf("%f", pricePerHour)
 
 						key := region + "," + storageClass
-						klog.V(4).Infof("Adding PV.Key: %s, Cost: %s", key, priceStr)
+						log.Debugf("Adding PV.Key: %s, Cost: %s", key, priceStr)
 						allPrices[key] = &AzurePricing{
 							PV: &PV{
 								Cost:   priceStr,
@@ -883,7 +881,7 @@ func (az *Azure) DownloadPricingData() error {
 				var priceInUsd float64
 
 				if len(v.MeterRates) < 1 {
-					klog.V(1).Infof("missing rate info %+v", map[string]interface{}{"MeterSubCategory": *v.MeterSubCategory, "region": region})
+					log.Warnf("missing rate info %+v", map[string]interface{}{"MeterSubCategory": *v.MeterSubCategory, "region": region})
 					continue
 				}
 				for _, rate := range v.MeterRates {
@@ -912,7 +910,7 @@ func (az *Azure) DownloadPricingData() error {
 	zeroPrice := "0.0"
 	for region := range regions {
 		key := region + "," + AzureFileStandardStorageClass
-		klog.V(4).Infof("Adding PV.Key: %s, Cost: %s", key, zeroPrice)
+		log.Debugf("Adding PV.Key: %s, Cost: %s", key, zeroPrice)
 		allPrices[key] = &AzurePricing{
 			PV: &PV{
 				Cost:   zeroPrice,
@@ -1011,13 +1009,13 @@ func (az *Azure) NodePricing(key Key) (*Node, error) {
 	}
 
 	if n, ok := az.Pricing[azKey.Features()]; ok {
-		klog.V(4).Infof("Returning pricing for node %s: %+v from key %s", azKey, n, azKey.Features())
+		log.Debugf("Returning pricing for node %s: %+v from key %s", azKey, n, azKey.Features())
 		if azKey.isValidGPUNode() {
 			n.Node.GPU = azKey.GetGPUCount()
 		}
 		return n.Node, nil
 	}
-	klog.V(1).Infof("[Warning] no pricing data found for %s: %s", azKey.Features(), azKey)
+	log.Warnf("no pricing data found for %s: %s", azKey.Features(), azKey)
 	c, err := az.GetConfig()
 	if err != nil {
 		return nil, fmt.Errorf("No default pricing data available")
@@ -1246,7 +1244,7 @@ func (az *Azure) PVPricing(pvk PVKey) (*PV, error) {
 
 	pricing, ok := az.Pricing[pvk.Features()]
 	if !ok {
-		klog.V(4).Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+		log.Debugf("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
 		return &PV{}, nil
 	}
 	return pricing.PV, nil

+ 40 - 37
pkg/cloud/gcpprovider.go

@@ -20,6 +20,7 @@ import (
 	"github.com/kubecost/cost-model/pkg/util/fileutil"
 	"github.com/kubecost/cost-model/pkg/util/json"
 	"github.com/kubecost/cost-model/pkg/util/timeutil"
+	"github.com/rs/zerolog"
 
 	"cloud.google.com/go/bigquery"
 	"cloud.google.com/go/compute/metadata"
@@ -27,7 +28,6 @@ import (
 	"golang.org/x/oauth2/google"
 	compute "google.golang.org/api/compute/v1"
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/klog"
 )
 
 const GKE_GPU_TAG = "cloud.google.com/gke-accelerator"
@@ -194,7 +194,7 @@ func (*GCP) loadGCPAuthSecret() {
 	keyPath := path + "key.json"
 	keyExists, _ := fileutil.FileExists(keyPath)
 	if keyExists {
-		klog.V(1).Infof("GCP Auth Key already exists, no need to load from secret")
+		log.Info("GCP Auth Key already exists, no need to load from secret")
 		return
 	}
 
@@ -205,19 +205,19 @@ func (*GCP) loadGCPAuthSecret() {
 			errMessage = err.Error()
 		}
 
-		klog.V(4).Infof("[Warning] Failed to load auth secret, or was not mounted: %s", errMessage)
+		log.Warnf("Failed to load auth secret, or was not mounted: %s", errMessage)
 		return
 	}
 
 	result, err := ioutil.ReadFile(authSecretPath)
 	if err != nil {
-		klog.V(4).Infof("[Warning] Failed to load auth secret, or was not mounted: %s", err.Error())
+		log.Warnf("Failed to load auth secret, or was not mounted: %s", err.Error())
 		return
 	}
 
 	err = ioutil.WriteFile(keyPath, result, 0644)
 	if err != nil {
-		klog.V(4).Infof("[Warning] Failed to copy auth secret to %s: %s", keyPath, err.Error())
+		log.Warnf("Failed to copy auth secret to %s: %s", keyPath, err.Error())
 	}
 }
 
@@ -308,12 +308,12 @@ func (gcp *GCP) ClusterInfo() (map[string]string, error) {
 
 	attribute, err := metadataClient.InstanceAttributeValue("cluster-name")
 	if err != nil {
-		klog.Infof("Error loading metadata cluster-name: %s", err.Error())
+		log.Infof("Error loading metadata cluster-name: %s", err.Error())
 	}
 
 	c, err := gcp.GetConfig()
 	if err != nil {
-		klog.V(1).Infof("Error opening config: %s", err.Error())
+		log.Errorf("Error opening config: %s", err.Error())
 	}
 	if c.ClusterName != "" {
 		attribute = c.ClusterName
@@ -594,7 +594,7 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]Key, pvKeys map[stri
 				for matchnum, group := range provIdRx.FindStringSubmatch(product.Description) {
 					if matchnum == 1 {
 						gpuType = strings.ToLower(strings.Join(strings.Split(group, " "), "-"))
-						klog.V(4).Info("GPU type found: " + gpuType)
+						log.Debug("GPU type found: " + gpuType)
 					}
 				}
 
@@ -635,8 +635,8 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]Key, pvKeys map[stri
 						for k, key := range inputKeys {
 							if key.GPUType() == gpuType+","+usageType {
 								if region == strings.Split(k, ",")[0] {
-									klog.V(3).Infof("Matched GPU to node in region \"%s\"", region)
-									klog.V(4).Infof("PRODUCT DESCRIPTION: %s", product.Description)
+									log.Infof("Matched GPU to node in region \"%s\"", region)
+									log.Debugf("PRODUCT DESCRIPTION: %s", product.Description)
 									matchedKey := key.Features()
 									if pl, ok := gcpPricingList[matchedKey]; ok {
 										pl.Node.GPUName = gpuType
@@ -650,7 +650,7 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]Key, pvKeys map[stri
 										}
 										gcpPricingList[matchedKey] = product
 									}
-									klog.V(3).Infof("Added data for " + matchedKey)
+									log.Infof("Added data for " + matchedKey)
 								}
 							}
 						}
@@ -671,7 +671,7 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]Key, pvKeys map[stri
 								continue
 							} else if strings.Contains(strings.ToUpper(product.Description), "RAM") {
 								if instanceType == "custom" {
-									klog.V(4).Infof("RAM custom sku is: " + product.Name)
+									log.Debug("RAM custom sku is: " + product.Name)
 								}
 								if _, ok := gcpPricingList[candidateKey]; ok {
 									gcpPricingList[candidateKey].Node.RAMCost = strconv.FormatFloat(hourlyPrice, 'f', -1, 64)
@@ -688,10 +688,10 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]Key, pvKeys map[stri
 									gcpPricingList[candidateKey] = product
 								}
 								if _, ok := gcpPricingList[candidateKeyGPU]; ok {
-									klog.V(1).Infof("Adding RAM %f for %s", hourlyPrice, candidateKeyGPU)
+									log.Infof("Adding RAM %f for %s", hourlyPrice, candidateKeyGPU)
 									gcpPricingList[candidateKeyGPU].Node.RAMCost = strconv.FormatFloat(hourlyPrice, 'f', -1, 64)
 								} else {
-									klog.V(1).Infof("Adding RAM %f for %s", hourlyPrice, candidateKeyGPU)
+									log.Infof("Adding RAM %f for %s", hourlyPrice, candidateKeyGPU)
 									product = &GCPPricing{}
 									product.Node = &Node{
 										RAMCost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
@@ -743,7 +743,7 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]Key, pvKeys map[stri
 		if t == "nextPageToken" {
 			pageToken, err := dec.Token()
 			if err != nil {
-				klog.V(2).Infof("Error parsing nextpage token: " + err.Error())
+				log.Errorf("Error parsing nextpage token: " + err.Error())
 				return nil, "", err
 			}
 			if pageToken.(string) != "" {
@@ -763,7 +763,7 @@ func (gcp *GCP) parsePages(inputKeys map[string]Key, pvKeys map[string]PVKey) (m
 		return nil, err
 	}
 	url := "https://cloudbilling.googleapis.com/v1/services/6F81-5844-456A/skus?key=" + gcp.APIKey + "&currencyCode=" + c.CurrencyCode
-	klog.V(2).Infof("Fetch GCP Billing Data from URL: %s", url)
+	log.Infof("Fetch GCP Billing Data from URL: %s", url)
 	var parsePagesHelper func(string) error
 	parsePagesHelper = func(pageToken string) error {
 		if pageToken == "done" {
@@ -813,13 +813,13 @@ func (gcp *GCP) parsePages(inputKeys map[string]Key, pvKeys map[string]PVKey) (m
 			}
 		}
 	}
-	klog.V(1).Infof("ALL PAGES: %+v", returnPages)
+	log.Debugf("ALL PAGES: %+v", returnPages)
 	for k, v := range returnPages {
 		if v.Node != nil {
-			klog.V(1).Infof("Returned Page: %s : %+v", k, v.Node)
+			log.Debugf("Returned Page: %s : %+v", k, v.Node)
 		}
 		if v.PV != nil {
-			klog.V(1).Infof("Returned Page: %s : %+v", k, v.PV)
+			log.Debugf("Returned Page: %s : %+v", k, v.PV)
 		}
 	}
 	return returnPages, err
@@ -831,7 +831,7 @@ func (gcp *GCP) DownloadPricingData() error {
 	defer gcp.DownloadPricingDataLock.Unlock()
 	c, err := gcp.Config.GetCustomPricingData()
 	if err != nil {
-		klog.V(2).Infof("Error downloading default pricing data: %s", err.Error())
+		log.Errorf("Error downloading default pricing data: %s", err.Error())
 		return err
 	}
 	gcp.loadGCPAuthSecret()
@@ -883,12 +883,15 @@ func (gcp *GCP) DownloadPricingData() error {
 
 	reserved, err := gcp.getReservedInstances()
 	if err != nil {
-		klog.V(1).Infof("Failed to lookup reserved instance data: %s", err.Error())
+		log.Errorf("Failed to lookup reserved instance data: %s", err.Error())
 	} else {
-		klog.V(1).Infof("Found %d reserved instances", len(reserved))
 		gcp.ReservedInstances = reserved
-		for _, r := range reserved {
-			klog.V(1).Infof("%s", r)
+
+		if zerolog.GlobalLevel() <= zerolog.DebugLevel {
+			log.Debugf("Found %d reserved instances", len(reserved))
+			for _, r := range reserved {
+				log.Debugf("%s", r)
+			}
 		}
 	}
 
@@ -906,7 +909,7 @@ func (gcp *GCP) PVPricing(pvk PVKey) (*PV, error) {
 	defer gcp.DownloadPricingDataLock.RUnlock()
 	pricing, ok := gcp.Pricing[pvk.Features()]
 	if !ok {
-		klog.V(3).Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+		log.Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
 		return &PV{}, nil
 	}
 	return pricing.PV, nil
@@ -1017,7 +1020,7 @@ func (gcp *GCP) ApplyReservedInstancePricing(nodes map[string]*Node) {
 
 	// Early return if no reserved instance data loaded
 	if numReserved == 0 {
-		klog.V(4).Infof("[Reserved] No Reserved Instances")
+		log.Debug("[Reserved] No Reserved Instances")
 		return
 	}
 
@@ -1026,7 +1029,7 @@ func (gcp *GCP) ApplyReservedInstancePricing(nodes map[string]*Node) {
 	counters := make(map[string][]*GCPReservedCounter)
 	for _, r := range gcp.ReservedInstances {
 		if now.Before(r.StartDate) || now.After(r.EndDate) {
-			klog.V(1).Infof("[Reserved] Skipped Reserved Instance due to dates")
+			log.Infof("[Reserved] Skipped Reserved Instance due to dates")
 			continue
 		}
 
@@ -1054,19 +1057,19 @@ func (gcp *GCP) ApplyReservedInstancePricing(nodes map[string]*Node) {
 
 		kNode, ok := gcpNodes[nodeName]
 		if !ok {
-			klog.V(4).Infof("[Reserved] Could not find K8s Node with name: %s", nodeName)
+			log.Debugf("[Reserved] Could not find K8s Node with name: %s", nodeName)
 			continue
 		}
 
 		nodeRegion, ok := util.GetRegion(kNode.Labels)
 		if !ok {
-			klog.V(4).Infof("[Reserved] Could not find node region")
+			log.Debug("[Reserved] Could not find node region")
 			continue
 		}
 
 		reservedCounters, ok := counters[nodeRegion]
 		if !ok {
-			klog.V(4).Infof("[Reserved] Could not find counters for region: %s", nodeRegion)
+			log.Debugf("[Reserved] Could not find counters for region: %s", nodeRegion)
 			continue
 		}
 
@@ -1137,7 +1140,7 @@ func (gcp *GCP) getReservedInstances() ([]*GCPReservedInstance, error) {
 				case GCPReservedInstanceResourceTypeCPU:
 					vcpu = resource.Amount
 				default:
-					klog.V(4).Infof("Failed to handle resource type: %s", resource.Type)
+					log.Debugf("Failed to handle resource type: %s", resource.Type)
 				}
 			}
 
@@ -1150,13 +1153,13 @@ func (gcp *GCP) getReservedInstances() ([]*GCPReservedInstance, error) {
 			timeLayout := "2006-01-02T15:04:05Z07:00"
 			startTime, err := time.Parse(timeLayout, commit.StartTimestamp)
 			if err != nil {
-				klog.V(1).Infof("Failed to parse start date: %s", commit.StartTimestamp)
+				log.Warnf("Failed to parse start date: %s", commit.StartTimestamp)
 				continue
 			}
 
 			endTime, err := time.Parse(timeLayout, commit.EndTimestamp)
 			if err != nil {
-				klog.V(1).Infof("Failed to parse end date: %s", commit.EndTimestamp)
+				log.Warnf("Failed to parse end date: %s", commit.EndTimestamp)
 				continue
 			}
 
@@ -1247,7 +1250,7 @@ func (gcp *gcpKey) GPUType() string {
 		} else {
 			usageType = "ondemand"
 		}
-		klog.V(4).Infof("GPU of type: \"%s\" found", t)
+		log.Debugf("GPU of type: \"%s\" found", t)
 		return t + "," + usageType
 	}
 	return ""
@@ -1331,7 +1334,7 @@ func (gcp *GCP) isValidPricingKey(key Key) bool {
 // NodePricing returns GCP pricing data for a single node
 func (gcp *GCP) NodePricing(key Key) (*Node, error) {
 	if n, ok := gcp.getPricing(key); ok {
-		klog.V(4).Infof("Returning pricing for node %s: %+v from SKU %s", key, n.Node, n.Name)
+		log.Debugf("Returning pricing for node %s: %+v from SKU %s", key, n.Node, n.Name)
 		n.Node.BaseCPUPrice = gcp.BaseCPUPrice
 		return n.Node, nil
 	} else if ok := gcp.isValidPricingKey(key); ok {
@@ -1340,11 +1343,11 @@ func (gcp *GCP) NodePricing(key Key) (*Node, error) {
 			return nil, fmt.Errorf("Download pricing data failed: %s", err.Error())
 		}
 		if n, ok := gcp.getPricing(key); ok {
-			klog.V(4).Infof("Returning pricing for node %s: %+v from SKU %s", key, n.Node, n.Name)
+			log.Debugf("Returning pricing for node %s: %+v from SKU %s", key, n.Node, n.Name)
 			n.Node.BaseCPUPrice = gcp.BaseCPUPrice
 			return n.Node, nil
 		}
-		klog.V(1).Infof("[Warning] no pricing data found for %s: %s", key.Features(), key)
+		log.Warnf("no pricing data found for %s: %s", key.Features(), key)
 		return nil, fmt.Errorf("Warning: no pricing data found for %s", key)
 	}
 	return nil, fmt.Errorf("Warning: no pricing data found for %s", key)

+ 6 - 8
pkg/cloud/provider.go

@@ -13,8 +13,6 @@ import (
 
 	"github.com/kubecost/cost-model/pkg/util"
 
-	"k8s.io/klog"
-
 	"cloud.google.com/go/compute/metadata"
 
 	"github.com/kubecost/cost-model/pkg/clustercache"
@@ -398,7 +396,7 @@ func SharedLabels(p Provider) ([]string, []string) {
 	ks := strings.Split(config.SharedLabelNames, ",")
 	vs := strings.Split(config.SharedLabelValues, ",")
 	if len(ks) != len(vs) {
-		klog.V(2).Infof("[Warning] shared labels have mis-matched lengths: %d names, %d values", len(ks), len(vs))
+		log.Warnf("Shared labels have mis-matched lengths: %d names, %d values", len(ks), len(vs))
 		return names, values
 	}
 
@@ -432,7 +430,7 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 
 	switch cp.provider {
 	case "CSV":
-		klog.Infof("Using CSV Provider with CSV at %s", env.GetCSVPath())
+		log.Infof("Using CSV Provider with CSV at %s", env.GetCSVPath())
 		return &CSVProvider{
 			CSVLocation: env.GetCSVPath(),
 			CustomProvider: &CustomProvider{
@@ -441,7 +439,7 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 			},
 		}, nil
 	case "GCP":
-		klog.V(3).Info("metadata reports we are in GCE")
+		log.Info("metadata reports we are in GCE")
 		if apiKey == "" {
 			return nil, errors.New("Supply a GCP Key to start getting data")
 		}
@@ -453,7 +451,7 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 			clusterProjectId: cp.projectID,
 		}, nil
 	case "AWS":
-		klog.V(2).Info("Found ProviderID starting with \"aws\", using AWS Provider")
+		log.Info("Found ProviderID starting with \"aws\", using AWS Provider")
 		return &AWS{
 			Clientset:            cache,
 			Config:               NewProviderConfig(config, cp.configFileName),
@@ -462,7 +460,7 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 			serviceAccountChecks: NewServiceAccountChecks(),
 		}, nil
 	case "AZURE":
-		klog.V(2).Info("Found ProviderID starting with \"azure\", using Azure Provider")
+		log.Info("Found ProviderID starting with \"azure\", using Azure Provider")
 		return &Azure{
 			Clientset:            cache,
 			Config:               NewProviderConfig(config, cp.configFileName),
@@ -471,7 +469,7 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 			serviceAccountChecks: NewServiceAccountChecks(),
 		}, nil
 	default:
-		klog.V(2).Info("Unsupported provider, falling back to default")
+		log.Info("Unsupported provider, falling back to default")
 		return &CustomProvider{
 			Clientset: cache,
 			Config:    NewProviderConfig(config, cp.configFileName),

+ 6 - 8
pkg/cloud/providerconfig.go

@@ -13,8 +13,6 @@ import (
 	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/kubecost/cost-model/pkg/util/json"
 	"github.com/microcosm-cc/bluemonday"
-
-	"k8s.io/klog"
 )
 
 var sanitizePolicy = bluemonday.UGCPolicy()
@@ -61,7 +59,7 @@ func (pc *ProviderConfig) onConfigFileUpdated(changeType config.ChangeType, data
 		customPricing := new(CustomPricing)
 		err := json.Unmarshal(data, customPricing)
 		if err != nil {
-			klog.Infof("Could not decode Custom Pricing file at path %s. Using default.", pc.configFile.Path())
+			log.Infof("Could not decode Custom Pricing file at path %s. Using default.", pc.configFile.Path())
 			customPricing = DefaultPricing()
 		}
 
@@ -86,13 +84,13 @@ func (pc *ProviderConfig) loadConfig(writeIfNotExists bool) (*CustomPricing, err
 	exists, err := pc.configFile.Exists()
 	// File Error other than NotExists
 	if err != nil {
-		klog.Infof("Custom Pricing file at path '%s' read error: '%s'", pc.configFile.Path(), err.Error())
+		log.Infof("Custom Pricing file at path '%s' read error: '%s'", pc.configFile.Path(), err.Error())
 		return DefaultPricing(), err
 	}
 
 	// File Doesn't Exist
 	if !exists {
-		klog.Infof("Could not find Custom Pricing file at path '%s'", pc.configFile.Path())
+		log.Infof("Could not find Custom Pricing file at path '%s'", pc.configFile.Path())
 		pc.customPricing = DefaultPricing()
 
 		// Only write the file if flag enabled
@@ -104,7 +102,7 @@ func (pc *ProviderConfig) loadConfig(writeIfNotExists bool) (*CustomPricing, err
 
 			err = pc.configFile.Write(cj)
 			if err != nil {
-				klog.Infof("Could not write Custom Pricing file to path '%s'", pc.configFile.Path())
+				log.Infof("Could not write Custom Pricing file to path '%s'", pc.configFile.Path())
 				return pc.customPricing, err
 			}
 		}
@@ -115,7 +113,7 @@ func (pc *ProviderConfig) loadConfig(writeIfNotExists bool) (*CustomPricing, err
 	// File Exists - Read all contents of file, unmarshal json
 	byteValue, err := pc.configFile.Read()
 	if err != nil {
-		klog.Infof("Could not read Custom Pricing file at path %s", pc.configFile.Path())
+		log.Infof("Could not read Custom Pricing file at path %s", pc.configFile.Path())
 		// If read fails, we don't want to cache default, assuming that the file is valid
 		return DefaultPricing(), err
 	}
@@ -123,7 +121,7 @@ func (pc *ProviderConfig) loadConfig(writeIfNotExists bool) (*CustomPricing, err
 	var customPricing CustomPricing
 	err = json.Unmarshal(byteValue, &customPricing)
 	if err != nil {
-		klog.Infof("Could not decode Custom Pricing file at path %s", pc.configFile.Path())
+		log.Infof("Could not decode Custom Pricing file at path %s", pc.configFile.Path())
 		return DefaultPricing(), err
 	}
 

+ 3 - 3
pkg/clustercache/clustercache.go

@@ -4,7 +4,7 @@ import (
 	"sync"
 
 	"github.com/kubecost/cost-model/pkg/env"
-	"k8s.io/klog"
+	"github.com/kubecost/cost-model/pkg/log"
 
 	appsv1 "k8s.io/api/apps/v1"
 	autoscaling "k8s.io/api/autoscaling/v2beta1"
@@ -111,7 +111,7 @@ func NewKubernetesClusterCache(client kubernetes.Interface) ClusterCache {
 	pdbClient := client.PolicyV1beta1().RESTClient()
 
 	kubecostNamespace := env.GetKubecostNamespace()
-	klog.Infof("NAMESPACE: %s", kubecostNamespace)
+	log.Infof("NAMESPACE: %s", kubecostNamespace)
 
 	kcc := &KubernetesClusterCache{
 		client:                     client,
@@ -158,7 +158,7 @@ func NewKubernetesClusterCache(client kubernetes.Interface) ClusterCache {
 
 	wg.Wait()
 
-	klog.Infof("Done waiting")
+	log.Infof("Done waiting")
 
 	return kcc
 }

+ 2 - 2
pkg/clustercache/clusterexporter.go

@@ -59,7 +59,7 @@ func (ce *ClusterExporter) Run() {
 	ce.runState.WaitForReset()
 
 	if !ce.runState.Start() {
-		log.Warningf("ClusterExporter already running")
+		log.Warnf("ClusterExporter already running")
 		return
 	}
 
@@ -67,7 +67,7 @@ func (ce *ClusterExporter) Run() {
 		for {
 			err := ce.Export()
 			if err != nil {
-				log.Warningf("Failed to export cluster: %s", err)
+				log.Warnf("Failed to export cluster: %s", err)
 			}
 
 			select {

+ 3 - 3
pkg/clustercache/clusterimporter.go

@@ -49,7 +49,7 @@ func (ci *ClusterImporter) update(data []byte) {
 	ce := new(clusterEncoding)
 	err := json.Unmarshal(data, ce)
 	if err != nil {
-		log.Warningf("Failed to unmarshal cluster during import: %s", err)
+		log.Warnf("Failed to unmarshal cluster during import: %s", err)
 		return
 	}
 
@@ -74,7 +74,7 @@ func (ci *ClusterImporter) Run() {
 	if exists {
 		data, err := ci.source.Read()
 		if err != nil {
-			log.Warningf("Failed to import cluster: %s", err)
+			log.Warnf("Failed to import cluster: %s", err)
 		} else {
 			ci.update(data)
 		}
@@ -319,5 +319,5 @@ func (ci *ClusterImporter) GetAllReplicationControllers() []*v1.ReplicationContr
 func (ci *ClusterImporter) SetConfigMapUpdateFunc(_ func(interface{})) {
 	// TODO: (bolt) This function is still a bit strange to me for the ClusterCache interface.
 	// TODO: (bolt) no-op for now.
-	log.Warningf("SetConfigMapUpdateFunc is disabled for imported cluster data.")
+	log.Warnf("SetConfigMapUpdateFunc is disabled for imported cluster data.")
 }

+ 6 - 6
pkg/clustercache/watchcontroller.go

@@ -5,7 +5,7 @@ import (
 	"reflect"
 	"time"
 
-	"k8s.io/klog"
+	"github.com/kubecost/cost-model/pkg/log"
 
 	"k8s.io/apimachinery/pkg/fields"
 	rt "k8s.io/apimachinery/pkg/runtime"
@@ -135,7 +135,7 @@ func (c *CachingWatchController) processNextItem() bool {
 func (c *CachingWatchController) handle(key string) error {
 	obj, exists, err := c.indexer.GetByKey(key)
 	if err != nil {
-		klog.Errorf("Fetching %s with key %s from store failed with %v", c.resourceType, key, err)
+		log.Errorf("Fetching %s with key %s from store failed with %v", c.resourceType, key, err)
 		return err
 	}
 
@@ -163,7 +163,7 @@ func (c *CachingWatchController) handleErr(err error, key interface{}) {
 
 	// This controller retries 5 times if something goes wrong. After that, it stops trying.
 	if c.queue.NumRequeues(key) < 5 {
-		klog.V(3).Infof("Error syncing %s %v: %v", c.resourceType, key, err)
+		log.Errorf("Error syncing %s %v: %v", c.resourceType, key, err)
 
 		// Re-enqueue the key rate limited. Based on the rate limiter on the
 		// queue and the re-enqueue history, the key will be processed later again.
@@ -174,7 +174,7 @@ func (c *CachingWatchController) handleErr(err error, key interface{}) {
 	c.queue.Forget(key)
 	// Report to an external entity that, even after several retries, we could not successfully process this key
 	runtime.HandleError(err)
-	klog.Infof("Dropping %s %q out of the queue: %v", c.resourceType, key, err)
+	log.Infof("Dropping %s %q out of the queue: %v", c.resourceType, key, err)
 }
 
 func (c *CachingWatchController) WarmUp(cancelCh chan struct{}) {
@@ -192,14 +192,14 @@ func (c *CachingWatchController) Run(threadiness int, stopCh chan struct{}) {
 
 	// Let the workers stop when we are done
 	defer c.queue.ShutDown()
-	klog.V(3).Infof("Starting %s controller", c.resourceType)
+	log.Infof("Starting %s controller", c.resourceType)
 
 	for i := 0; i < threadiness; i++ {
 		go wait.Until(c.runWorker, time.Second, stopCh)
 	}
 
 	<-stopCh
-	klog.V(3).Infof("Stopping %s controller", c.resourceType)
+	log.Infof("Stopping %s controller", c.resourceType)
 }
 
 func (c *CachingWatchController) runWorker() {

+ 3 - 4
pkg/cmd/agent/agent.go

@@ -20,7 +20,6 @@ import (
 	prometheusAPI "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/klog"
 
 	"github.com/rs/cors"
 	"k8s.io/client-go/kubernetes"
@@ -152,7 +151,7 @@ func Execute(opts *AgentOpts) error {
 		scrapeInterval = si
 	}
 
-	klog.Infof("Using scrape interval of %f", scrapeInterval.Seconds())
+	log.Infof("Using scrape interval of %f", scrapeInterval.Seconds())
 
 	// initialize kubernetes client and cluster cache
 	k8sClient, clusterCache, err := newKubernetesClusterCache()
@@ -183,7 +182,7 @@ func Execute(opts *AgentOpts) error {
 	for _, cw := range watchedConfigs {
 		configs, err := k8sClient.CoreV1().ConfigMaps(kubecostNamespace).Get(context.Background(), cw, metav1.GetOptions{})
 		if err != nil {
-			klog.Infof("No %s configmap found at install time, using existing configs: %s", cw, err.Error())
+			log.Infof("No %s configmap found at install time, using existing configs: %s", cw, err.Error())
 		} else {
 			watchConfigFunc(configs)
 		}
@@ -220,7 +219,7 @@ func Execute(opts *AgentOpts) error {
 	// download pricing data
 	err = cloudProvider.DownloadPricingData()
 	if err != nil {
-		klog.Errorf("Error downloading pricing data: %s", err)
+		log.Errorf("Error downloading pricing data: %s", err)
 	}
 
 	// start emitting metrics

+ 17 - 11
pkg/cmd/commands.go

@@ -1,15 +1,15 @@
 package cmd
 
 import (
-	"flag"
 	"fmt"
 	"os"
+	"strings"
 
 	"github.com/kubecost/cost-model/pkg/cmd/agent"
 	"github.com/kubecost/cost-model/pkg/cmd/costmodel"
+	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
-	"k8s.io/klog"
+	"github.com/spf13/viper"
 )
 
 const (
@@ -41,14 +41,6 @@ func Execute(costModelCmd *cobra.Command) error {
 
 	rootCmd := newRootCommand(costModelCmd)
 
-	// initialize klog and make cobra aware of all the go flags
-	klog.InitFlags(nil)
-
-	flag.CommandLine.VisitAll(func(f *flag.Flag) {
-		pflag.CommandLine.AddGoFlag(f)
-	})
-	pflag.CommandLine.Set("v", "3")
-
 	// in the event that no directive/command is passed, we want to default to using the cost-model command
 	// cobra doesn't provide a way within the API to do this, so we'll prepend the command if it is omitted.
 	if len(os.Args) > 1 {
@@ -73,12 +65,26 @@ func newRootCommand(costModelCmd *cobra.Command) *cobra.Command {
 		SilenceUsage: true,
 	}
 
+	// Add our persistent flags, these are global and available anywhere
+	cmd.PersistentFlags().String("log-level", "info", "Set the log level")
+	cmd.PersistentFlags().String("log-format", "pretty", "Set the log format - Can be either 'JSON' or 'pretty'")
+
+	viper.BindPFlag("log-level", cmd.PersistentFlags().Lookup("log-level"))
+	viper.BindPFlag("log-format", cmd.PersistentFlags().Lookup("log-format"))
+
+	// Setup viper to read from the env, this allows reading flags from the command line or the env
+	// using the format 'LOG_LEVEL'
+	viper.AutomaticEnv()
+	viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+
 	// add the modes of operation
 	cmd.AddCommand(
 		costModelCmd,
 		newAgentCommand(),
 	)
 
+	log.InitLogging()
+
 	return cmd
 }
 

+ 2 - 2
pkg/config/configfile.go

@@ -244,7 +244,7 @@ func (cf *ConfigFile) runWatcher() {
 	// if start fails after waiting for a reset, it means that another thread
 	// beat this thread to the start
 	if !cf.runState.Start() {
-		log.Warningf("Run watcher already running for file: %s", cf.file)
+		log.Warnf("Run watcher already running for file: %s", cf.file)
 		return
 	}
 
@@ -297,7 +297,7 @@ func (cf *ConfigFile) runWatcher() {
 			if !exists {
 				data, err := cf.internalRead(true)
 				if err != nil {
-					log.Warningf("Read() Error: %s\n", err)
+					log.Warnf("Read() Error: %s\n", err)
 					continue
 				}
 				exists = true

+ 2 - 2
pkg/config/configmanager.go

@@ -61,11 +61,11 @@ func NewConfigFileManager(opts *ConfigFileManagerOpts) *ConfigFileManager {
 	if opts.IsBucketStorageEnabled() {
 		bucketConfig, err := ioutil.ReadFile(opts.BucketStoreConfig)
 		if err != nil {
-			log.Warningf("Failed to initialize config bucket storage: %s", err)
+			log.Warnf("Failed to initialize config bucket storage: %s", err)
 		} else {
 			bucketStore, err := storage.NewBucketStorage(bucketConfig)
 			if err != nil {
-				log.Warningf("Failed to create config bucket storage: %s", err)
+				log.Warnf("Failed to create config bucket storage: %s", err)
 			} else {
 				configStore = bucketStore
 			}

+ 31 - 32
pkg/costmodel/aggregation.go

@@ -25,7 +25,6 @@ import (
 	"github.com/kubecost/cost-model/pkg/util/json"
 	"github.com/patrickmn/go-cache"
 	prometheusClient "github.com/prometheus/client_golang/api"
-	"k8s.io/klog"
 )
 
 const (
@@ -221,7 +220,7 @@ func (a *Accesses) ComputeIdleCoefficient(costData map[string]*CostData, cli pro
 
 	for cid, costs := range clusterCosts {
 		if costs.CPUCumulative == 0 && costs.RAMCumulative == 0 && costs.StorageCumulative == 0 {
-			klog.V(1).Infof("[Warning] No ClusterCosts data for cluster '%s'. Is it emitting data?", cid)
+			log.Warnf("No ClusterCosts data for cluster '%s'. Is it emitting data?", cid)
 			coefficients[cid] = 1.0
 			continue
 		}
@@ -272,13 +271,13 @@ type AggregationOptions struct {
 func clampAverage(requestsAvg float64, usedAverage float64, allocationAvg float64, resource string) (float64, float64) {
 	rAvg := requestsAvg
 	if rAvg > allocationAvg {
-		klog.V(4).Infof("[Warning] Average %s Requested (%f) > Average %s Allocated (%f). Clamping.", resource, rAvg, resource, allocationAvg)
+		log.Debugf("Average %s Requested (%f) > Average %s Allocated (%f). Clamping.", resource, rAvg, resource, allocationAvg)
 		rAvg = allocationAvg
 	}
 
 	uAvg := usedAverage
 	if uAvg > allocationAvg {
-		klog.V(4).Infof("[Warning]: Average %s Used (%f) > Average %s Allocated (%f). Clamping.", resource, uAvg, resource, allocationAvg)
+		log.Debugf("Average %s Used (%f) > Average %s Allocated (%f). Clamping.", resource, uAvg, resource, allocationAvg)
 		uAvg = allocationAvg
 	}
 
@@ -414,7 +413,7 @@ func AggregateCostData(costData map[string]*CostData, field string, subfields []
 		if opts.SharedSplit == SplitTypeWeighted {
 			d := opts.TotalContainerCost - sharedResourceCost
 			if d == 0 {
-				klog.V(1).Infof("[Warning] Total container cost '%f' and shared resource cost '%f are the same'. Setting sharedCoefficient to 1", opts.TotalContainerCost, sharedResourceCost)
+				log.Warnf("Total container cost '%f' and shared resource cost '%f are the same'. Setting sharedCoefficient to 1", opts.TotalContainerCost, sharedResourceCost)
 				sharedCoefficient = 1.0
 			} else {
 				sharedCoefficient = (agg.CPUCost + agg.RAMCost + agg.GPUCost + agg.PVCost + agg.NetworkCost) / d
@@ -525,59 +524,59 @@ func AggregateCostData(costData map[string]*CostData, field string, subfields []
 		}
 		// Typesafety checks
 		if math.IsNaN(agg.CPUAllocationHourlyAverage) || math.IsInf(agg.CPUAllocationHourlyAverage, 0) {
-			klog.V(1).Infof("[Warning] CPUAllocationHourlyAverage is %f for '%s: %s/%s'", agg.CPUAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("CPUAllocationHourlyAverage is %f for '%s: %s/%s'", agg.CPUAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.CPUAllocationHourlyAverage = 0
 		}
 		if math.IsNaN(agg.CPUCost) || math.IsInf(agg.CPUCost, 0) {
-			klog.V(1).Infof("[Warning] CPUCost is %f for '%s: %s/%s'", agg.CPUCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("CPUCost is %f for '%s: %s/%s'", agg.CPUCost, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.CPUCost = 0
 		}
 		if math.IsNaN(agg.CPUEfficiency) || math.IsInf(agg.CPUEfficiency, 0) {
-			klog.V(1).Infof("[Warning] CPUEfficiency is %f for '%s: %s/%s'", agg.CPUEfficiency, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("CPUEfficiency is %f for '%s: %s/%s'", agg.CPUEfficiency, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.CPUEfficiency = 0
 		}
 		if math.IsNaN(agg.Efficiency) || math.IsInf(agg.Efficiency, 0) {
-			klog.V(1).Infof("[Warning] Efficiency is %f for '%s: %s/%s'", agg.Efficiency, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("Efficiency is %f for '%s: %s/%s'", agg.Efficiency, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.Efficiency = 0
 		}
 		if math.IsNaN(agg.GPUAllocationHourlyAverage) || math.IsInf(agg.GPUAllocationHourlyAverage, 0) {
-			klog.V(1).Infof("[Warning] GPUAllocationHourlyAverage is %f for '%s: %s/%s'", agg.GPUAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("GPUAllocationHourlyAverage is %f for '%s: %s/%s'", agg.GPUAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.GPUAllocationHourlyAverage = 0
 		}
 		if math.IsNaN(agg.GPUCost) || math.IsInf(agg.GPUCost, 0) {
-			klog.V(1).Infof("[Warning] GPUCost is %f for '%s: %s/%s'", agg.GPUCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("GPUCost is %f for '%s: %s/%s'", agg.GPUCost, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.GPUCost = 0
 		}
 		if math.IsNaN(agg.RAMAllocationHourlyAverage) || math.IsInf(agg.RAMAllocationHourlyAverage, 0) {
-			klog.V(1).Infof("[Warning] RAMAllocationHourlyAverage is %f for '%s: %s/%s'", agg.RAMAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("RAMAllocationHourlyAverage is %f for '%s: %s/%s'", agg.RAMAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.RAMAllocationHourlyAverage = 0
 		}
 		if math.IsNaN(agg.RAMCost) || math.IsInf(agg.RAMCost, 0) {
-			klog.V(1).Infof("[Warning] RAMCost is %f for '%s: %s/%s'", agg.RAMCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("RAMCost is %f for '%s: %s/%s'", agg.RAMCost, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.RAMCost = 0
 		}
 		if math.IsNaN(agg.RAMEfficiency) || math.IsInf(agg.RAMEfficiency, 0) {
-			klog.V(1).Infof("[Warning] RAMEfficiency is %f for '%s: %s/%s'", agg.RAMEfficiency, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("RAMEfficiency is %f for '%s: %s/%s'", agg.RAMEfficiency, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.RAMEfficiency = 0
 		}
 		if math.IsNaN(agg.PVAllocationHourlyAverage) || math.IsInf(agg.PVAllocationHourlyAverage, 0) {
-			klog.V(1).Infof("[Warning] PVAllocationHourlyAverage is %f for '%s: %s/%s'", agg.PVAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("PVAllocationHourlyAverage is %f for '%s: %s/%s'", agg.PVAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.PVAllocationHourlyAverage = 0
 		}
 		if math.IsNaN(agg.PVCost) || math.IsInf(agg.PVCost, 0) {
-			klog.V(1).Infof("[Warning] PVCost is %f for '%s: %s/%s'", agg.PVCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("PVCost is %f for '%s: %s/%s'", agg.PVCost, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.PVCost = 0
 		}
 		if math.IsNaN(agg.NetworkCost) || math.IsInf(agg.NetworkCost, 0) {
-			klog.V(1).Infof("[Warning] NetworkCost is %f for '%s: %s/%s'", agg.NetworkCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("NetworkCost is %f for '%s: %s/%s'", agg.NetworkCost, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.NetworkCost = 0
 		}
 		if math.IsNaN(agg.SharedCost) || math.IsInf(agg.SharedCost, 0) {
-			klog.V(1).Infof("[Warning] SharedCost is %f for '%s: %s/%s'", agg.SharedCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("SharedCost is %f for '%s: %s/%s'", agg.SharedCost, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.SharedCost = 0
 		}
 		if math.IsNaN(agg.TotalCost) || math.IsInf(agg.TotalCost, 0) {
-			klog.V(1).Infof("[Warning] TotalCost is %f for '%s: %s/%s'", agg.TotalCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			log.Warnf("TotalCost is %f for '%s: %s/%s'", agg.TotalCost, agg.Cluster, agg.Aggregator, agg.Environment)
 			agg.TotalCost = 0
 		}
 	}
@@ -662,13 +661,13 @@ func getDiscounts(costDatum *CostData, cpuCost float64, ramCost float64, discoun
 	if reserved != nil && reserved.CPUCost > 0 && reserved.RAMCost > 0 {
 		reservedCPUDiscount := 0.0
 		if cpuCost == 0 {
-			klog.V(1).Infof("[Warning] No cpu cost found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
+			log.Warnf("No cpu cost found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
 		} else {
 			reservedCPUDiscount = 1.0 - (reserved.CPUCost / cpuCost)
 		}
 		reservedRAMDiscount := 0.0
 		if ramCost == 0 {
-			klog.V(1).Infof("[Warning] No ram cost found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
+			log.Warnf("No ram cost found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
 		} else {
 			reservedRAMDiscount = 1.0 - (reserved.RAMCost / ramCost)
 		}
@@ -690,7 +689,7 @@ func getDiscounts(costDatum *CostData, cpuCost float64, ramCost float64, discoun
 					blendedCPUDiscount = reservedCPUDiscount
 				} else {
 					if nodeCPU == 0 {
-						klog.V(1).Infof("[Warning] No ram found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
+						log.Warnf("No ram found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
 					} else {
 						blendedCPUDiscount = (float64(reserved.ReservedCPU) * reservedCPUDiscount) + (float64(nonReservedCPU)*discount)/float64(nodeCPU)
 					}
@@ -700,7 +699,7 @@ func getDiscounts(costDatum *CostData, cpuCost float64, ramCost float64, discoun
 					blendedRAMDiscount = reservedRAMDiscount
 				} else {
 					if nodeRAMGB == 0 {
-						klog.V(1).Infof("[Warning] No ram found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
+						log.Warnf("No ram found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
 					} else {
 						blendedRAMDiscount = (reservedRAMGB * reservedRAMDiscount) + (nonReservedRAM*discount)/nodeRAMGB
 					}
@@ -759,7 +758,7 @@ func getPriceVectors(cp cloud.Provider, costDatum *CostData, rate string, discou
 	// default cost values with custom values
 	customPricing, err := cp.GetConfig()
 	if err != nil {
-		klog.Errorf("failed to load custom pricing: %s", err)
+		log.Errorf("failed to load custom pricing: %s", err)
 	}
 	if cloud.CustomPricesEnabled(cp) && err == nil {
 		var cpuCostStr string
@@ -797,9 +796,9 @@ func getPriceVectors(cp cloud.Provider, costDatum *CostData, rate string, discou
 
 	cpuDiscount, ramDiscount := getDiscounts(costDatum, cpuCost, ramCost, discount)
 
-	klog.V(4).Infof("Node Name: %s", costDatum.NodeName)
-	klog.V(4).Infof("Blended CPU Discount: %f", cpuDiscount)
-	klog.V(4).Infof("Blended RAM Discount: %f", ramDiscount)
+	log.Debugf("Node Name: %s", costDatum.NodeName)
+	log.Debugf("Blended CPU Discount: %f", cpuDiscount)
+	log.Debugf("Blended RAM Discount: %f", ramDiscount)
 
 	// TODO should we try to apply the rate coefficient here or leave it as a totals-only metric?
 	rateCoeff := 1.0
@@ -1263,7 +1262,7 @@ func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client,
 				labelValues[ln] = append(labelValues[ln], lv)
 			} else {
 				// label is not of the form name=value, so log it and move on
-				log.Warningf("ComputeAggregateCostModel: skipping illegal label filter: %s", l)
+				log.Warnf("ComputeAggregateCostModel: skipping illegal label filter: %s", l)
 			}
 		}
 
@@ -1312,7 +1311,7 @@ func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client,
 				annotationValues[an] = append(annotationValues[an], av)
 			} else {
 				// annotation is not of the form name=value, so log it and move on
-				log.Warningf("ComputeAggregateCostModel: skipping illegal annotation filter: %s", annot)
+				log.Warnf("ComputeAggregateCostModel: skipping illegal annotation filter: %s", annot)
 			}
 		}
 
@@ -1682,7 +1681,7 @@ func GenerateAggKey(window kubecost.Window, field string, subfields []string, op
 				lFilters = append(lFilters, fmt.Sprintf("%s=%s", lfn, lfv))
 			} else {
 				// label is not of the form name=value, so log it and move on
-				klog.V(2).Infof("[Warning] GenerateAggKey: skipping illegal label filter: %s", lf)
+				log.Warnf("GenerateAggKey: skipping illegal label filter: %s", lf)
 			}
 		}
 	}
@@ -1703,7 +1702,7 @@ func GenerateAggKey(window kubecost.Window, field string, subfields []string, op
 				aFilters = append(aFilters, fmt.Sprintf("%s=%s", afn, afv))
 			} else {
 				// annotation is not of the form name=value, so log it and move on
-				klog.V(2).Infof("[Warning] GenerateAggKey: skipping illegal annotation filter: %s", af)
+				log.Warnf("GenerateAggKey: skipping illegal annotation filter: %s", af)
 			}
 		}
 	}
@@ -1800,7 +1799,7 @@ func (a *Accesses) warmAggregateCostModelCache() {
 			a.ClusterCostsCache.Set(key, totals, a.GetCacheExpiration(window.Duration()))
 			log.Infof("caching %s cluster costs for %s", fmtDuration, a.GetCacheExpiration(window.Duration()))
 		} else {
-			log.Warningf("not caching %s cluster costs: no data or less than %f minutes data ", fmtDuration, clusterCostsCacheMinutes)
+			log.Warnf("not caching %s cluster costs: no data or less than %f minutes data ", fmtDuration, clusterCostsCacheMinutes)
 		}
 		return aggErr, err
 	}

+ 37 - 37
pkg/costmodel/allocation.go

@@ -630,7 +630,7 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 					if pvcInterval, ok := pvcPodIntervalMap[pvcKey][podKey]; ok {
 						s, e = *pvcInterval.Start(), *pvcInterval.End()
 					} else {
-						log.Warningf("CostModel.ComputeAllocation: allocation %s and PVC %s have no associated active window", alloc.Name, pvc.Name)
+						log.Warnf("CostModel.ComputeAllocation: allocation %s and PVC %s have no associated active window", alloc.Name, pvc.Name)
 					}
 
 					minutes := e.Sub(s).Minutes()
@@ -648,7 +648,7 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 					if coeffComponents, ok := sharedPVCCostCoefficientMap[pvcKey][podKey]; ok {
 						cost *= getCoefficientFromComponents(coeffComponents)
 					} else {
-						log.Warningf("CostModel.ComputeAllocation: allocation %s and PVC %s have relation but no coeff", alloc.Name, pvc.Name)
+						log.Warnf("CostModel.ComputeAllocation: allocation %s and PVC %s have relation but no coeff", alloc.Name, pvc.Name)
 					}
 
 					// Apply the size and cost of the PV to the allocation, each
@@ -746,7 +746,7 @@ func (cm *CostModel) buildPodMap(window kubecost.Window, resolution, maxBatchSiz
 func applyPodResults(window kubecost.Window, resolution time.Duration, podMap map[podKey]*Pod, clusterStart, clusterEnd map[string]time.Time, resPods []*prom.QueryResult) {
 	for _, res := range resPods {
 		if len(res.Values) == 0 {
-			log.Warningf("CostModel.ComputeAllocation: empty minutes result")
+			log.Warnf("CostModel.ComputeAllocation: empty minutes result")
 			continue
 		}
 
@@ -757,7 +757,7 @@ func applyPodResults(window kubecost.Window, resolution time.Duration, podMap ma
 
 		labels, err := res.GetStrings("namespace", "pod")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: minutes query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: minutes query result missing field: %s", err)
 			continue
 		}
 
@@ -912,7 +912,7 @@ func applyCPUCoresAllocated(podMap map[podKey]*Pod, resCPUCoresAllocated []*prom
 
 		node, err := res.GetString("node")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: CPU allocation query result missing 'node': %s", key)
+			log.Warnf("CostModel.ComputeAllocation: CPU allocation query result missing 'node': %s", key)
 			continue
 		}
 		pod.Allocations[container].Properties.Node = node
@@ -956,7 +956,7 @@ func applyCPUCoresRequested(podMap map[podKey]*Pod, resCPUCoresRequested []*prom
 
 		node, err := res.GetString("node")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: CPU request query result missing 'node': %s", key)
+			log.Warnf("CostModel.ComputeAllocation: CPU request query result missing 'node': %s", key)
 			continue
 		}
 		pod.Allocations[container].Properties.Node = node
@@ -1061,7 +1061,7 @@ func applyRAMBytesAllocated(podMap map[podKey]*Pod, resRAMBytesAllocated []*prom
 
 		node, err := res.GetString("node")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: RAM allocation query result missing 'node': %s", key)
+			log.Warnf("CostModel.ComputeAllocation: RAM allocation query result missing 'node': %s", key)
 			continue
 		}
 		pod.Allocations[container].Properties.Node = node
@@ -1101,7 +1101,7 @@ func applyRAMBytesRequested(podMap map[podKey]*Pod, resRAMBytesRequested []*prom
 
 		node, err := res.GetString("node")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: RAM request query result missing 'node': %s", key)
+			log.Warnf("CostModel.ComputeAllocation: RAM request query result missing 'node': %s", key)
 			continue
 		}
 		pod.Allocations[container].Properties.Node = node
@@ -1546,7 +1546,7 @@ func resToPodDaemonSetMap(resDaemonSetLabels []*prom.QueryResult) map[podKey]con
 
 		pod, err := res.GetString("pod")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: DaemonSetLabel result without pod: %s", controllerKey)
+			log.Warnf("CostModel.ComputeAllocation: DaemonSetLabel result without pod: %s", controllerKey)
 		}
 
 		podKey := newPodKey(controllerKey.Cluster, controllerKey.Namespace, pod)
@@ -1575,7 +1575,7 @@ func resToPodJobMap(resJobLabels []*prom.QueryResult) map[podKey]controllerKey {
 
 		pod, err := res.GetString("pod")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: JobLabel result without pod: %s", controllerKey)
+			log.Warnf("CostModel.ComputeAllocation: JobLabel result without pod: %s", controllerKey)
 		}
 
 		podKey := newPodKey(controllerKey.Cluster, controllerKey.Namespace, pod)
@@ -1617,7 +1617,7 @@ func resToPodReplicaSetMap(resPodsWithReplicaSetOwner []*prom.QueryResult, resRe
 
 		pod, err := res.GetString("pod")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: ReplicaSet result without pod: %s", controllerKey)
+			log.Warnf("CostModel.ComputeAllocation: ReplicaSet result without pod: %s", controllerKey)
 		}
 
 		podKey := newPodKey(controllerKey.Cluster, controllerKey.Namespace, pod)
@@ -1691,19 +1691,19 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerCPUHr
 
 		node, err := res.GetString("node")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node CPU cost query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node CPU cost query result missing field: %s", err)
 			continue
 		}
 
 		instanceType, err := res.GetString("instance_type")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node CPU cost query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node CPU cost query result missing field: %s", err)
 			continue
 		}
 
 		providerID, err := res.GetString("provider_id")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node CPU cost query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node CPU cost query result missing field: %s", err)
 			continue
 		}
 
@@ -1729,19 +1729,19 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerRA
 
 		node, err := res.GetString("node")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node RAM cost query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node RAM cost query result missing field: %s", err)
 			continue
 		}
 
 		instanceType, err := res.GetString("instance_type")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node RAM cost query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node RAM cost query result missing field: %s", err)
 			continue
 		}
 
 		providerID, err := res.GetString("provider_id")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node RAM cost query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node RAM cost query result missing field: %s", err)
 			continue
 		}
 
@@ -1767,19 +1767,19 @@ func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerGPUHr
 
 		node, err := res.GetString("node")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node GPU cost query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node GPU cost query result missing field: %s", err)
 			continue
 		}
 
 		instanceType, err := res.GetString("instance_type")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node GPU cost query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node GPU cost query result missing field: %s", err)
 			continue
 		}
 
 		providerID, err := res.GetString("provider_id")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node GPU cost query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node GPU cost query result missing field: %s", err)
 			continue
 		}
 
@@ -1805,13 +1805,13 @@ func applyNodeSpot(nodeMap map[nodeKey]*NodePricing, resNodeIsSpot []*prom.Query
 
 		node, err := res.GetString("node")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: Node spot query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: Node spot query result missing field: %s", err)
 			continue
 		}
 
 		key := newNodeKey(cluster, node)
 		if _, ok := nodeMap[key]; !ok {
-			log.Warningf("CostModel.ComputeAllocation: Node spot  query result for missing node: %s", key)
+			log.Warnf("CostModel.ComputeAllocation: Node spot query result for missing node: %s", key)
 			continue
 		}
 
@@ -1859,7 +1859,7 @@ func buildPVMap(pvMap map[pvKey]*PV, resPVCostPerGiBHour []*prom.QueryResult) {
 
 		name, err := res.GetString("volumename")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: PV cost without volumename")
+			log.Warnf("CostModel.ComputeAllocation: PV cost without volumename")
 			continue
 		}
 
@@ -1877,12 +1877,12 @@ func applyPVBytes(pvMap map[pvKey]*PV, resPVBytes []*prom.QueryResult) {
 	for _, res := range resPVBytes {
 		key, err := resultPVKey(res, env.GetPromClusterLabel(), "persistentvolume")
 		if err != nil {
-			log.Warningf("CostModel.ComputeAllocation: PV bytes query result missing field: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: PV bytes query result missing field: %s", err)
 			continue
 		}
 
 		if _, ok := pvMap[key]; !ok {
-			log.Warningf("CostModel.ComputeAllocation: PV bytes result for missing PV: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: PV bytes result for missing PV: %s", err)
 			continue
 		}
 
@@ -1927,7 +1927,7 @@ func buildPVCMap(window kubecost.Window, pvcMap map[pvcKey]*PVC, pvMap map[pvKey
 			}
 		}
 		if pvcStart.IsZero() || pvcEnd.IsZero() {
-			log.Warningf("CostModel.ComputeAllocation: PVC %s has no running time", pvcKey)
+			log.Warnf("CostModel.ComputeAllocation: PVC %s has no running time", pvcKey)
 		}
 		pvcStart = pvcStart.Add(-time.Minute)
 
@@ -2151,7 +2151,7 @@ func getLoadBalancerCosts(resLBCost, resLBActiveMins []*prom.QueryResult, resolu
 			continue
 		}
 		if _, ok := lbHourlyCosts[serviceKey]; !ok {
-			log.Warningf("CostModel: failed to find hourly cost for Load Balancer: %v", serviceKey)
+			log.Warnf("CostModel: failed to find hourly cost for Load Balancer: %v", serviceKey)
 			continue
 		}
 
@@ -2222,7 +2222,7 @@ func (cm *CostModel) getNodePricing(nodeMap map[nodeKey]*NodePricing, nodeKey no
 	// node pricing with the custom values.
 	customPricingConfig, err := cm.Provider.GetConfig()
 	if err != nil {
-		log.Warningf("CostModel: failed to load custom pricing: %s", err)
+		log.Warnf("CostModel: failed to load custom pricing: %s", err)
 	}
 	if cloud.CustomPricesEnabled(cm.Provider) && customPricingConfig != nil {
 		return cm.getCustomNodePricing(node.Preemptible)
@@ -2236,42 +2236,42 @@ func (cm *CostModel) getNodePricing(nodeMap map[nodeKey]*NodePricing, nodeKey no
 	// them as strings like this?
 
 	if node.CostPerCPUHr == 0 || math.IsNaN(node.CostPerCPUHr) {
-		log.Warningf("CostModel: node pricing has illegal CostPerCPUHr; replacing with custom pricing: %s", nodeKey)
+		log.Warnf("CostModel: node pricing has illegal CostPerCPUHr; replacing with custom pricing: %s", nodeKey)
 		cpuCostStr := customPricingConfig.CPU
 		if node.Preemptible {
 			cpuCostStr = customPricingConfig.SpotCPU
 		}
 		costPerCPUHr, err := strconv.ParseFloat(cpuCostStr, 64)
 		if err != nil {
-			log.Warningf("CostModel: custom pricing has illegal CPU cost: %s", cpuCostStr)
+			log.Warnf("CostModel: custom pricing has illegal CPU cost: %s", cpuCostStr)
 		}
 		node.CostPerCPUHr = costPerCPUHr
 		node.Source += "/customCPU"
 	}
 
 	if math.IsNaN(node.CostPerGPUHr) {
-		log.Warningf("CostModel: node pricing has illegal CostPerGPUHr; replacing with custom pricing: %s", nodeKey)
+		log.Warnf("CostModel: node pricing has illegal CostPerGPUHr; replacing with custom pricing: %s", nodeKey)
 		gpuCostStr := customPricingConfig.GPU
 		if node.Preemptible {
 			gpuCostStr = customPricingConfig.SpotGPU
 		}
 		costPerGPUHr, err := strconv.ParseFloat(gpuCostStr, 64)
 		if err != nil {
-			log.Warningf("CostModel: custom pricing has illegal GPU cost: %s", gpuCostStr)
+			log.Warnf("CostModel: custom pricing has illegal GPU cost: %s", gpuCostStr)
 		}
 		node.CostPerGPUHr = costPerGPUHr
 		node.Source += "/customGPU"
 	}
 
 	if node.CostPerRAMGiBHr == 0 || math.IsNaN(node.CostPerRAMGiBHr) {
-		log.Warningf("CostModel: node pricing has illegal CostPerRAMHr; replacing with custom pricing: %s", nodeKey)
+		log.Warnf("CostModel: node pricing has illegal CostPerRAMHr; replacing with custom pricing: %s", nodeKey)
 		ramCostStr := customPricingConfig.RAM
 		if node.Preemptible {
 			ramCostStr = customPricingConfig.SpotRAM
 		}
 		costPerRAMHr, err := strconv.ParseFloat(ramCostStr, 64)
 		if err != nil {
-			log.Warningf("CostModel: custom pricing has illegal RAM cost: %s", ramCostStr)
+			log.Warnf("CostModel: custom pricing has illegal RAM cost: %s", ramCostStr)
 		}
 		node.CostPerRAMGiBHr = costPerRAMHr
 		node.Source += "/customRAM"
@@ -2301,19 +2301,19 @@ func (cm *CostModel) getCustomNodePricing(spot bool) *NodePricing {
 
 	costPerCPUHr, err := strconv.ParseFloat(cpuCostStr, 64)
 	if err != nil {
-		log.Warningf("CostModel: custom pricing has illegal CPU cost: %s", cpuCostStr)
+		log.Warnf("CostModel: custom pricing has illegal CPU cost: %s", cpuCostStr)
 	}
 	node.CostPerCPUHr = costPerCPUHr
 
 	costPerGPUHr, err := strconv.ParseFloat(gpuCostStr, 64)
 	if err != nil {
-		log.Warningf("CostModel: custom pricing has illegal GPU cost: %s", gpuCostStr)
+		log.Warnf("CostModel: custom pricing has illegal GPU cost: %s", gpuCostStr)
 	}
 	node.CostPerGPUHr = costPerGPUHr
 
 	costPerRAMHr, err := strconv.ParseFloat(ramCostStr, 64)
 	if err != nil {
-		log.Warningf("CostModel: custom pricing has illegal RAM cost: %s", ramCostStr)
+		log.Warnf("CostModel: custom pricing has illegal RAM cost: %s", ramCostStr)
 	}
 	node.CostPerRAMGiBHr = costPerRAMHr
 

+ 25 - 26
pkg/costmodel/cluster.go

@@ -14,7 +14,6 @@ import (
 	"github.com/kubecost/cost-model/pkg/prom"
 
 	prometheus "github.com/prometheus/client_golang/api"
-	"k8s.io/klog"
 )
 
 const (
@@ -189,7 +188,7 @@ func ClusterDisks(client prometheus.Client, provider cloud.Provider, start, end
 
 		name, err := result.GetString("instance")
 		if err != nil {
-			log.Warningf("ClusterDisks: local storage data missing instance")
+			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
@@ -214,7 +213,7 @@ func ClusterDisks(client prometheus.Client, provider cloud.Provider, start, end
 
 		name, err := result.GetString("instance")
 		if err != nil {
-			log.Warningf("ClusterDisks: local storage usage data missing instance")
+			log.Warnf("ClusterDisks: local storage usage data missing instance")
 			continue
 		}
 
@@ -239,7 +238,7 @@ func ClusterDisks(client prometheus.Client, provider cloud.Provider, start, end
 
 		name, err := result.GetString("instance")
 		if err != nil {
-			log.Warningf("ClusterDisks: local storage data missing instance")
+			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
@@ -441,7 +440,7 @@ func ClusterNodes(cp cloud.Provider, client prometheus.Client, start, end time.T
 
 	if optionalCtx.HasErrors() {
 		for _, err := range optionalCtx.Errors() {
-			log.Warningf("ClusterNodes: %s", err)
+			log.Warnf("ClusterNodes: %s", err)
 		}
 	}
 	if requiredCtx.HasErrors() {
@@ -573,12 +572,12 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 		}
 		namespace, err := result.GetString("namespace")
 		if err != nil {
-			log.Warningf("ClusterLoadBalancers: LB cost data missing namespace")
+			log.Warnf("ClusterLoadBalancers: LB cost data missing namespace")
 			continue
 		}
 		name, err := result.GetString("service_name")
 		if err != nil {
-			log.Warningf("ClusterLoadBalancers: LB cost data missing service_name")
+			log.Warnf("ClusterLoadBalancers: LB cost data missing service_name")
 			continue
 		}
 		providerID, err := result.GetString("ingress_ip")
@@ -629,12 +628,12 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 		}
 		namespace, err := result.GetString("namespace")
 		if err != nil {
-			log.Warningf("ClusterLoadBalancers: LB cost data missing namespace")
+			log.Warnf("ClusterLoadBalancers: LB cost data missing namespace")
 			continue
 		}
 		name, err := result.GetString("service_name")
 		if err != nil {
-			log.Warningf("ClusterLoadBalancers: LB cost data missing service_name")
+			log.Warnf("ClusterLoadBalancers: LB cost data missing service_name")
 			continue
 		}
 
@@ -808,7 +807,7 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider cloud.
 		if len(result.Values) > 0 {
 			dataMins = result.Values[0].Value
 		} else {
-			klog.V(3).Infof("[Warning] cluster cost data count returned no results for cluster %s", clusterID)
+			log.Warnf("Cluster cost data count returned no results for cluster %s", clusterID)
 		}
 		dataMinsByCluster[clusterID] = dataMins
 	}
@@ -884,7 +883,7 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider cloud.
 
 			mode, err := result.GetString("mode")
 			if err != nil {
-				klog.V(3).Infof("[Warning] ComputeClusterCosts: unable to read CPU mode: %s", err)
+				log.Warnf("ComputeClusterCosts: unable to read CPU mode: %s", err)
 				mode = "other"
 			}
 
@@ -958,11 +957,11 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider cloud.
 		dataMins, ok := dataMinsByCluster[id]
 		if !ok {
 			dataMins = mins
-			klog.V(3).Infof("[Warning] cluster cost data count not found for cluster %s", id)
+			log.Warnf("Cluster cost data count not found for cluster %s", id)
 		}
 		costs, err := NewClusterCostsFromCumulative(cd["cpu"], cd["gpu"], cd["ram"], cd["storage"]+cd["localstorage"], window, offset, dataMins/timeutil.MinsPerHour)
 		if err != nil {
-			klog.V(3).Infof("[Warning] Failed to parse cluster costs on %s (%s) from cumulative data: %+v", window, offset, cd)
+			log.Warnf("Failed to parse cluster costs on %s (%s) from cumulative data: %+v", window, offset, cd)
 			return nil, err
 		}
 
@@ -1021,19 +1020,19 @@ func ClusterCostsOverTime(cli prometheus.Client, provider cloud.Provider, startS
 
 	start, err := time.Parse(layout, startString)
 	if err != nil {
-		klog.V(1).Infof("Error parsing time %s. Error: %s", startString, err.Error())
+		log.Errorf("Error parsing time %s. Error: %s", startString, err.Error())
 		return nil, err
 	}
 	end, err := time.Parse(layout, endString)
 	if err != nil {
-		klog.V(1).Infof("Error parsing time %s. Error: %s", endString, err.Error())
+		log.Errorf("Error parsing time %s. Error: %s", endString, err.Error())
 		return nil, err
 	}
 	fmtWindow := timeutil.DurationString(window)
 
 	if fmtWindow == "" {
 		err := fmt.Errorf("window value invalid or missing")
-		klog.V(1).Infof("Error parsing time %v. Error: %s", window, err.Error())
+		log.Errorf("Error parsing time %v. Error: %s", window, err.Error())
 		return nil, err
 	}
 
@@ -1072,19 +1071,19 @@ func ClusterCostsOverTime(cli prometheus.Client, provider cloud.Provider, startS
 
 	coreTotal, err := resultToTotals(resultClusterCores)
 	if err != nil {
-		klog.Infof("[Warning] ClusterCostsOverTime: no cpu data: %s", err)
+		log.Infof("[Warning] ClusterCostsOverTime: no cpu data: %s", err)
 		return nil, err
 	}
 
 	ramTotal, err := resultToTotals(resultClusterRAM)
 	if err != nil {
-		klog.Infof("[Warning] ClusterCostsOverTime: no ram data: %s", err)
+		log.Infof("[Warning] ClusterCostsOverTime: no ram data: %s", err)
 		return nil, err
 	}
 
 	storageTotal, err := resultToTotals(resultStorage)
 	if err != nil {
-		klog.Infof("[Warning] ClusterCostsOverTime: no storage data: %s", err)
+		log.Infof("[Warning] ClusterCostsOverTime: no storage data: %s", err)
 	}
 
 	clusterTotal, err := resultToTotals(resultTotal)
@@ -1096,7 +1095,7 @@ func ClusterCostsOverTime(cli prometheus.Client, provider cloud.Provider, startS
 
 		resultNodes, warnings, err := ctx.QueryRangeSync(qNodes, start, end, window)
 		for _, warning := range warnings {
-			log.Warningf(warning)
+			log.Warnf(warning)
 		}
 		if err != nil {
 			return nil, err
@@ -1104,7 +1103,7 @@ func ClusterCostsOverTime(cli prometheus.Client, provider cloud.Provider, startS
 
 		clusterTotal, err = resultToTotals(resultNodes)
 		if err != nil {
-			klog.Infof("[Warning] ClusterCostsOverTime: no node data: %s", err)
+			log.Infof("[Warning] ClusterCostsOverTime: no node data: %s", err)
 			return nil, err
 		}
 	}
@@ -1126,7 +1125,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 
 		name, err := result.GetString("persistentvolume")
 		if err != nil {
-			log.Warningf("ClusterDisks: active mins missing pv name")
+			log.Warnf("ClusterDisks: active mins missing pv name")
 			continue
 		}
 
@@ -1161,7 +1160,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 
 		name, err := result.GetString("persistentvolume")
 		if err != nil {
-			log.Warningf("ClusterDisks: PV size data missing persistentvolume")
+			log.Warnf("ClusterDisks: PV size data missing persistentvolume")
 			continue
 		}
 
@@ -1182,7 +1181,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 	customPricingEnabled := cloud.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
-		log.Warningf("ClusterDisks: failed to load custom pricing: %s", err)
+		log.Warnf("ClusterDisks: failed to load custom pricing: %s", err)
 	}
 
 	for _, result := range resPVCost {
@@ -1193,7 +1192,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 
 		name, err := result.GetString("persistentvolume")
 		if err != nil {
-			log.Warningf("ClusterDisks: PV cost data missing persistentvolume")
+			log.Warnf("ClusterDisks: PV cost data missing persistentvolume")
 			continue
 		}
 
@@ -1205,7 +1204,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 
 			customPVCost, err := strconv.ParseFloat(customPVCostStr, 64)
 			if err != nil {
-				log.Warningf("ClusterDisks: error parsing custom PV price: %s", customPVCostStr)
+				log.Warnf("ClusterDisks: error parsing custom PV price: %s", customPVCostStr)
 			}
 
 			cost = customPVCost

+ 17 - 17
pkg/costmodel/cluster_helpers.go

@@ -43,7 +43,7 @@ func buildCPUCostMap(
 	customPricingEnabled := cloud.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
-		log.Warningf("ClusterNodes: failed to load custom pricing: %s", err)
+		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
 	}
 
 	for _, result := range resNodeCPUCost {
@@ -54,7 +54,7 @@ func buildCPUCostMap(
 
 		name, err := result.GetString("node")
 		if err != nil {
-			log.Warningf("ClusterNodes: CPU cost data missing node")
+			log.Warnf("ClusterNodes: CPU cost data missing node")
 			continue
 		}
 
@@ -84,7 +84,7 @@ func buildCPUCostMap(
 
 			customCPUCost, err := strconv.ParseFloat(customCPUStr, 64)
 			if err != nil {
-				log.Warningf("ClusterNodes: error parsing custom CPU price: %s", customCPUStr)
+				log.Warnf("ClusterNodes: error parsing custom CPU price: %s", customCPUStr)
 			}
 			cpuCost = customCPUCost
 
@@ -117,7 +117,7 @@ func buildRAMCostMap(
 	customPricingEnabled := cloud.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
-		log.Warningf("ClusterNodes: failed to load custom pricing: %s", err)
+		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
 	}
 
 	for _, result := range resNodeRAMCost {
@@ -128,7 +128,7 @@ func buildRAMCostMap(
 
 		name, err := result.GetString("node")
 		if err != nil {
-			log.Warningf("ClusterNodes: RAM cost data missing node")
+			log.Warnf("ClusterNodes: RAM cost data missing node")
 			continue
 		}
 
@@ -158,7 +158,7 @@ func buildRAMCostMap(
 
 			customRAMCost, err := strconv.ParseFloat(customRAMStr, 64)
 			if err != nil {
-				log.Warningf("ClusterNodes: error parsing custom RAM price: %s", customRAMStr)
+				log.Warnf("ClusterNodes: error parsing custom RAM price: %s", customRAMStr)
 			}
 			ramCost = customRAMCost / 1024 / 1024 / 1024
 
@@ -192,7 +192,7 @@ func buildGPUCostMap(
 	customPricingEnabled := cloud.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
-		log.Warningf("ClusterNodes: failed to load custom pricing: %s", err)
+		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
 	}
 
 	for _, result := range resNodeGPUCost {
@@ -203,7 +203,7 @@ func buildGPUCostMap(
 
 		name, err := result.GetString("node")
 		if err != nil {
-			log.Warningf("ClusterNodes: GPU cost data missing node")
+			log.Warnf("ClusterNodes: GPU cost data missing node")
 			continue
 		}
 
@@ -233,7 +233,7 @@ func buildGPUCostMap(
 
 			customGPUCost, err := strconv.ParseFloat(customGPUStr, 64)
 			if err != nil {
-				log.Warningf("ClusterNodes: error parsing custom GPU price: %s", customGPUStr)
+				log.Warnf("ClusterNodes: error parsing custom GPU price: %s", customGPUStr)
 			}
 			gpuCost = customGPUCost
 
@@ -271,7 +271,7 @@ func buildGPUCountMap(
 
 		name, err := result.GetString("node")
 		if err != nil {
-			log.Warningf("ClusterNodes: GPU count data missing node")
+			log.Warnf("ClusterNodes: GPU count data missing node")
 			continue
 		}
 
@@ -303,7 +303,7 @@ func buildCPUCoresMap(
 
 		name, err := result.GetString("node")
 		if err != nil {
-			log.Warningf("ClusterNodes: CPU cores data missing node")
+			log.Warnf("ClusterNodes: CPU cores data missing node")
 			continue
 		}
 
@@ -331,7 +331,7 @@ func buildRAMBytesMap(resNodeRAMBytes []*prom.QueryResult) map[nodeIdentifierNoP
 
 		name, err := result.GetString("node")
 		if err != nil {
-			log.Warningf("ClusterNodes: RAM bytes data missing node")
+			log.Warnf("ClusterNodes: RAM bytes data missing node")
 			continue
 		}
 
@@ -373,7 +373,7 @@ func buildCPUBreakdownMap(resNodeCPUModeTotal []*prom.QueryResult) map[nodeIdent
 
 		mode, err := result.GetString("mode")
 		if err != nil {
-			log.Warningf("ClusterNodes: unable to read CPU mode: %s", err)
+			log.Warnf("ClusterNodes: unable to read CPU mode: %s", err)
 			mode = "other"
 		}
 
@@ -437,7 +437,7 @@ func buildRAMUserPctMap(resNodeRAMUserPct []*prom.QueryResult) map[nodeIdentifie
 
 		name, err := result.GetString("instance")
 		if err != nil {
-			log.Warningf("ClusterNodes: RAM user percent missing node")
+			log.Warnf("ClusterNodes: RAM user percent missing node")
 			continue
 		}
 
@@ -466,7 +466,7 @@ func buildRAMSystemPctMap(resNodeRAMSystemPct []*prom.QueryResult) map[nodeIdent
 
 		name, err := result.GetString("instance")
 		if err != nil {
-			log.Warningf("ClusterNodes: RAM system percent missing node")
+			log.Warnf("ClusterNodes: RAM system percent missing node")
 			continue
 		}
 
@@ -501,7 +501,7 @@ func buildActiveDataMap(resActiveMins []*prom.QueryResult, resolution time.Durat
 
 		name, err := result.GetString("node")
 		if err != nil {
-			log.Warningf("ClusterNodes: active mins missing node")
+			log.Warnf("ClusterNodes: active mins missing node")
 			continue
 		}
 
@@ -625,7 +625,7 @@ func checkForKeyAndInitIfMissing(
 		}]; ok {
 			nodeType = t
 		} else {
-			log.Warningf("ClusterNodes: Type does not exist for node identifier %s", key)
+			log.Warnf("ClusterNodes: Type does not exist for node identifier %s", key)
 		}
 
 		nodeMap[key] = &Node{

+ 5 - 6
pkg/costmodel/clusterinfo.go

@@ -12,7 +12,6 @@ import (
 	"github.com/kubecost/cost-model/pkg/util/json"
 
 	"k8s.io/client-go/kubernetes"
-	"k8s.io/klog"
 )
 
 var (
@@ -64,12 +63,12 @@ func (dlcip *localClusterInfoProvider) GetClusterInfo() map[string]string {
 	if ok && data != nil {
 		v, err := kc.ServerVersion()
 		if err != nil {
-			klog.Infof("Could not get k8s version info: %s", err.Error())
+			log.Infof("Could not get k8s version info: %s", err.Error())
 		} else if v != nil {
 			data["version"] = v.Major + "." + v.Minor
 		}
 	} else {
-		klog.Infof("Could not get k8s version info: %s", err.Error())
+		log.Infof("Could not get k8s version info: %s", err.Error())
 	}
 
 	writeClusterProfile(data)
@@ -104,7 +103,7 @@ func (ccip *configuredClusterInfoProvider) GetClusterInfo() map[string]string {
 
 	err = json.Unmarshal(data, &clusterInfo)
 	if err != nil {
-		log.Warningf("ClusterInfo failed to load from configuration: %s", err)
+		log.Warnf("ClusterInfo failed to load from configuration: %s", err)
 		return clusterInfo
 	}
 
@@ -131,13 +130,13 @@ func (ciw *clusterInfoWriteOnRequest) GetClusterInfo() map[string]string {
 
 	result, err := json.Marshal(cInfo)
 	if err != nil {
-		log.Warningf("Failed to write the cluster info: %s", err)
+		log.Warnf("Failed to write the cluster info: %s", err)
 		return cInfo
 	}
 
 	err = ciw.config.Write(result)
 	if err != nil {
-		log.Warningf("Failed to write the cluster info to config: %s", err)
+		log.Warnf("Failed to write the cluster info to config: %s", err)
 	}
 
 	return cInfo

+ 2 - 2
pkg/costmodel/clusters/clustermap.go

@@ -156,13 +156,13 @@ func (pcm *PrometheusClusterMap) loadClusters() (map[string]*ClusterInfo, error)
 	for _, result := range qr {
 		id, err := result.GetString("id")
 		if err != nil {
-			log.Warningf("Failed to load 'id' field for ClusterInfo")
+			log.Warnf("Failed to load 'id' field for ClusterInfo")
 			continue
 		}
 
 		name, err := result.GetString("name")
 		if err != nil {
-			log.Warningf("Failed to load 'name' field for ClusterInfo")
+			log.Warnf("Failed to load 'name' field for ClusterInfo")
 			continue
 		}
 

+ 89 - 90
pkg/costmodel/costmodel.go

@@ -21,7 +21,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/klog"
 
 	"golang.org/x/sync/singleflight"
 )
@@ -299,7 +298,7 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 
 		// ErrorCollection is an collection of errors wrapped in a single error implementation
 		// We opt to not return an error for the sake of running as a pure exporter.
-		log.Warningf("ComputeCostData: continuing despite prometheus errors: %s", ctx.ErrorCollection().Error())
+		log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", ctx.ErrorCollection().Error())
 	}
 
 	defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")
@@ -307,7 +306,7 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 	normalizationValue, err := getNormalization(resNormalization)
 	if err != nil {
 		// We opt to not return an error for the sake of running as a pure exporter.
-		log.Warningf("ComputeCostData: continuing despite error parsing normalization values from %s: %s", queryNormalization, err.Error())
+		log.Warnf("ComputeCostData: continuing despite error parsing normalization values from %s: %s", queryNormalization, err.Error())
 	}
 
 	// Determine if there are vgpus configured and if so get the total allocatable number
@@ -320,7 +319,7 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 
 	nodes, err := cm.GetNodeCost(cp)
 	if err != nil {
-		log.Warningf("GetNodeCost: no node cost model available: " + err.Error())
+		log.Warnf("GetNodeCost: no node cost model available: " + err.Error())
 		return nil, err
 	}
 
@@ -328,7 +327,7 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
 	pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
 	if err != nil {
-		log.Warningf("GetPVInfo: unable to get PV data: %s", err.Error())
+		log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
 	}
 	if pvClaimMapping != nil {
 		err = addPVData(cm.Cache, pvClaimMapping, cp)
@@ -343,7 +342,7 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 
 	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
 	if err != nil {
-		klog.V(1).Infof("[Warning] Unable to get Network Cost Data: %s", err.Error())
+		log.Warnf("Unable to get Network Cost Data: %s", err.Error())
 		networkUsageMap = make(map[string]*NetworkUsageData)
 	}
 
@@ -452,7 +451,7 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 			if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
 				netCosts, err := GetNetworkCost(usage, cp)
 				if err != nil {
-					klog.V(4).Infof("Error pulling network costs: %s", err.Error())
+					log.Debugf("Error pulling network costs: %s", err.Error())
 				} else {
 					podNetCosts = netCosts
 				}
@@ -521,13 +520,13 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 
 				RAMUsedV, ok := RAMUsedMap[newKey]
 				if !ok {
-					klog.V(4).Info("no RAM usage for " + newKey)
+					log.Debug("no RAM usage for " + newKey)
 					RAMUsedV = []*util.Vector{{}}
 				}
 
 				CPUUsedV, ok := CPUUsedMap[newKey]
 				if !ok {
-					klog.V(4).Info("no CPU usage for " + newKey)
+					log.Debug("no CPU usage for " + newKey)
 					CPUUsedV = []*util.Vector{{}}
 				}
 
@@ -572,7 +571,7 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 			}
 		} else {
 			// The container has been deleted. Not all information is sent to prometheus via ksm, so fill out what we can without k8s api
-			klog.V(4).Info("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
+			log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
 			c, err := NewContainerMetricFromKey(key)
 			if err != nil {
 				return nil, err
@@ -590,19 +589,19 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 
 			RAMUsedV, ok := RAMUsedMap[key]
 			if !ok {
-				klog.V(4).Info("no RAM usage for " + key)
+				log.Debug("no RAM usage for " + key)
 				RAMUsedV = []*util.Vector{{}}
 			}
 
 			CPUUsedV, ok := CPUUsedMap[key]
 			if !ok {
-				klog.V(4).Info("no CPU usage for " + key)
+				log.Debug("no CPU usage for " + key)
 				CPUUsedV = []*util.Vector{{}}
 			}
 
 			node, ok := nodes[c.NodeName]
 			if !ok {
-				klog.V(4).Infof("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
+				log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
 				if n, ok := missingNodes[c.NodeName]; ok {
 					node = n
 				} else {
@@ -645,7 +644,7 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 	// to pass along the cost data
 	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
 	for k, costs := range unmounted {
-		klog.V(4).Infof("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
+		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
 
 		if filterNamespace == "" {
 			containerNameCost[k] = costs
@@ -656,12 +655,12 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 
 	err = findDeletedNodeInfo(cli, missingNodes, window, "")
 	if err != nil {
-		klog.V(1).Infof("Error fetching historical node data: %s", err.Error())
+		log.Errorf("Error fetching historical node data: %s", err.Error())
 	}
 
 	err = findDeletedPodInfo(cli, missingContainers, window)
 	if err != nil {
-		klog.V(1).Infof("Error fetching historical pod data: %s", err.Error())
+		log.Errorf("Error fetching historical pod data: %s", err.Error())
 	}
 	return containerNameCost, err
 }
@@ -675,7 +674,7 @@ func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[st
 	for k, pv := range unmountedPVs {
 		keyParts := strings.Split(k, ",")
 		if len(keyParts) != 3 {
-			klog.V(1).Infof("Unmounted PV used key with incorrect parts: %s", k)
+			log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
 			continue
 		}
 
@@ -783,7 +782,7 @@ func findDeletedNodeInfo(cli prometheusClient.Client, missingNodes map[string]*c
 		}
 
 		if len(cpuCosts) == 0 {
-			klog.V(1).Infof("Kubecost prometheus metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
+			log.Infof("Kubecost prometheus metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
 		}
 
 		for node, costv := range cpuCosts {
@@ -814,12 +813,12 @@ func getContainerAllocation(req []*util.Vector, used []*util.Vector, allocationT
 		if x != nil && y != nil {
 			x1 := *x
 			if math.IsNaN(x1) {
-				klog.V(1).Infof("[Warning] NaN value found during %s allocation calculation for requests.", allocationType)
+				log.Warnf("NaN value found during %s allocation calculation for requests.", allocationType)
 				x1 = 0.0
 			}
 			y1 := *y
 			if math.IsNaN(y1) {
-				klog.V(1).Infof("[Warning] NaN value found during %s allocation calculation for used.", allocationType)
+				log.Warnf("NaN value found during %s allocation calculation for used.", allocationType)
 				y1 = 0.0
 			}
 
@@ -864,7 +863,7 @@ func addPVData(cache clustercache.ClusterCache, pvClaimMapping map[string]*Persi
 	for _, pv := range pvs {
 		parameters, ok := storageClassMap[pv.Spec.StorageClassName]
 		if !ok {
-			klog.V(4).Infof("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
+			log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
 		}
 		var region string
 		if r, ok := util.GetRegion(pv.Labels); ok {
@@ -889,7 +888,7 @@ func addPVData(cache clustercache.ClusterCache, pvClaimMapping map[string]*Persi
 		if vol, ok := pvMap[pvc.VolumeName]; ok {
 			pvc.Volume = vol
 		} else {
-			klog.V(4).Infof("PV not found, using default")
+			log.Debugf("PV not found, using default")
 			pvc.Volume = &costAnalyzerCloud.PV{
 				Cost: cfg.Storage,
 			}
@@ -955,7 +954,7 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 
 		cnode, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
 		if err != nil {
-			klog.Infof("Error getting node pricing. Error: %s", err.Error())
+			log.Infof("Error getting node pricing. Error: %s", err.Error())
 			if cnode != nil {
 				nodes[name] = cnode
 				continue
@@ -991,11 +990,11 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 		} else {
 			cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
 			if err != nil {
-				klog.V(1).Infof("[Warning] parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
+				log.Warnf("parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
 			}
 		}
 		if math.IsNaN(cpu) {
-			klog.V(1).Infof("[Warning] cpu parsed as NaN. Setting to 0.")
+			log.Warnf("cpu parsed as NaN. Setting to 0.")
 			cpu = 0
 		}
 
@@ -1005,7 +1004,7 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 		}
 		ram = float64(n.Status.Capacity.Memory().Value())
 		if math.IsNaN(ram) {
-			klog.V(1).Infof("[Warning] ram parsed as NaN. Setting to 0.")
+			log.Warnf("ram parsed as NaN. Setting to 0.")
 			ram = 0
 		}
 
@@ -1035,65 +1034,65 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 			}
 		}
 		if math.IsNaN(gpuc) {
-			klog.V(1).Infof("[Warning] gpu count parsed as NaN. Setting to 0.")
+			log.Warnf("gpu count parsed as NaN. Setting to 0.")
 			gpuc = 0.0
 		}
 
 		if newCnode.GPU != "" && newCnode.GPUCost == "" {
 			// We couldn't find a gpu cost, so fix cpu and ram, then accordingly
-			klog.V(4).Infof("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
+			log.Debugf("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
 
 			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
 			if err != nil {
-				klog.V(3).Infof("Could not parse default cpu price")
+				log.Errorf("Could not parse default cpu price")
 				defaultCPU = 0
 			}
 			if math.IsNaN(defaultCPU) {
-				klog.V(1).Infof("[Warning] defaultCPU parsed as NaN. Setting to 0.")
+				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
 				defaultCPU = 0
 			}
 
 			defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
 			if err != nil {
-				klog.V(3).Infof("Could not parse default ram price")
+				log.Errorf("Could not parse default ram price")
 				defaultRAM = 0
 			}
 			if math.IsNaN(defaultRAM) {
-				klog.V(1).Infof("[Warning] defaultRAM parsed as NaN. Setting to 0.")
+				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
 				defaultRAM = 0
 			}
 
 			defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
 			if err != nil {
-				klog.V(3).Infof("Could not parse default gpu price")
+				log.Errorf("Could not parse default gpu price")
 				defaultGPU = 0
 			}
 			if math.IsNaN(defaultGPU) {
-				klog.V(1).Infof("[Warning] defaultGPU parsed as NaN. Setting to 0.")
+				log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
 				defaultGPU = 0
 			}
 
 			cpuToRAMRatio := defaultCPU / defaultRAM
 			if math.IsNaN(cpuToRAMRatio) {
-				klog.V(1).Infof("[Warning] cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
+				log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
 				cpuToRAMRatio = 0
 			}
 
 			gpuToRAMRatio := defaultGPU / defaultRAM
 			if math.IsNaN(gpuToRAMRatio) {
-				klog.V(1).Infof("[Warning] gpuToRAMRatio is NaN. Setting to 0.")
+				log.Warnf("gpuToRAMRatio is NaN. Setting to 0.")
 				gpuToRAMRatio = 0
 			}
 
 			ramGB := ram / 1024 / 1024 / 1024
 			if math.IsNaN(ramGB) {
-				klog.V(1).Infof("[Warning] ramGB is NaN. Setting to 0.")
+				log.Warnf("ramGB is NaN. Setting to 0.")
 				ramGB = 0
 			}
 
 			ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
 			if math.IsNaN(ramMultiple) {
-				klog.V(1).Infof("[Warning] ramMultiple is NaN. Setting to 0.")
+				log.Warnf("ramMultiple is NaN. Setting to 0.")
 				ramMultiple = 0
 			}
 
@@ -1101,24 +1100,24 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 			if newCnode.Cost != "" {
 				nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
 				if err != nil {
-					klog.V(3).Infof("Could not parse total node price")
+					log.Errorf("Could not parse total node price")
 					return nil, err
 				}
 			} else {
 				nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
 				if err != nil {
-					klog.V(3).Infof("Could not parse node vcpu price")
+					log.Errorf("Could not parse node vcpu price")
 					return nil, err
 				}
 			}
 			if math.IsNaN(nodePrice) {
-				klog.V(1).Infof("[Warning] nodePrice parsed as NaN. Setting to 0.")
+				log.Warnf("nodePrice parsed as NaN. Setting to 0.")
 				nodePrice = 0
 			}
 
 			ramPrice := (nodePrice / ramMultiple)
 			if math.IsNaN(ramPrice) {
-				klog.V(1).Infof("[Warning] ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
+				log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
 				ramPrice = 0
 			}
 
@@ -1131,43 +1130,43 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 			newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
 		} else if newCnode.RAMCost == "" {
 			// We couldn't find a ramcost, so fix cpu and allocate ram accordingly
-			klog.V(4).Infof("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
+			log.Debugf("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
 
 			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
 			if err != nil {
-				klog.V(3).Infof("Could not parse default cpu price")
+				log.Warnf("Could not parse default cpu price")
 				defaultCPU = 0
 			}
 			if math.IsNaN(defaultCPU) {
-				klog.V(1).Infof("[Warning] defaultCPU parsed as NaN. Setting to 0.")
+				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
 				defaultCPU = 0
 			}
 
 			defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
 			if err != nil {
-				klog.V(3).Infof("Could not parse default ram price")
+				log.Warnf("Could not parse default ram price")
 				defaultRAM = 0
 			}
 			if math.IsNaN(defaultRAM) {
-				klog.V(1).Infof("[Warning] defaultRAM parsed as NaN. Setting to 0.")
+				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
 				defaultRAM = 0
 			}
 
 			cpuToRAMRatio := defaultCPU / defaultRAM
 			if math.IsNaN(cpuToRAMRatio) {
-				klog.V(1).Infof("[Warning] cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
+				log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
 				cpuToRAMRatio = 0
 			}
 
 			ramGB := ram / 1024 / 1024 / 1024
 			if math.IsNaN(ramGB) {
-				klog.V(1).Infof("[Warning] ramGB is NaN. Setting to 0.")
+				log.Warnf("ramGB is NaN. Setting to 0.")
 				ramGB = 0
 			}
 
 			ramMultiple := cpu*cpuToRAMRatio + ramGB
 			if math.IsNaN(ramMultiple) {
-				klog.V(1).Infof("[Warning] ramMultiple is NaN. Setting to 0.")
+				log.Warnf("ramMultiple is NaN. Setting to 0.")
 				ramMultiple = 0
 			}
 
@@ -1175,24 +1174,24 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 			if newCnode.Cost != "" {
 				nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
 				if err != nil {
-					klog.V(3).Infof("Could not parse total node price")
+					log.Warnf("Could not parse total node price")
 					return nil, err
 				}
 			} else {
 				nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
 				if err != nil {
-					klog.V(3).Infof("Could not parse node vcpu price")
+					log.Warnf("Could not parse node vcpu price")
 					return nil, err
 				}
 			}
 			if math.IsNaN(nodePrice) {
-				klog.V(1).Infof("[Warning] nodePrice parsed as NaN. Setting to 0.")
+				log.Warnf("nodePrice parsed as NaN. Setting to 0.")
 				nodePrice = 0
 			}
 
 			ramPrice := (nodePrice / ramMultiple)
 			if math.IsNaN(ramPrice) {
-				klog.V(1).Infof("[Warning] ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
+				log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
 				ramPrice = 0
 			}
 
@@ -1210,7 +1209,7 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 			}
 			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
 
-			klog.V(4).Infof("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
+			log.Debugf("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
 		}
 
 		nodes[name] = &newCnode
@@ -1300,7 +1299,7 @@ func getPodStatefulsets(cache clustercache.ClusterCache, podList []*v1.Pod, clus
 		}
 		s, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
 		if err != nil {
-			klog.V(2).Infof("Error doing deployment label conversion: " + err.Error())
+			log.Errorf("Error doing deployment label conversion: " + err.Error())
 		}
 		for _, pod := range podList {
 			labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
@@ -1331,7 +1330,7 @@ func getPodDeployments(cache clustercache.ClusterCache, podList []*v1.Pod, clust
 		}
 		s, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 		if err != nil {
-			klog.V(2).Infof("Error doing deployment label conversion: " + err.Error())
+			log.Errorf("Error doing deployment label conversion: " + err.Error())
 		}
 		for _, pod := range podList {
 			labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
@@ -1530,7 +1529,7 @@ func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAn
 	// for the specific inputs to prevent multiple queries for identical data.
 	key := requestKeyFor(window, resolution, filterNamespace, filterCluster, remoteEnabled)
 
-	klog.V(4).Infof("ComputeCostDataRange with Key: %s", key)
+	log.Debugf("ComputeCostDataRange with Key: %s", key)
 
 	// If there is already a request out that uses the same data, wait for it to return to share the results.
 	// Otherwise, start executing.
@@ -1566,7 +1565,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	// Warn if resolution does not evenly divide window
 	if int64(window.Minutes())%int64(resolution.Minutes()) != 0 {
-		log.Warningf("CostDataRange: window should be divisible by resolution or else samples may be missed: %s %% %s = %dm", window, resolution, int64(window.Minutes())%int64(resolution.Minutes()))
+		log.Warnf("CostDataRange: window should be divisible by resolution or else samples may be missed: %s %% %s = %dm", window, resolution, int64(window.Minutes())%int64(resolution.Minutes()))
 	}
 
 	// Convert to Prometheus-style duration string in terms of m or h
@@ -1579,7 +1578,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 		remoteLayout := "2006-01-02T15:04:05Z"
 		remoteStartStr := window.Start().Format(remoteLayout)
 		remoteEndStr := window.End().Format(remoteLayout)
-		klog.V(1).Infof("Using remote database for query from %s to %s with window %s", remoteStartStr, remoteEndStr, resolution)
+		log.Infof("Using remote database for query from %s to %s with window %s", remoteStartStr, remoteEndStr, resolution)
 		return CostDataRangeFromSQL("", "", resolution.String(), remoteStartStr, remoteEndStr)
 	}
 
@@ -1707,7 +1706,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 	pvClaimMapping, err := GetPVInfo(resPVRequests, clusterID)
 	if err != nil {
 		// Just log for compatibility with KSM less than 1.6
-		klog.Infof("Unable to get PV Data: %s", err.Error())
+		log.Infof("Unable to get PV Data: %s", err.Error())
 	}
 	if pvClaimMapping != nil {
 		err = addPVData(cm.Cache, pvClaimMapping, cp)
@@ -1718,13 +1717,13 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	pvCostMapping, err := GetPVCostMetrics(resPVHourlyCost, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get PV Hourly Cost Data: %s", err.Error())
+		log.Errorf("Unable to get PV Hourly Cost Data: %s", err.Error())
 	}
 
 	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
 	pvAllocationMapping, err := GetPVAllocationMetrics(resPVCAlloc, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get PV Allocation Cost Data: %s", err.Error())
+		log.Errorf("Unable to get PV Allocation Cost Data: %s", err.Error())
 	}
 	if pvAllocationMapping != nil {
 		addMetricPVData(pvAllocationMapping, pvCostMapping, cp)
@@ -1735,7 +1734,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	nsLabels, err := GetNamespaceLabelsMetrics(resNSLabels, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Namespace Labels for Metrics: %s", err.Error())
+		log.Errorf("Unable to get Namespace Labels for Metrics: %s", err.Error())
 	}
 	if nsLabels != nil {
 		mergeStringMap(namespaceLabelsMapping, nsLabels)
@@ -1743,12 +1742,12 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	podLabels, err := GetPodLabelsMetrics(resPodLabels, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Pod Labels for Metrics: %s", err.Error())
+		log.Errorf("Unable to get Pod Labels for Metrics: %s", err.Error())
 	}
 
 	nsAnnotations, err := GetNamespaceAnnotationsMetrics(resNSAnnotations, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Namespace Annotations for Metrics: %s", err.Error())
+		log.Errorf("Unable to get Namespace Annotations for Metrics: %s", err.Error())
 	}
 	if nsAnnotations != nil {
 		mergeStringMap(namespaceAnnotationsMapping, nsAnnotations)
@@ -1756,55 +1755,55 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	podAnnotations, err := GetPodAnnotationsMetrics(resPodAnnotations, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Pod Annotations for Metrics: %s", err.Error())
+		log.Errorf("Unable to get Pod Annotations for Metrics: %s", err.Error())
 	}
 
 	serviceLabels, err := GetServiceSelectorLabelsMetrics(resServiceLabels, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Service Selector Labels for Metrics: %s", err.Error())
+		log.Errorf("Unable to get Service Selector Labels for Metrics: %s", err.Error())
 	}
 
 	deploymentLabels, err := GetDeploymentMatchLabelsMetrics(resDeploymentLabels, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
+		log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
 	}
 
 	statefulsetLabels, err := GetStatefulsetMatchLabelsMetrics(resStatefulsetLabels, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
+		log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
 	}
 
 	podStatefulsetMetricsMapping, err := getPodDeploymentsWithMetrics(statefulsetLabels, podLabels)
 	if err != nil {
-		klog.V(1).Infof("Unable to get match Statefulset Labels Metrics to Pods: %s", err.Error())
+		log.Errorf("Unable to get match Statefulset Labels Metrics to Pods: %s", err.Error())
 	}
 	appendLabelsList(podStatefulsetsMapping, podStatefulsetMetricsMapping)
 
 	podDeploymentsMetricsMapping, err := getPodDeploymentsWithMetrics(deploymentLabels, podLabels)
 	if err != nil {
-		klog.V(1).Infof("Unable to get match Deployment Labels Metrics to Pods: %s", err.Error())
+		log.Errorf("Unable to get match Deployment Labels Metrics to Pods: %s", err.Error())
 	}
 	appendLabelsList(podDeploymentsMapping, podDeploymentsMetricsMapping)
 
 	podDaemonsets, err := GetPodDaemonsetsWithMetrics(resDaemonsets, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Pod Daemonsets for Metrics: %s", err.Error())
+		log.Errorf("Unable to get Pod Daemonsets for Metrics: %s", err.Error())
 	}
 
 	podJobs, err := GetPodJobsWithMetrics(resJobs, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Pod Jobs for Metrics: %s", err.Error())
+		log.Errorf("Unable to get Pod Jobs for Metrics: %s", err.Error())
 	}
 
 	podServicesMetricsMapping, err := getPodServicesWithMetrics(serviceLabels, podLabels)
 	if err != nil {
-		klog.V(1).Infof("Unable to get match Service Labels Metrics to Pods: %s", err.Error())
+		log.Errorf("Unable to get match Service Labels Metrics to Pods: %s", err.Error())
 	}
 	appendLabelsList(podServicesMapping, podServicesMetricsMapping)
 
 	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
 	if err != nil {
-		klog.V(1).Infof("Unable to get Network Cost Data: %s", err.Error())
+		log.Errorf("Unable to get Network Cost Data: %s", err.Error())
 		networkUsageMap = make(map[string]*NetworkUsageData)
 	}
 
@@ -1885,37 +1884,37 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 		c, _ := NewContainerMetricFromKey(key)
 		RAMReqV, ok := RAMReqMap[key]
 		if !ok {
-			klog.V(4).Info("no RAM requests for " + key)
+			log.Debug("no RAM requests for " + key)
 			RAMReqV = []*util.Vector{}
 		}
 		RAMUsedV, ok := RAMUsedMap[key]
 		if !ok {
-			klog.V(4).Info("no RAM usage for " + key)
+			log.Debug("no RAM usage for " + key)
 			RAMUsedV = []*util.Vector{}
 		}
 		CPUReqV, ok := CPUReqMap[key]
 		if !ok {
-			klog.V(4).Info("no CPU requests for " + key)
+			log.Debug("no CPU requests for " + key)
 			CPUReqV = []*util.Vector{}
 		}
 		CPUUsedV, ok := CPUUsedMap[key]
 		if !ok {
-			klog.V(4).Info("no CPU usage for " + key)
+			log.Debug("no CPU usage for " + key)
 			CPUUsedV = []*util.Vector{}
 		}
 		RAMAllocsV, ok := RAMAllocMap[key]
 		if !ok {
-			klog.V(4).Info("no RAM allocation for " + key)
+			log.Debug("no RAM allocation for " + key)
 			RAMAllocsV = []*util.Vector{}
 		}
 		CPUAllocsV, ok := CPUAllocMap[key]
 		if !ok {
-			klog.V(4).Info("no CPU allocation for " + key)
+			log.Debug("no CPU allocation for " + key)
 			CPUAllocsV = []*util.Vector{}
 		}
 		GPUReqV, ok := GPUReqMap[key]
 		if !ok {
-			klog.V(4).Info("no GPU requests for " + key)
+			log.Debug("no GPU requests for " + key)
 			GPUReqV = []*util.Vector{}
 		}
 
@@ -1992,7 +1991,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 		// the pod_pvc_allocation metric
 		podPVData, ok := pvAllocationMapping[podKey]
 		if !ok {
-			klog.V(4).Infof("Failed to locate pv allocation mapping for missing pod.")
+			log.Debugf("Failed to locate pv allocation mapping for missing pod.")
 		}
 
 		// Delete the current pod key from potentially unmounted pvs
@@ -2004,7 +2003,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 		if usage, ok := networkUsageMap[podKey]; ok {
 			netCosts, err := GetNetworkCost(usage, cp)
 			if err != nil {
-				klog.V(3).Infof("Error pulling network costs: %s", err.Error())
+				log.Errorf("Error pulling network costs: %s", err.Error())
 			} else {
 				podNetworkCosts = netCosts
 			}
@@ -2064,7 +2063,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
 	for k, costs := range unmounted {
-		klog.V(4).Infof("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
+		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
 
 		if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
 			containerNameCost[k] = costs
@@ -2075,7 +2074,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 		dur, off := window.DurationOffsetStrings()
 		err = findDeletedNodeInfo(cli, missingNodes, dur, off)
 		if err != nil {
-			klog.V(1).Infof("Error fetching historical node data: %s", err.Error())
+			log.Errorf("Error fetching historical node data: %s", err.Error())
 		}
 	}
 
@@ -2118,7 +2117,7 @@ func applyAllocationToRequests(allocationMap map[string][]*util.Vector, requestM
 func addMetricPVData(pvAllocationMap map[string][]*PersistentVolumeClaimData, pvCostMap map[string]*costAnalyzerCloud.PV, cp costAnalyzerCloud.Provider) {
 	cfg, err := cp.GetConfig()
 	if err != nil {
-		klog.V(1).Infof("Failed to get provider config while adding pv metrics data.")
+		log.Errorf("Failed to get provider config while adding pv metrics data.")
 		return
 	}
 
@@ -2218,7 +2217,7 @@ func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
 					if strings.Contains(arg, "--vgpu=") {
 						vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
 						if err != nil {
-							klog.V(1).Infof("failed to parse vgpu allocation string %s: %v", arg, err)
+							log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
 							continue
 						}
 						vgpuCount = vgpus
@@ -2246,7 +2245,7 @@ type PersistentVolumeClaimData struct {
 func measureTime(start time.Time, threshold time.Duration, name string) {
 	elapsed := time.Since(start)
 	if elapsed > threshold {
-		klog.V(3).Infof("[Profiler] %s: %s", elapsed, name)
+		log.Infof("[Profiler] %s: %s", elapsed, name)
 	}
 }
 

+ 22 - 24
pkg/costmodel/metrics.go

@@ -22,8 +22,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	v1 "k8s.io/api/core/v1"
-
-	"k8s.io/klog"
 )
 
 //--------------------------------------------------------------------------
@@ -426,7 +424,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 		}
 
 		for {
-			klog.V(4).Info("Recording prices...")
+			log.Debugf("Recording prices...")
 			podlist := cmme.KubeClusterCache.GetAllPods()
 			podStatus := make(map[string]v1.PodPhase)
 			for _, pod := range podlist {
@@ -437,14 +435,14 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 
 			provisioner, clusterManagementCost, err := cmme.CloudProvider.ClusterManagementPricing()
 			if err != nil {
-				klog.V(1).Infof("Error getting cluster management cost %s", err.Error())
+				log.Errorf("Error getting cluster management cost %s", err.Error())
 			}
 			cmme.ClusterManagementCostRecorder.WithLabelValues(provisioner).Set(clusterManagementCost)
 
 			// Record network pricing at global scope
 			networkCosts, err := cmme.CloudProvider.NetworkPricing()
 			if err != nil {
-				klog.V(4).Infof("Failed to retrieve network costs: %s", err.Error())
+				log.Debugf("Failed to retrieve network costs: %s", err.Error())
 			} else {
 				cmme.NetworkZoneEgressRecorder.Set(networkCosts.ZoneNetworkEgressCost)
 				cmme.NetworkRegionEgressRecorder.Set(networkCosts.RegionNetworkEgressCost)
@@ -471,7 +469,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 			// TODO: Pass CloudProvider into CostModel on instantiation so this isn't so awkward
 			nodes, err := cmme.Model.GetNodeCost(cmme.CloudProvider)
 			if err != nil {
-				log.Warningf("Metric emission: error getting Node cost: %s", err)
+				log.Warnf("Metric emission: error getting Node cost: %s", err)
 			}
 			for nodeName, node := range nodes {
 				// Emit costs, guarding against NaN inputs for custom pricing.
@@ -540,14 +538,14 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 					avgCosts.CpuCostAverage = (avgCosts.CpuCostAverage*avgCosts.NumCpuDataPoints + cpuCost) / (avgCosts.NumCpuDataPoints + 1)
 					avgCosts.NumCpuDataPoints += 1
 				} else {
-					log.Warningf("CPU cost outlier detected; skipping data point.")
+					log.Warnf("CPU cost outlier detected; skipping data point.")
 				}
 				if ramCost < outlierFactor*avgCosts.RamCostAverage {
 					cmme.RAMPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(ramCost)
 					avgCosts.RamCostAverage = (avgCosts.RamCostAverage*avgCosts.NumRamDataPoints + ramCost) / (avgCosts.NumRamDataPoints + 1)
 					avgCosts.NumRamDataPoints += 1
 				} else {
-					log.Warningf("RAM cost outlier detected; skipping data point.")
+					log.Warnf("RAM cost outlier detected; skipping data point.")
 				}
 				// skip redording totalCost if any constituent costs were outliers
 				if cpuCost < outlierFactor*avgCosts.CpuCostAverage &&
@@ -568,7 +566,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 			// TODO: Pass CloudProvider into CostModel on instantiation so this isn't so awkward
 			loadBalancers, err := cmme.Model.GetLBCost(cmme.CloudProvider)
 			if err != nil {
-				log.Warningf("Metric emission: error getting LoadBalancer cost: %s", err)
+				log.Warnf("Metric emission: error getting LoadBalancer cost: %s", err)
 			}
 			for lbKey, lb := range loadBalancers {
 				// TODO: parse (if necessary) and calculate cost associated with loadBalancer based on dynamic cloud prices fetched into each lb struct on GetLBCost() call
@@ -644,7 +642,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 
 				parameters, ok := storageClassMap[pv.Spec.StorageClassName]
 				if !ok {
-					klog.V(4).Infof("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
+					log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
 				}
 				var region string
 				if r, ok := util.GetRegion(pv.Labels); ok {
@@ -668,43 +666,43 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 
 			for labelString, seen := range nodeSeen {
 				if !seen {
-					klog.V(4).Infof("Removing %s from nodes", labelString)
+					log.Debugf("Removing %s from nodes", labelString)
 					labels := getLabelStringsFromKey(labelString)
 					ok := cmme.NodeTotalPriceRecorder.DeleteLabelValues(labels...)
 					if ok {
-						klog.V(4).Infof("removed %s from totalprice", labelString)
+						log.Debugf("removed %s from totalprice", labelString)
 					} else {
-						klog.Infof("FAILURE TO REMOVE %s from totalprice", labelString)
+						log.Infof("FAILURE TO REMOVE %s from totalprice", labelString)
 					}
 					ok = cmme.NodeSpotRecorder.DeleteLabelValues(labels...)
 					if ok {
-						klog.V(4).Infof("removed %s from spot records", labelString)
+						log.Debugf("removed %s from spot records", labelString)
 					} else {
-						klog.Infof("FAILURE TO REMOVE %s from spot records", labelString)
+						log.Infof("FAILURE TO REMOVE %s from spot records", labelString)
 					}
 					ok = cmme.CPUPriceRecorder.DeleteLabelValues(labels...)
 					if ok {
-						klog.V(4).Infof("removed %s from cpuprice", labelString)
+						log.Debugf("removed %s from cpuprice", labelString)
 					} else {
-						klog.Infof("FAILURE TO REMOVE %s from cpuprice", labelString)
+						log.Infof("FAILURE TO REMOVE %s from cpuprice", labelString)
 					}
 					ok = cmme.GPUPriceRecorder.DeleteLabelValues(labels...)
 					if ok {
-						klog.V(4).Infof("removed %s from gpuprice", labelString)
+						log.Debugf("removed %s from gpuprice", labelString)
 					} else {
-						klog.Infof("FAILURE TO REMOVE %s from gpuprice", labelString)
+						log.Infof("FAILURE TO REMOVE %s from gpuprice", labelString)
 					}
 					ok = cmme.GPUCountRecorder.DeleteLabelValues(labels...)
 					if ok {
-						klog.V(4).Infof("removed %s from gpucount", labelString)
+						log.Debugf("removed %s from gpucount", labelString)
 					} else {
-						klog.Infof("FAILURE TO REMOVE %s from gpucount", labelString)
+						log.Infof("FAILURE TO REMOVE %s from gpucount", labelString)
 					}
 					ok = cmme.RAMPriceRecorder.DeleteLabelValues(labels...)
 					if ok {
-						klog.V(4).Infof("removed %s from ramprice", labelString)
+						log.Debugf("removed %s from ramprice", labelString)
 					} else {
-						klog.Infof("FAILURE TO REMOVE %s from ramprice", labelString)
+						log.Infof("FAILURE TO REMOVE %s from ramprice", labelString)
 					}
 					delete(nodeSeen, labelString)
 					delete(nodeCostAverages, labelString)
@@ -717,7 +715,7 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 					labels := getLabelStringsFromKey(labelString)
 					ok := cmme.LBCostRecorder.DeleteLabelValues(labels...)
 					if !ok {
-						log.Warningf("Metric emission: failed to delete LoadBalancer with labels: %v", labels)
+						log.Warnf("Metric emission: failed to delete LoadBalancer with labels: %v", labels)
 					}
 					delete(loadBalancerSeen, labelString)
 				} else {

+ 1 - 1
pkg/costmodel/promparsers.go

@@ -118,7 +118,7 @@ func GetPVAllocationMetrics(qrs []*prom.QueryResult, defaultClusterID string) (m
 
 		pvName, err := val.GetString("persistentvolume")
 		if err != nil {
-			log.Warningf("persistentvolume field does not exist for pv %s", pvcName) // This is possible for an unfulfilled claim
+			log.Warnf("persistentvolume field does not exist for pv %s", pvcName) // This is possible for an unfulfilled claim
 			continue
 		}
 

+ 31 - 32
pkg/costmodel/router.go

@@ -21,7 +21,6 @@ import (
 	"github.com/microcosm-cc/bluemonday"
 
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/klog"
 
 	"github.com/julienschmidt/httprouter"
 
@@ -200,7 +199,7 @@ func filterFields(fields string, data map[string]*CostData) map[string]CostData
 	fmap := make(map[string]bool)
 	for _, f := range fs {
 		fieldNameLower := strings.ToLower(f) // convert to go struct name by uppercasing first letter
-		klog.V(1).Infof("to delete: %s", fieldNameLower)
+		log.Debugf("to delete: %s", fieldNameLower)
 		fmap[fieldNameLower] = true
 	}
 	filteredData := make(map[string]CostData)
@@ -264,7 +263,7 @@ func WrapData(data interface{}, err error) []byte {
 	var resp []byte
 
 	if err != nil {
-		klog.V(1).Infof("Error returned to client: %s", err.Error())
+		log.Errorf("Error returned to client: %s", err.Error())
 		resp, _ = json.Marshal(&Response{
 			Code:    http.StatusInternalServerError,
 			Status:  "error",
@@ -286,7 +285,7 @@ func WrapDataWithMessage(data interface{}, err error, message string) []byte {
 	var resp []byte
 
 	if err != nil {
-		klog.V(1).Infof("Error returned to client: %s", err.Error())
+		log.Errorf("Error returned to client: %s", err.Error())
 		resp, _ = json.Marshal(&Response{
 			Code:    http.StatusInternalServerError,
 			Status:  "error",
@@ -309,7 +308,7 @@ func WrapDataWithWarning(data interface{}, err error, warning string) []byte {
 	var resp []byte
 
 	if err != nil {
-		klog.V(1).Infof("Error returned to client: %s", err.Error())
+		log.Errorf("Error returned to client: %s", err.Error())
 		resp, _ = json.Marshal(&Response{
 			Code:    http.StatusInternalServerError,
 			Status:  "error",
@@ -333,7 +332,7 @@ func WrapDataWithMessageAndWarning(data interface{}, err error, message, warning
 	var resp []byte
 
 	if err != nil {
-		klog.V(1).Infof("Error returned to client: %s", err.Error())
+		log.Errorf("Error returned to client: %s", err.Error())
 		resp, _ = json.Marshal(&Response{
 			Code:    http.StatusInternalServerError,
 			Status:  "error",
@@ -369,7 +368,7 @@ func (a *Accesses) RefreshPricingData(w http.ResponseWriter, r *http.Request, ps
 
 	err := a.CloudProvider.DownloadPricingData()
 	if err != nil {
-		klog.V(1).Infof("Error refreshing pricing data: %s", err.Error())
+		log.Errorf("Error refreshing pricing data: %s", err.Error())
 	}
 
 	w.Write(WrapData(nil, err))
@@ -581,7 +580,7 @@ func (a *Accesses) UpdateSpotInfoConfigs(w http.ResponseWriter, r *http.Request,
 	w.Write(WrapData(data, err))
 	err = a.CloudProvider.DownloadPricingData()
 	if err != nil {
-		klog.V(1).Infof("Error redownloading data on config update: %s", err.Error())
+		log.Errorf("Error redownloading data on config update: %s", err.Error())
 	}
 	return
 }
@@ -865,7 +864,7 @@ func (a *Accesses) GetPrometheusQueueState(w http.ResponseWriter, _ *http.Reques
 	if thanos.IsEnabled() {
 		thanosQueueState, err := prom.GetPrometheusQueueState(a.ThanosClient)
 		if err != nil {
-			log.Warningf("Error getting Thanos queue state: %s", err)
+			log.Warnf("Error getting Thanos queue state: %s", err)
 		} else {
 			result["thanos"] = thanosQueueState
 		}
@@ -892,7 +891,7 @@ func (a *Accesses) GetPrometheusMetrics(w http.ResponseWriter, _ *http.Request,
 	if thanos.IsEnabled() {
 		thanosMetrics, err := prom.GetPrometheusMetrics(a.ThanosClient, thanos.QueryOffset())
 		if err != nil {
-			log.Warningf("Error getting Thanos queue state: %s", err)
+			log.Warnf("Error getting Thanos queue state: %s", err)
 		} else {
 			result["thanos"] = thanosMetrics
 		}
@@ -1320,7 +1319,7 @@ func (a *Accesses) Status(w http.ResponseWriter, r *http.Request, _ httprouter.P
 // captures the panic event in sentry
 func capturePanicEvent(err string, stack string) {
 	msg := fmt.Sprintf("Panic: %s\nStackTrace: %s\n", err, stack)
-	klog.V(1).Infoln(msg)
+	log.Infof(msg)
 	sentry.CurrentHub().CaptureEvent(&sentry.Event{
 		Level:   sentry.LevelError,
 		Message: msg,
@@ -1348,7 +1347,7 @@ func handlePanic(p errors.Panic) bool {
 }
 
 func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses {
-	klog.V(1).Infof("Starting cost-model (git commit \"%s\")", env.GetAppVersion())
+	log.Infof("Starting cost-model (git commit \"%s\")", env.GetAppVersion())
 
 	configWatchers := watcher.NewConfigMapWatchers(additionalConfigWatchers...)
 
@@ -1356,22 +1355,22 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	if errorReportingEnabled {
 		err = sentry.Init(sentry.ClientOptions{Release: env.GetAppVersion()})
 		if err != nil {
-			klog.Infof("Failed to initialize sentry for error reporting")
+			log.Infof("Failed to initialize sentry for error reporting")
 		} else {
 			err = errors.SetPanicHandler(handlePanic)
 			if err != nil {
-				klog.Infof("Failed to set panic handler: %s", err)
+				log.Infof("Failed to set panic handler: %s", err)
 			}
 		}
 	}
 
 	address := env.GetPrometheusServerEndpoint()
 	if address == "" {
-		klog.Fatalf("No address for prometheus set in $%s. Aborting.", env.PrometheusServerEndpointEnvVar)
+		log.Fatalf("No address for prometheus set in $%s. Aborting.", env.PrometheusServerEndpointEnvVar)
 	}
 
 	queryConcurrency := env.GetMaxQueryConcurrency()
-	klog.Infof("Prometheus/Thanos Client Max Concurrency set to %d", queryConcurrency)
+	log.Infof("Prometheus/Thanos Client Max Concurrency set to %d", queryConcurrency)
 
 	timeout := 120 * time.Second
 	keepAlive := 120 * time.Second
@@ -1401,26 +1400,26 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 		QueryLogFile:     "",
 	})
 	if err != nil {
-		klog.Fatalf("Failed to create prometheus client, Error: %v", err)
+		log.Fatalf("Failed to create prometheus client, Error: %v", err)
 	}
 
 	m, err := prom.Validate(promCli)
 	if err != nil || !m.Running {
 		if err != nil {
-			klog.Errorf("Failed to query prometheus at %s. Error: %s . Troubleshooting help available at: %s", address, err.Error(), prom.PrometheusTroubleshootingURL)
+			log.Errorf("Failed to query prometheus at %s. Error: %s . Troubleshooting help available at: %s", address, err.Error(), prom.PrometheusTroubleshootingURL)
 		} else if !m.Running {
-			klog.Errorf("Prometheus at %s is not running. Troubleshooting help available at: %s", address, prom.PrometheusTroubleshootingURL)
+			log.Errorf("Prometheus at %s is not running. Troubleshooting help available at: %s", address, prom.PrometheusTroubleshootingURL)
 		}
 	} else {
-		klog.V(1).Info("Success: retrieved the 'up' query against prometheus at: " + address)
+		log.Infof("Success: retrieved the 'up' query against prometheus at: " + address)
 	}
 
 	api := prometheusAPI.NewAPI(promCli)
 	_, err = api.Config(context.Background())
 	if err != nil {
-		klog.Infof("No valid prometheus config file at %s. Error: %s . Troubleshooting help available at: %s. Ignore if using cortex/thanos here.", address, err.Error(), prom.PrometheusTroubleshootingURL)
+		log.Infof("No valid prometheus config file at %s. Error: %s . Troubleshooting help available at: %s. Ignore if using cortex/thanos here.", address, err.Error(), prom.PrometheusTroubleshootingURL)
 	} else {
-		klog.Infof("Retrieved a prometheus config file from: %s", address)
+		log.Infof("Retrieved a prometheus config file from: %s", address)
 	}
 
 	// Lookup scrape interval for kubecost job, update if found
@@ -1429,7 +1428,7 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 		scrapeInterval = si
 	}
 
-	klog.Infof("Using scrape interval of %f", scrapeInterval.Seconds())
+	log.Infof("Using scrape interval of %f", scrapeInterval.Seconds())
 
 	// Kubernetes API setup
 	var kc *rest.Config
@@ -1481,9 +1480,9 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	for _, cw := range watchedConfigs {
 		configs, err := kubeClientset.CoreV1().ConfigMaps(kubecostNamespace).Get(context.Background(), cw, metav1.GetOptions{})
 		if err != nil {
-			klog.Infof("No %s configmap found at install time, using existing configs: %s", cw, err.Error())
+			log.Infof("No %s configmap found at install time, using existing configs: %s", cw, err.Error())
 		} else {
-			klog.Infof("Found configmap %s, watching...", configs.Name)
+			log.Infof("Found configmap %s, watching...", configs.Name)
 			watchConfigFunc(configs)
 		}
 	}
@@ -1493,13 +1492,13 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	remoteEnabled := env.IsRemoteEnabled()
 	if remoteEnabled {
 		info, err := cloudProvider.ClusterInfo()
-		klog.Infof("Saving cluster  with id:'%s', and name:'%s' to durable storage", info["id"], info["name"])
+		log.Infof("Saving cluster  with id:'%s', and name:'%s' to durable storage", info["id"], info["name"])
 		if err != nil {
-			klog.Infof("Error saving cluster id %s", err.Error())
+			log.Infof("Error saving cluster id %s", err.Error())
 		}
 		_, _, err = cloud.GetOrCreateClusterMeta(info["id"], info["name"])
 		if err != nil {
-			klog.Infof("Unable to set cluster id '%s' for cluster '%s', %s", info["id"], info["name"], err.Error())
+			log.Infof("Unable to set cluster id '%s' for cluster '%s', %s", info["id"], info["name"], err.Error())
 		}
 	}
 
@@ -1526,16 +1525,16 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 
 			_, err = prom.Validate(thanosCli)
 			if err != nil {
-				klog.V(1).Infof("[Warning] Failed to query Thanos at %s. Error: %s.", thanosAddress, err.Error())
+				log.Warnf("Failed to query Thanos at %s. Error: %s.", thanosAddress, err.Error())
 				thanosClient = thanosCli
 			} else {
-				klog.V(1).Info("Success: retrieved the 'up' query against Thanos at: " + thanosAddress)
+				log.Infof("Success: retrieved the 'up' query against Thanos at: " + thanosAddress)
 
 				thanosClient = thanosCli
 			}
 
 		} else {
-			klog.Infof("Error resolving environment variable: $%s", env.ThanosQueryUrlEnvVar)
+			log.Infof("Error resolving environment variable: $%s", env.ThanosQueryUrlEnvVar)
 		}
 	}
 
@@ -1615,7 +1614,7 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 
 	err = a.CloudProvider.DownloadPricingData()
 	if err != nil {
-		klog.V(1).Info("Failed to download pricing data: " + err.Error())
+		log.Infof("Failed to download pricing data: " + err.Error())
 	}
 
 	// Warm the aggregate cache unless explicitly set to false

+ 4 - 5
pkg/costmodel/settings.go

@@ -7,7 +7,6 @@ import (
 	"github.com/kubecost/cost-model/pkg/cloud"
 	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/patrickmn/go-cache"
-	"k8s.io/klog"
 )
 
 // InitializeSettingsPubSub sets up the pub/sub mechanisms and kicks off
@@ -82,7 +81,7 @@ func (a *Accesses) SubscribeToDiscountChanges(ch chan string) {
 func (a *Accesses) customPricingHasChanged() bool {
 	customPricing, err := a.CloudProvider.GetConfig()
 	if err != nil || customPricing == nil {
-		klog.Errorf("error accessing cloud provider configuration: %s", err)
+		log.Errorf("error accessing cloud provider configuration: %s", err)
 		return false
 	}
 
@@ -105,7 +104,7 @@ func (a *Accesses) customPricingHasChanged() bool {
 	}
 	cpStrCached, ok := val.(string)
 	if !ok {
-		klog.Errorf("caching error: failed to cast custom pricing to string")
+		log.Errorf("caching error: failed to cast custom pricing to string")
 	}
 	if cpStr == cpStrCached {
 		return false
@@ -122,7 +121,7 @@ func (a *Accesses) customPricingHasChanged() bool {
 func (a *Accesses) discountHasChanged() bool {
 	customPricing, err := a.CloudProvider.GetConfig()
 	if err != nil || customPricing == nil {
-		klog.Errorf("error accessing cloud provider configuration: %s", err)
+		log.Errorf("error accessing cloud provider configuration: %s", err)
 		return false
 	}
 
@@ -144,7 +143,7 @@ func (a *Accesses) discountHasChanged() bool {
 	}
 	discStrCached, ok := val.(string)
 	if !ok {
-		klog.Errorf("caching error: failed to cast discount to string")
+		log.Errorf("caching error: failed to cast discount to string")
 	}
 	if discStr == discStrCached {
 		return false

+ 2 - 3
pkg/costmodel/sql.go

@@ -5,10 +5,9 @@ import (
 	"fmt"
 	"time"
 
-	"k8s.io/klog"
-
 	costAnalyzerCloud "github.com/kubecost/cost-model/pkg/cloud"
 	"github.com/kubecost/cost-model/pkg/env"
+	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/kubecost/cost-model/pkg/util"
 	"github.com/kubecost/cost-model/pkg/util/json"
 
@@ -302,7 +301,7 @@ func CostDataRangeFromSQL(field string, value string, window string, start strin
 
 	volumes, err := getPVCosts(db)
 	if err != nil {
-		klog.Infof("Error fetching pv data from sql: %s. Skipping PVData", err.Error())
+		log.Infof("Error fetching pv data from sql: %s. Skipping PVData", err.Error())
 	} else {
 		query = `SELECT time_bucket($1, time) AS bucket, name, avg(value), labels->>'persistentvolumeclaim' AS claim, labels->>'pod' AS pod,labels->>'namespace' AS namespace, labels->>'persistentvolume' AS volumename, labels->>'cluster_id' AS clusterid
 		FROM metrics

+ 1 - 1
pkg/env/costmodelenv.go

@@ -395,7 +395,7 @@ func GetParsedUTCOffset() time.Duration {
 		regex := regexp.MustCompile(`^(\+|-)(\d\d):(\d\d)$`)
 		match := regex.FindStringSubmatch(offsetStr)
 		if match == nil {
-			log.Warningf("Illegal UTC offset: %s", offsetStr)
+			log.Warnf("Illegal UTC offset: %s", offsetStr)
 			return offset
 		}
 

+ 12 - 12
pkg/kubecost/allocation.go

@@ -685,7 +685,7 @@ func (a *Allocation) String() string {
 
 func (a *Allocation) add(that *Allocation) {
 	if a == nil {
-		log.Warningf("Allocation.AggregateBy: trying to add a nil receiver")
+		log.Warnf("Allocation.AggregateBy: trying to add a nil receiver")
 		return
 	}
 
@@ -1033,7 +1033,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	if idleSet.Length() > 0 && options.ShareIdle != ShareNone {
 		idleCoefficients, allocatedTotalsMap, err = computeIdleCoeffs(options, as, shareSet)
 		if err != nil {
-			log.Warningf("AllocationSet.AggregateBy: compute idle coeff: %s", err)
+			log.Warnf("AllocationSet.AggregateBy: compute idle coeff: %s", err)
 			return fmt.Errorf("error computing idle coefficients: %s", err)
 		}
 	}
@@ -1157,11 +1157,11 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 
 				// Make sure idle coefficients exist
 				if _, ok := idleCoefficients[idleId]; !ok {
-					log.Warningf("AllocationSet.AggregateBy: error getting idle coefficient: no idleId '%s' for '%s'", idleId, alloc.Name)
+					log.Warnf("AllocationSet.AggregateBy: error getting idle coefficient: no idleId '%s' for '%s'", idleId, alloc.Name)
 					continue
 				}
 				if _, ok := idleCoefficients[idleId][alloc.Name]; !ok {
-					log.Warningf("AllocationSet.AggregateBy: error getting idle coefficient for '%s'", alloc.Name)
+					log.Warnf("AllocationSet.AggregateBy: error getting idle coefficient for '%s'", alloc.Name)
 					continue
 				}
 
@@ -1213,11 +1213,11 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 
 				// Make sure idle coefficients exist
 				if _, ok := idleCoefficients[idleId]; !ok {
-					log.Warningf("AllocationSet.AggregateBy: error getting idle coefficient: no idleId '%s' for '%s'", idleId, alloc.Name)
+					log.Warnf("AllocationSet.AggregateBy: error getting idle coefficient: no idleId '%s' for '%s'", idleId, alloc.Name)
 					continue
 				}
 				if _, ok := idleCoefficients[idleId][alloc.Name]; !ok {
-					log.Warningf("AllocationSet.AggregateBy: error getting idle coefficient for '%s'", alloc.Name)
+					log.Warnf("AllocationSet.AggregateBy: error getting idle coefficient for '%s'", alloc.Name)
 					continue
 				}
 
@@ -1289,7 +1289,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 			for _, sharedAlloc := range shareSet.allocations {
 				if _, ok := shareCoefficients[alloc.Name]; !ok {
 					if !alloc.IsIdle() && !alloc.IsUnmounted() {
-						log.Warningf("AllocationSet.AggregateBy: error getting share coefficienct for '%s'", alloc.Name)
+						log.Warnf("AllocationSet.AggregateBy: error getting share coefficienct for '%s'", alloc.Name)
 					}
 					continue
 				}
@@ -1509,7 +1509,7 @@ func computeShareCoeffs(aggregateBy []string, options *AllocationAggregationOpti
 		if coeffs[a] > 0 && total > 0 {
 			coeffs[a] /= total
 		} else {
-			log.Warningf("ETL: invalid values for shared coefficients: %d, %d", coeffs[a], total)
+			log.Warnf("ETL: invalid values for shared coefficients: %v, %v", coeffs[a], total)
 			coeffs[a] = 0.0
 		}
 	}
@@ -1703,11 +1703,11 @@ func (as *AllocationSet) Each(f func(string, *Allocation)) {
 // End returns the End time of the AllocationSet window
 func (as *AllocationSet) End() time.Time {
 	if as == nil {
-		log.Warningf("AllocationSet: calling End on nil AllocationSet")
+		log.Warnf("AllocationSet: calling End on nil AllocationSet")
 		return time.Unix(0, 0)
 	}
 	if as.Window.End() == nil {
-		log.Warningf("AllocationSet: AllocationSet with illegal window: End is nil; len(as.allocations)=%d", len(as.allocations))
+		log.Warnf("AllocationSet: AllocationSet with illegal window: End is nil; len(as.allocations)=%d", len(as.allocations))
 		return time.Unix(0, 0)
 	}
 	return *as.Window.End()
@@ -1930,11 +1930,11 @@ func (as *AllocationSet) Set(alloc *Allocation) error {
 // Start returns the Start time of the AllocationSet window
 func (as *AllocationSet) Start() time.Time {
 	if as == nil {
-		log.Warningf("AllocationSet: calling Start on nil AllocationSet")
+		log.Warnf("AllocationSet: calling Start on nil AllocationSet")
 		return time.Unix(0, 0)
 	}
 	if as.Window.Start() == nil {
-		log.Warningf("AllocationSet: AllocationSet with illegal window: Start is nil; len(as.allocations)=%d", len(as.allocations))
+		log.Warnf("AllocationSet: AllocationSet with illegal window: Start is nil; len(as.allocations)=%d", len(as.allocations))
 		return time.Unix(0, 0)
 	}
 	return *as.Window.Start()

+ 1 - 1
pkg/kubecost/allocationprops.go

@@ -394,7 +394,7 @@ func (p *AllocationProperties) GenerateKey(aggregateBy []string, labelConfig *La
 			// This case should never be reached, as input up until this point
 			// should be checked and rejected if invalid. But if we do get a
 			// value we don't recognize, log a warning.
-			log.Warningf("generateKey: illegal aggregation parameter: %s", agg)
+			log.Warnf("generateKey: illegal aggregation parameter: %s", agg)
 		}
 	}
 

+ 15 - 15
pkg/kubecost/asset.go

@@ -536,13 +536,13 @@ func (a *Any) SetStartEnd(start, end time.Time) {
 	if a.Window().Contains(start) {
 		a.start = start
 	} else {
-		log.Warningf("Any.SetStartEnd: start %s not in %s", start, a.Window())
+		log.Warnf("Any.SetStartEnd: start %s not in %s", start, a.Window())
 	}
 
 	if a.Window().Contains(end) {
 		a.end = end
 	} else {
-		log.Warningf("Any.SetStartEnd: end %s not in %s", end, a.Window())
+		log.Warnf("Any.SetStartEnd: end %s not in %s", end, a.Window())
 	}
 }
 
@@ -722,13 +722,13 @@ func (ca *Cloud) SetStartEnd(start, end time.Time) {
 	if ca.Window().Contains(start) {
 		ca.start = start
 	} else {
-		log.Warningf("Cloud.SetStartEnd: start %s not in %s", start, ca.Window())
+		log.Warnf("Cloud.SetStartEnd: start %s not in %s", start, ca.Window())
 	}
 
 	if ca.Window().Contains(end) {
 		ca.end = end
 	} else {
-		log.Warningf("Cloud.SetStartEnd: end %s not in %s", end, ca.Window())
+		log.Warnf("Cloud.SetStartEnd: end %s not in %s", end, ca.Window())
 	}
 }
 
@@ -1124,7 +1124,7 @@ func (d *Disk) Minutes() float64 {
 	windowMins := d.window.Minutes()
 
 	if diskMins > windowMins {
-		log.Warningf("Asset ETL: Disk.Minutes exceeds window: %.2f > %.2f", diskMins, windowMins)
+		log.Warnf("Asset ETL: Disk.Minutes exceeds window: %.2f > %.2f", diskMins, windowMins)
 		diskMins = windowMins
 	}
 
@@ -1150,13 +1150,13 @@ func (d *Disk) SetStartEnd(start, end time.Time) {
 	if d.Window().Contains(start) {
 		d.start = start
 	} else {
-		log.Warningf("Disk.SetStartEnd: start %s not in %s", start, d.Window())
+		log.Warnf("Disk.SetStartEnd: start %s not in %s", start, d.Window())
 	}
 
 	if d.Window().Contains(end) {
 		d.end = end
 	} else {
-		log.Warningf("Disk.SetStartEnd: end %s not in %s", end, d.Window())
+		log.Warnf("Disk.SetStartEnd: end %s not in %s", end, d.Window())
 	}
 }
 
@@ -1445,7 +1445,7 @@ func (n *Network) Minutes() float64 {
 	windowMins := n.window.Minutes()
 
 	if netMins > windowMins {
-		log.Warningf("Asset ETL: Network.Minutes exceeds window: %.2f > %.2f", netMins, windowMins)
+		log.Warnf("Asset ETL: Network.Minutes exceeds window: %.2f > %.2f", netMins, windowMins)
 		netMins = windowMins
 	}
 
@@ -1471,13 +1471,13 @@ func (n *Network) SetStartEnd(start, end time.Time) {
 	if n.Window().Contains(start) {
 		n.start = start
 	} else {
-		log.Warningf("Disk.SetStartEnd: start %s not in %s", start, n.Window())
+		log.Warnf("Disk.SetStartEnd: start %s not in %s", start, n.Window())
 	}
 
 	if n.Window().Contains(end) {
 		n.end = end
 	} else {
-		log.Warningf("Disk.SetStartEnd: end %s not in %s", end, n.Window())
+		log.Warnf("Disk.SetStartEnd: end %s not in %s", end, n.Window())
 	}
 }
 
@@ -1697,7 +1697,7 @@ func (n *Node) Minutes() float64 {
 	windowMins := n.window.Minutes()
 
 	if nodeMins > windowMins {
-		log.Warningf("Asset ETL: Node.Minutes exceeds window: %.2f > %.2f", nodeMins, windowMins)
+		log.Warnf("Asset ETL: Node.Minutes exceeds window: %.2f > %.2f", nodeMins, windowMins)
 		nodeMins = windowMins
 	}
 
@@ -1723,13 +1723,13 @@ func (n *Node) SetStartEnd(start, end time.Time) {
 	if n.Window().Contains(start) {
 		n.start = start
 	} else {
-		log.Warningf("Disk.SetStartEnd: start %s not in %s", start, n.Window())
+		log.Warnf("Disk.SetStartEnd: start %s not in %s", start, n.Window())
 	}
 
 	if n.Window().Contains(end) {
 		n.end = end
 	} else {
-		log.Warningf("Disk.SetStartEnd: end %s not in %s", end, n.Window())
+		log.Warnf("Disk.SetStartEnd: end %s not in %s", end, n.Window())
 	}
 }
 
@@ -2095,13 +2095,13 @@ func (lb *LoadBalancer) SetStartEnd(start, end time.Time) {
 	if lb.Window().Contains(start) {
 		lb.start = start
 	} else {
-		log.Warningf("Disk.SetStartEnd: start %s not in %s", start, lb.Window())
+		log.Warnf("Disk.SetStartEnd: start %s not in %s", start, lb.Window())
 	}
 
 	if lb.Window().Contains(end) {
 		lb.end = end
 	} else {
-		log.Warningf("Disk.SetStartEnd: end %s not in %s", end, lb.Window())
+		log.Warnf("Disk.SetStartEnd: end %s not in %s", end, lb.Window())
 	}
 }
 

+ 4 - 4
pkg/kubecost/summaryallocation.go

@@ -890,7 +890,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		sharingCoeffDenominator -= totalUnmountedCost
 
 		if sharingCoeffDenominator <= 0.0 {
-			log.Warningf("SummaryAllocation: sharing coefficient denominator is %f", sharingCoeffDenominator)
+			log.Warnf("SummaryAllocation: sharing coefficient denominator is %f", sharingCoeffDenominator)
 		} else {
 			// Compute sharing coeffs by dividing the thus-far accumulated
 			// numerators by the now-finalized denominator.
@@ -898,7 +898,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 				if sharingCoeffs[key] > 0.0 {
 					sharingCoeffs[key] /= sharingCoeffDenominator
 				} else {
-					log.Warningf("SummaryAllocation: detected illegal sharing coefficient for %s: %v (setting to zero)", key, sharingCoeffs[key])
+					log.Warnf("SummaryAllocation: detected illegal sharing coefficient for %s: %v (setting to zero)", key, sharingCoeffs[key])
 					sharingCoeffs[key] = 0.0
 				}
 			}
@@ -978,7 +978,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 
 		rt, ok := allocTotals[key]
 		if !ok {
-			log.Warningf("SummaryAllocation: AggregateBy: cannot handle undistributed idle for '%s'", key)
+			log.Warnf("SummaryAllocation: AggregateBy: cannot handle undistributed idle for '%s'", key)
 			continue
 		}
 
@@ -1174,7 +1174,7 @@ func NewSummaryAllocationSetRange(sass ...*SummaryAllocationSet) *SummaryAllocat
 		if step == 0 {
 			step = sas.Window.Duration()
 		} else if step != sas.Window.Duration() {
-			log.Warningf("instantiating range with step %s using set of step %s is illegal", step, sas.Window.Duration())
+			log.Warnf("instantiating range with step %s using set of step %s is illegal", step, sas.Window.Duration())
 		}
 	}
 

+ 43 - 9
pkg/log/log.go

@@ -2,9 +2,13 @@ package log
 
 import (
 	"fmt"
+	"os"
+	"strings"
 	"time"
 
-	"k8s.io/klog"
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	"github.com/spf13/viper"
 )
 
 // TODO for deduped functions, if timeLogged > logTypeLimit, should we log once
@@ -13,8 +17,26 @@ import (
 // concurrency-safe counter
 var ctr = newCounter()
 
+func InitLogging() {
+	zerolog.TimeFieldFormat = time.RFC3339Nano
+	// Default to using pretty formatting
+	if strings.ToLower(viper.GetString("log-format")) != "json" {
+		log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339Nano})
+	}
+
+	level, err := zerolog.ParseLevel(viper.GetString("log-level"))
+	if err != nil {
+		zerolog.SetGlobalLevel(zerolog.InfoLevel)
+		log.Warn().Msg("Error parsing log-level, setting level to 'info'")
+		return
+	}
+	zerolog.SetGlobalLevel(level)
+	log.Log().Msgf("Log level set to %v", level)
+
+}
+
 func Errorf(format string, a ...interface{}) {
-	klog.Errorf(fmt.Sprintf("[Error] %s", format), a...)
+	log.Error().Msgf(format, a...)
 }
 
 func DedupedErrorf(logTypeLimit int, format string, a ...interface{}) {
@@ -28,23 +50,27 @@ func DedupedErrorf(logTypeLimit int, format string, a ...interface{}) {
 	}
 }
 
-func Warningf(format string, a ...interface{}) {
-	klog.V(2).Infof(fmt.Sprintf("[Warning] %s", format), a...)
+func Warnf(format string, a ...interface{}) {
+	log.Warn().Msgf(format, a...)
 }
 
 func DedupedWarningf(logTypeLimit int, format string, a ...interface{}) {
 	timesLogged := ctr.increment(format)
 
 	if timesLogged < logTypeLimit {
-		Warningf(format, a...)
+		Warnf(format, a...)
 	} else if timesLogged == logTypeLimit {
-		Warningf(format, a...)
+		Warnf(format, a...)
 		Infof("%s logged %d times: suppressing future logs", format, logTypeLimit)
 	}
 }
 
+func Info(msg string) {
+	log.Info().Msg(msg)
+}
+
 func Infof(format string, a ...interface{}) {
-	klog.V(3).Infof(fmt.Sprintf("[Info] %s", format), a...)
+	log.Info().Msgf(format, a...)
 }
 
 func DedupedInfof(logTypeLimit int, format string, a ...interface{}) {
@@ -59,11 +85,19 @@ func DedupedInfof(logTypeLimit int, format string, a ...interface{}) {
 }
 
 func Profilef(format string, a ...interface{}) {
-	klog.V(3).Infof(fmt.Sprintf("[Profiler] %s", format), a...)
+	log.Info().Msgf(fmt.Sprintf("[Profiler] %s", format), a...)
+}
+
+func Debug(msg string) {
+	log.Debug().Msg(msg)
 }
 
 func Debugf(format string, a ...interface{}) {
-	klog.V(5).Infof(fmt.Sprintf("[Debug] %s", format), a...)
+	log.Debug().Msgf(format, a...)
+}
+
+func Fatalf(format string, a ...interface{}) {
+	log.Fatal().Msgf(format, a...)
 }
 
 func Profile(start time.Time, name string) {

+ 2 - 2
pkg/prom/query.go

@@ -258,7 +258,7 @@ func (ctx *Context) query(query string, t time.Time) (interface{}, prometheus.Wa
 			return nil, warnings, CommErrorf("Error: %s, Body: %s, Query: %s", w, body, query)
 		}
 
-		log.Warningf("fetching query '%s': %s", query, w)
+		log.Warnf("fetching query '%s': %s", query, w)
 	}
 
 	return toReturn, warnings, nil
@@ -382,7 +382,7 @@ func (ctx *Context) queryRange(query string, start, end time.Time, step time.Dur
 			return nil, warnings, CommErrorf("Error: %s, Body: %s, Query: %s", w, body, query)
 		}
 
-		log.Warningf("fetching query '%s': %s", query, w)
+		log.Warnf("fetching query '%s': %s", query, w)
 	}
 
 	return toReturn, warnings, nil

+ 2 - 2
pkg/prom/result.go

@@ -264,7 +264,7 @@ func (qr *QueryResult) GetLabels() map[string]string {
 		label := strings.TrimPrefix(k, "label_")
 		value, ok := v.(string)
 		if !ok {
-			log.Warningf("Failed to parse label value for label: '%s'", label)
+			log.Warnf("Failed to parse label value for label: '%s'", label)
 			continue
 		}
 
@@ -287,7 +287,7 @@ func (qr *QueryResult) GetAnnotations() map[string]string {
 		annotations := strings.TrimPrefix(k, "annotation_")
 		value, ok := v.(string)
 		if !ok {
-			log.Warningf("Failed to parse label value for label: '%s'", annotations)
+			log.Warnf("Failed to parse label value for label: '%s'", annotations)
 			continue
 		}
 

+ 5 - 5
pkg/services/clusters/clustermanager.go

@@ -8,10 +8,10 @@ import (
 
 	"github.com/google/uuid"
 
+	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/kubecost/cost-model/pkg/util/fileutil"
 	"github.com/kubecost/cost-model/pkg/util/json"
 
-	"k8s.io/klog"
 	"sigs.k8s.io/yaml"
 )
 
@@ -92,7 +92,7 @@ func NewConfiguredClusterManager(storage ClusterStorage, config string) *Cluster
 	exists, err := fileutil.FileExists(config)
 	if !exists {
 		if err != nil {
-			klog.V(1).Infof("[Error] Failed to load config file: %s. Error: %s", config, err.Error())
+			log.Errorf("Failed to load config file: %s. Error: %s", config, err.Error())
 		}
 		return clusterManager
 	}
@@ -117,7 +117,7 @@ func NewConfiguredClusterManager(storage ClusterStorage, config string) *Cluster
 		if entry.Auth != nil {
 			authData, err := getAuth(entry.Auth)
 			if err != nil {
-				klog.V(1).Infof("[Error]: %s", err)
+				log.Errorf("%s", err)
 			} else {
 				details[DetailsAuthKey] = authData
 			}
@@ -188,7 +188,7 @@ func (cm *ClusterManager) GetAll() []*ClusterDefinition {
 		var cd ClusterDefinition
 		err := json.Unmarshal(cluster, &cd)
 		if err != nil {
-			klog.V(1).Infof("[Error] Failed to unmarshal json cluster definition for key: %s", key)
+			log.Errorf("Failed to unmarshal json cluster definition for key: %s", key)
 			return nil
 		}
 
@@ -197,7 +197,7 @@ func (cm *ClusterManager) GetAll() []*ClusterDefinition {
 	})
 
 	if err != nil {
-		klog.Infof("[Error] Failed to load list of clusters: %s", err.Error())
+		log.Infof("[Error] Failed to load list of clusters: %s", err.Error())
 	}
 
 	return clusters

+ 2 - 2
pkg/services/clusters/clustersendpoints.go

@@ -7,8 +7,8 @@ import (
 
 	"github.com/julienschmidt/httprouter"
 
+	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/kubecost/cost-model/pkg/util/json"
-	"k8s.io/klog"
 )
 
 // DataEnvelope is a generic wrapper struct for http response data
@@ -94,7 +94,7 @@ func wrapData(data interface{}, err error) []byte {
 	var resp []byte
 
 	if err != nil {
-		klog.V(1).Infof("Error returned to client: %s", err.Error())
+		log.Infof("Error returned to client: %s", err.Error())
 		resp, _ = json.Marshal(&DataEnvelope{
 			Code:   http.StatusInternalServerError,
 			Status: "error",

+ 2 - 2
pkg/services/clusterservice.go

@@ -22,13 +22,13 @@ func newClusterManager() *clusters.ClusterManager {
 		path := env.GetConfigPath()
 		db, err := bolt.Open(path+"costmodel.db", 0600, nil)
 		if err != nil {
-			klog.V(1).Infof("[Error] Failed to create costmodel.db: %s", err.Error())
+			log.Errorf("[Error] Failed to create costmodel.db: %s", err.Error())
 			return cm.NewConfiguredClusterManager(cm.NewMapDBClusterStorage(), clustersConfigFile)
 		}
 
 		store, err := clusters.NewBoltDBClusterStorage("clusters", db)
 		if err != nil {
-			klog.V(1).Infof("[Error] Failed to Create Cluster Storage: %s", err.Error())
+			log.Errorf("[Error] Failed to Create Cluster Storage: %s", err.Error())
 			return clusters.NewConfiguredClusterManager(clusters.NewMapDBClusterStorage(), clustersConfigFile)
 		}
 

+ 1 - 1
pkg/services/services.go

@@ -32,7 +32,7 @@ type defaultHTTPServices struct {
 // Add a HTTPService implementation for
 func (dhs *defaultHTTPServices) Add(service HTTPService) {
 	if service == nil {
-		log.Warningf("Attempting to Add nil HTTPService")
+		log.Warnf("Attempting to Add nil HTTPService")
 		return
 	}
 

+ 2 - 2
pkg/util/watcher/configwatchers.go

@@ -1,8 +1,8 @@
 package watcher
 
 import (
+	"github.com/kubecost/cost-model/pkg/log"
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/klog"
 )
 
 // ConfigMapWatcher represents a single configmap watcher
@@ -66,7 +66,7 @@ func (cmw *ConfigMapWatchers) ToWatchFunc() func(interface{}) {
 			for _, cw := range watchers {
 				err := cw.WatchFunc(name, data)
 				if err != nil {
-					klog.Infof("ERROR UPDATING %s CONFIG: %s", name, err.Error())
+					log.Infof("ERROR UPDATING %s CONFIG: %s", name, err.Error())
 				}
 			}
 		}